Dataset schema (per-row fields):
  query            string, length 7 to 3.85k
  document         string, length 11 to 430k
  metadata         dict
  negatives        sequence, length 0 to 101
  negative_scores  sequence, length 0 to 101
  document_score   string, length 3 to 10
  document_rank    categorical string, 102 distinct values
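The lines that follow appear to be one example row (its query, document, metadata, and part of its negatives list). As a minimal sketch of how a row with this schema could be inspected, assuming the rows are published as a Hugging Face dataset loadable with the datasets library; the dataset identifier below is a placeholder, not the real path:

from datasets import load_dataset

# Hypothetical dataset path; substitute the actual identifier.
ds = load_dataset("org/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"][:120])          # query text (7 to 3.85k chars)
print(row["document"][:120])       # positive document (11 to 430k chars)
print(len(row["negatives"]))       # up to 101 hard negatives per row
print(row["negative_scores"][:5])  # score assigned to each negative
print(row["document_score"], row["document_rank"])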
listBySubscriptionCreateRequest creates the ListBySubscription request.
func (client *DataCollectionEndpointsClient) listBySubscriptionCreateRequest(ctx context.Context, options *DataCollectionEndpointsListBySubscriptionOptions) (*policy.Request, error) {
	urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Insights/dataCollectionEndpoints"
	if client.subscriptionID == "" {
		return nil, errors.New("parameter client.subscriptionID cannot be empty")
	}
	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
	req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("api-version", "2021-04-01")
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header.Set("Accept", "application/json")
	return req, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
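The objective block above declares a single triplet objective over ("query", "document", "negatives"), and the long array that follows appears to be this row's negatives field (truncated in this extract). As a rough sketch of how that objective could be consumed, the helper below (an illustrative name, not part of the dataset) expands one row into anchor/positive/negative triplets:

# Minimal sketch: expand one row into (anchor, positive, negative) triplets,
# following the declared triplet objective ["query", "document", "negatives"].
def row_to_triplets(row, max_negatives=None):
    query = row["query"]          # anchor
    document = row["document"]    # positive
    negatives = row["negatives"]  # 0 to 101 hard-negative documents
    if max_negatives is not None:
        negatives = negatives[:max_negatives]
    return [(query, document, negative) for negative in negatives]

# Usage with a dict shaped like this row (values abbreviated for readability).
example_row = {
    "query": "listBySubscriptionCreateRequest creates the ListBySubscription request.",
    "document": "func (client *DataCollectionEndpointsClient) listBySubscriptionCreateRequest(...) {...}",
    "negatives": ["func (client *DevicesClient) listBySubscriptionCreateRequest(...) {...}"],
}
print(len(row_to_triplets(example_row)))  # -> 1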
[ "func (client *DevicesClient) listBySubscriptionCreateRequest(ctx context.Context, options *DevicesClientListBySubscriptionOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.HybridNetwork/devices\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *AvailabilitySetsClient) listBySubscriptionCreateRequest(ctx context.Context, options *AvailabilitySetsListBySubscriptionOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Compute/availabilitySets\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ManagementAssociationsClient) listBySubscriptionCreateRequest(ctx context.Context, options *ManagementAssociationsListBySubscriptionOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.OperationsManagement/ManagementAssociations\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2015-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *WorkspacesClient) listBySubscriptionCreateRequest(ctx context.Context, options *WorkspacesListBySubscriptionOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Databricks/workspaces\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ServersClient) listBySubscriptionCreateRequest(ctx context.Context, options 
*ServersClientListBySubscriptionOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.FluidRelay/fluidRelayServers\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *SpatialAnchorsAccountsClient) listBySubscriptionCreateRequest(ctx context.Context, options *SpatialAnchorsAccountsClientListBySubscriptionOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.MixedReality/spatialAnchorsAccounts\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-03-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *IotSecuritySolutionClient) listBySubscriptionCreateRequest(ctx context.Context, options *IotSecuritySolutionClientListBySubscriptionOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Security/iotSecuritySolutions\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-08-01\")\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *LocalRulestacksClient) listBySubscriptionCreateRequest(ctx context.Context, options *LocalRulestacksClientListBySubscriptionOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-29\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *DataCollectionEndpointsClient) 
listBySubscriptionCreateRequest(ctx context.Context, options *DataCollectionEndpointsClientListBySubscriptionOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Insights/dataCollectionEndpoints\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ConnectedEnvironmentsClient) listBySubscriptionCreateRequest(ctx context.Context, options *ConnectedEnvironmentsClientListBySubscriptionOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.App/connectedEnvironments\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *CustomAssessmentAutomationsClient) listBySubscriptionCreateRequest(ctx context.Context, options *CustomAssessmentAutomationsListBySubscriptionOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Security/customAssessmentAutomations\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *MetricAlertsClient) listBySubscriptionCreateRequest(ctx context.Context, options *MetricAlertsClientListBySubscriptionOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Insights/metricAlerts\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *AlertProcessingRulesClient) listBySubscriptionCreateRequest(ctx context.Context, options 
*AlertProcessingRulesClientListBySubscriptionOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.AlertsManagement/actionRules\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-08\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *CassandraClustersClient) listBySubscriptionCreateRequest(ctx context.Context, options *CassandraClustersClientListBySubscriptionOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.DocumentDB/cassandraClusters\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-03-15-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *RedisClient) listBySubscriptionCreateRequest(ctx context.Context, options *RedisListBySubscriptionOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Cache/redis\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-12-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *SubscriptionClient) listCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, options *SubscriptionClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/subscriptions\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn 
nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\tif options != nil && options.Skip != nil {\n\t\treqQP.Set(\"$skip\", strconv.FormatInt(int64(*options.Skip), 10))\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client IotHubResourceClient) ListBySubscriptionPreparer(ctx context.Context) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"subscriptionId\": autorest.Encode(\"path\", client.SubscriptionID),\n\t}\n\n\tconst APIVersion = \"2022-04-30-preview\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsGet(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPathParameters(\"/subscriptions/{subscriptionId}/providers/Microsoft.Devices/IotHubs\", pathParameters),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func (client ServicesClient) ListBySubscriptionPreparer(ctx context.Context) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"subscriptionId\": autorest.Encode(\"path\", client.SubscriptionID),\n\t}\n\n\tconst APIVersion = \"2022-11-01-preview\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsGet(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPathParameters(\"/subscriptions/{subscriptionId}/providers/Microsoft.AppPlatform/Spring\", pathParameters),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func (client *TriggersClient) listByShareSubscriptionCreateRequest(ctx context.Context, resourceGroupName string, accountName string, shareSubscriptionName string, options *TriggersClientListByShareSubscriptionOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shareSubscriptions/{shareSubscriptionName}/triggers\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\tif shareSubscriptionName == \"\" {\n\t\treturn nil, errors.New(\"parameter shareSubscriptionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{shareSubscriptionName}\", url.PathEscape(shareSubscriptionName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-09-01\")\n\tif options != 
nil && options.SkipToken != nil {\n\t\treqQP.Set(\"$skipToken\", *options.SkipToken)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client LabClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tazure.DoRetryWithRegistration(client.Client))\n}", "func (client AccountClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) {\n return client.Send(req, azure.DoRetryWithRegistration(client.Client))\n }", "func (client LabClient) ListBySubscription(ctx context.Context, filter string, top *int32, orderBy string) (result ResponseWithContinuationLabPage, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/LabClient.ListBySubscription\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.rwcl.Response.Response != nil {\n\t\t\t\tsc = result.rwcl.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tresult.fn = client.listBySubscriptionNextResults\n\treq, err := client.ListBySubscriptionPreparer(ctx, filter, top, orderBy)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"dtl.LabClient\", \"ListBySubscription\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListBySubscriptionSender(req)\n\tif err != nil {\n\t\tresult.rwcl.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"dtl.LabClient\", \"ListBySubscription\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.rwcl, err = client.ListBySubscriptionResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"dtl.LabClient\", \"ListBySubscription\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}", "func (client ServicesClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) {\n\treturn client.Send(req, azure.DoRetryWithRegistration(client.Client))\n}", "func (client ServicesClient) ListBySubscription(ctx context.Context) (result ServiceResourceListPage, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/ServicesClient.ListBySubscription\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.srl.Response.Response != nil {\n\t\t\t\tsc = result.srl.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tresult.fn = client.listBySubscriptionNextResults\n\treq, err := client.ListBySubscriptionPreparer(ctx)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"appplatform.ServicesClient\", \"ListBySubscription\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListBySubscriptionSender(req)\n\tif err != nil {\n\t\tresult.srl.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"appplatform.ServicesClient\", \"ListBySubscription\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.srl, err = client.ListBySubscriptionResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"appplatform.ServicesClient\", \"ListBySubscription\", resp, \"Failure responding to request\")\n\t\treturn\n\t}\n\tif result.srl.hasNextLink() && result.srl.IsEmpty() {\n\t\terr = result.NextWithContext(ctx)\n\t\treturn\n\t}\n\n\treturn\n}", "func (client *VendorSKUPreviewClient) listCreateRequest(ctx context.Context, vendorName string, skuName string, options *VendorSKUPreviewClientListOptions) 
(*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.HybridNetwork/vendors/{vendorName}/vendorSkus/{skuName}/previewSubscriptions\"\n\tif vendorName == \"\" {\n\t\treturn nil, errors.New(\"parameter vendorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vendorName}\", url.PathEscape(vendorName))\n\tif skuName == \"\" {\n\t\treturn nil, errors.New(\"parameter skuName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{skuName}\", url.PathEscape(skuName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (c *DefaultApiService) ListSubscription(params *ListSubscriptionParams) (*ListSubscriptionResponse, error) {\n\tpath := \"/v1/Subscriptions\"\n\n\tdata := url.Values{}\n\theaders := make(map[string]interface{})\n\n\tif params != nil && params.SinkSid != nil {\n\t\tdata.Set(\"SinkSid\", *params.SinkSid)\n\t}\n\tif params != nil && params.PageSize != nil {\n\t\tdata.Set(\"PageSize\", fmt.Sprint(*params.PageSize))\n\t}\n\n\tresp, err := c.requestHandler.Get(c.baseURL+path, data, headers)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tps := &ListSubscriptionResponse{}\n\tif err := json.NewDecoder(resp.Body).Decode(ps); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ps, err\n}", "func DecodeCreateSubscriptionRequest(b []byte) (*CreateSubscriptionRequest, error) {\n\tc := &CreateSubscriptionRequest{}\n\tif err := c.DecodeFromBytes(b); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c, nil\n}", "func (client IotHubResourceClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) {\n\treturn client.Send(req, azure.DoRetryWithRegistration(client.Client))\n}", "func CreateDescribeSubscriptionInstancesRequest() (request *DescribeSubscriptionInstancesRequest) {\n\trequest = &DescribeSubscriptionInstancesRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Dts\", \"2018-08-01\", \"DescribeSubscriptionInstances\", \"dts\", \"openAPI\")\n\treturn\n}", "func (r ApiCreateSubscriptionRequest) SubscriptionRequest(subscriptionRequest SubscriptionRequest) ApiCreateSubscriptionRequest {\n\tr.subscriptionRequest = &subscriptionRequest\n\treturn r\n}", "func (client *AccountsClient) listCreateRequest(ctx context.Context, options *AccountsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.CognitiveServices/accounts\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = 
[]string{\"application/json\"}\n\treturn req, nil\n}", "func (client *WCFRelaysClient) listByNamespaceCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, options *WCFRelaysClientListByNamespaceOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/wcfRelays\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (r *SubscriptionsService) List() *SubscriptionsListCall {\n\treturn &SubscriptionsListCall{\n\t\ts: r.s,\n\t\tcaller_: googleapi.JSONCall{},\n\t\tparams_: make(map[string][]string),\n\t\tpathTemplate_: \"subscriptions\",\n\t\tcontext_: googleapi.NoContext,\n\t}\n}", "func (client *ProviderShareSubscriptionsClient) listByShareCreateRequest(ctx context.Context, resourceGroupName string, accountName string, shareName string, options *ProviderShareSubscriptionsClientListByShareOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shares/{shareName}/providerShareSubscriptions\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\tif shareName == \"\" {\n\t\treturn nil, errors.New(\"parameter shareName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{shareName}\", url.PathEscape(shareName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-09-01\")\n\tif options != nil && options.SkipToken != nil {\n\t\treqQP.Set(\"$skipToken\", *options.SkipToken)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (d *DatastoreSubscription) List() ([]*Subscription, error) {\n\treturn d.collectByField(func(s *Subscription) bool {\n\t\treturn 
true\n\t})\n}", "func (client *CapacitiesClient) listSKUsCreateRequest(ctx context.Context, options *CapacitiesClientListSKUsOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.PowerBIDedicated/skus\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-01-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (s *API) CreateSubscription(\n\trequest SubscriptionCreate,\n) (data SubscriptionResponse, err error) {\n\tif request.CustomerID == \"\" {\n\t\tif request.Customer.DisplayName == \"\" || request.Customer.Email == \"\" {\n\t\t\terr = fmt.Errorf(\n\t\t\t\t\"CustomerID is a required field if subscription is created for existen customer. For new customer Customer.DisplayName and Customer.Email fields are required\",\n\t\t\t)\n\t\t\treturn\n\t\t}\n\t}\n\n\tendpoint := zoho.Endpoint{\n\t\tName: \"subscriptions\",\n\t\tURL: fmt.Sprintf(\"https://subscriptions.zoho.%s/api/v1/subscriptions\", s.ZohoTLD),\n\t\tMethod: zoho.HTTPPost,\n\t\tResponseData: &SubscriptionResponse{},\n\t\tRequestBody: request,\n\t\tHeaders: map[string]string{\n\t\t\tZohoSubscriptionsEndpointHeader: s.OrganizationID,\n\t\t},\n\t}\n\n\terr = s.Zoho.HTTPRequest(&endpoint)\n\tif err != nil {\n\t\treturn SubscriptionResponse{}, fmt.Errorf(\"Failed to create subscription: %s\", err)\n\t}\n\n\tif v, ok := endpoint.ResponseData.(*SubscriptionResponse); ok {\n\t\treturn *v, nil\n\t}\n\n\treturn SubscriptionResponse{}, fmt.Errorf(\"Data returned was nil\")\n}", "func (client *AvailabilitySetsClient) ListBySubscription(options *AvailabilitySetsListBySubscriptionOptions) *AvailabilitySetsListBySubscriptionPager {\n\treturn &AvailabilitySetsListBySubscriptionPager{\n\t\tclient: client,\n\t\trequester: func(ctx context.Context) (*policy.Request, error) {\n\t\t\treturn client.listBySubscriptionCreateRequest(ctx, options)\n\t\t},\n\t\tadvancer: func(ctx context.Context, resp AvailabilitySetsListBySubscriptionResponse) (*policy.Request, error) {\n\t\t\treturn runtime.NewRequest(ctx, http.MethodGet, *resp.AvailabilitySetListResult.NextLink)\n\t\t},\n\t}\n}", "func (o *CreateSubscriptionV2Params) WithRequest(Request *models.CreateSubscriptionRequest) *CreateSubscriptionV2Params {\n\to.Request = Request\n\treturn o\n}", "func (client LabClient) ListBySubscriptionPreparer(ctx context.Context, filter string, top *int32, orderBy string) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"subscriptionId\": autorest.Encode(\"path\", client.SubscriptionID),\n\t}\n\n\tconst APIVersion = \"2015-05-21-preview\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\tif len(filter) > 0 {\n\t\tqueryParameters[\"$filter\"] = autorest.Encode(\"query\", filter)\n\t}\n\tif top != nil {\n\t\tqueryParameters[\"$top\"] = autorest.Encode(\"query\", *top)\n\t}\n\tif len(orderBy) > 0 {\n\t\tqueryParameters[\"$orderBy\"] = autorest.Encode(\"query\", orderBy)\n\t}\n\n\tpreparer := 
autorest.CreatePreparer(\n\t\tautorest.AsGet(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPathParameters(\"/subscriptions/{subscriptionId}/providers/Microsoft.DevTestLab/labs\", pathParameters),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func (client AccountClient) ListBySubscriptionPreparer(ctx context.Context) (*http.Request, error) {\n pathParameters := map[string]interface{} {\n \"subscriptionId\": autorest.Encode(\"path\",client.SubscriptionID),\n }\n\n const APIVersion = \"2020-12-01-preview\"\n queryParameters := map[string]interface{} {\n \"api-version\": APIVersion,\n }\n\n preparer := autorest.CreatePreparer(\nautorest.AsGet(),\nautorest.WithBaseURL(client.BaseURI),\nautorest.WithPathParameters(\"/subscriptions/{subscriptionId}/providers/Microsoft.AISupercomputer/accounts\",pathParameters),\nautorest.WithQueryParameters(queryParameters))\n return preparer.Prepare((&http.Request{}).WithContext(ctx))\n }", "func (r *SubscriptionsService) Create(subscription *Subscription) *SubscriptionsCreateCall {\n\tc := &SubscriptionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.subscription = subscription\n\treturn c\n}", "func (client *VirtualApplianceSKUsClient) listCreateRequest(ctx context.Context, options *VirtualApplianceSKUsListOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkVirtualApplianceSkus\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-07-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (ss *SubscriptionsService) Create(ctx context.Context, cID string, sc *Subscription) (\n\tres *Response,\n\ts *Subscription,\n\terr error,\n) {\n\turi := fmt.Sprintf(\"v2/customers/%s/subscriptions\", cID)\n\n\tif ss.client.HasAccessToken() && ss.client.config.testing {\n\t\tsc.TestMode = true\n\t}\n\n\tres, err = ss.client.post(ctx, uri, sc, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif err = json.Unmarshal(res.content, &s); err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func (client *AccountsClient) listSKUsCreateRequest(ctx context.Context, resourceGroupName string, accountName string, options *AccountsClientListSKUsOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CognitiveServices/accounts/{accountName}/skus\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif 
err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client AccountClient) ListBySubscription(ctx context.Context) (result AccountResourceDescriptionListPage, err error) {\n if tracing.IsEnabled() {\n ctx = tracing.StartSpan(ctx, fqdn + \"/AccountClient.ListBySubscription\")\n defer func() {\n sc := -1\n if result.ardl.Response.Response != nil {\n sc = result.ardl.Response.Response.StatusCode\n }\n tracing.EndSpan(ctx, sc, err)\n }()\n }\n result.fn = client.listBySubscriptionNextResults\n req, err := client.ListBySubscriptionPreparer(ctx)\n if err != nil {\n err = autorest.NewErrorWithError(err, \"microsoftazuremanagementaisupercomputer.AccountClient\", \"ListBySubscription\", nil , \"Failure preparing request\")\n return\n }\n\n resp, err := client.ListBySubscriptionSender(req)\n if err != nil {\n result.ardl.Response = autorest.Response{Response: resp}\n err = autorest.NewErrorWithError(err, \"microsoftazuremanagementaisupercomputer.AccountClient\", \"ListBySubscription\", resp, \"Failure sending request\")\n return\n }\n\n result.ardl, err = client.ListBySubscriptionResponder(resp)\n if err != nil {\n err = autorest.NewErrorWithError(err, \"microsoftazuremanagementaisupercomputer.AccountClient\", \"ListBySubscription\", resp, \"Failure responding to request\")\n }\n if result.ardl.hasNextLink() && result.ardl.IsEmpty() {\n err = result.NextWithContext(ctx)\n }\n\n return\n}", "func (client *CloudServicesClient) listAllCreateRequest(ctx context.Context, options *CloudServicesClientListAllOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Compute/cloudServices\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-04\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (o *CreateSubscriptionV2Params) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tvar res []error\n\n\tif o.Request == nil {\n\t\to.Request = new(models.CreateSubscriptionRequest)\n\t}\n\n\tif err := r.SetBodyParam(o.Request); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (client IotHubResourceClient) ListBySubscription(ctx context.Context) (result IotHubDescriptionListResultPage, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/IotHubResourceClient.ListBySubscription\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.ihdlr.Response.Response != nil {\n\t\t\t\tsc = result.ihdlr.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tresult.fn = client.listBySubscriptionNextResults\n\treq, err := client.ListBySubscriptionPreparer(ctx)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"devices.IotHubResourceClient\", \"ListBySubscription\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, 
err := client.ListBySubscriptionSender(req)\n\tif err != nil {\n\t\tresult.ihdlr.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"devices.IotHubResourceClient\", \"ListBySubscription\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.ihdlr, err = client.ListBySubscriptionResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"devices.IotHubResourceClient\", \"ListBySubscription\", resp, \"Failure responding to request\")\n\t\treturn\n\t}\n\tif result.ihdlr.hasNextLink() && result.ihdlr.IsEmpty() {\n\t\terr = result.NextWithContext(ctx)\n\t\treturn\n\t}\n\n\treturn\n}", "func (r *SubscriptionsService) List() *SubscriptionsListCall {\n\tc := &SubscriptionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\treturn c\n}", "func (client *WebAppsClient) listCreateRequest(ctx context.Context, options *WebAppsListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Web/sites\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *CloudServicesClient) listCreateRequest(ctx context.Context, resourceGroupName string, options *CloudServicesClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-04\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *SchemaRegistryClient) listByNamespaceCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, options *SchemaRegistryClientListByNamespaceOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/schemagroups\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter 
client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-10-01-preview\")\n\tif options != nil && options.Skip != nil {\n\t\treqQP.Set(\"$skip\", strconv.FormatInt(int64(*options.Skip), 10))\n\t}\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (ss *SubscriptionsService) List(ctx context.Context, cID string, opts *SubscriptionListOptions) (\n\tres *Response,\n\tsl *SubscriptionList,\n\terr error,\n) {\n\tu := fmt.Sprintf(\"v2/customers/%s/subscriptions\", cID)\n\n\tres, err = ss.list(ctx, u, opts)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif err = json.Unmarshal(res.content, &sl); err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func (client *ServersClient) listCreateRequest(ctx context.Context, options *ServersClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Sql/servers\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *SubscriptionClient) listSecretsCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, sid string, options *SubscriptionClientListSecretsOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/subscriptions/{sid}/listSecrets\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif sid == \"\" {\n\t\treturn nil, errors.New(\"parameter sid cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sid}\", url.PathEscape(sid))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\treq.Raw().URL.RawQuery = 
reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *AccountsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *AccountsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CognitiveServices/accounts\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ClientImpl) ListSubscriptions(ctx context.Context, args ListSubscriptionsArgs) (*[]Subscription, error) {\n\tqueryParams := url.Values{}\n\tif args.PublisherId != nil {\n\t\tqueryParams.Add(\"publisherId\", *args.PublisherId)\n\t}\n\tif args.EventType != nil {\n\t\tqueryParams.Add(\"eventType\", *args.EventType)\n\t}\n\tif args.ConsumerId != nil {\n\t\tqueryParams.Add(\"consumerId\", *args.ConsumerId)\n\t}\n\tif args.ConsumerActionId != nil {\n\t\tqueryParams.Add(\"consumerActionId\", *args.ConsumerActionId)\n\t}\n\tlocationId, _ := uuid.Parse(\"fc50d02a-849f-41fb-8af1-0a5216103269\")\n\tresp, err := client.Client.Send(ctx, http.MethodGet, locationId, \"7.1-preview.1\", nil, queryParams, nil, \"\", \"application/json\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar responseValue []Subscription\n\terr = client.Client.UnmarshalCollectionBody(resp, &responseValue)\n\treturn &responseValue, err\n}", "func (client *ActivityLogsClient) listCreateRequest(ctx context.Context, filter string, options *ActivityLogsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Insights/eventtypes/management/values\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2015-04-01\")\n\treqQP.Set(\"$filter\", filter)\n\tif options != nil && options.Select != nil {\n\t\treqQP.Set(\"$select\", *options.Select)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client NotificationDataPlaneClient) createSubscription(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/subscriptions\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateSubscriptionResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, 
&httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (client *InteractionsClient) listByHubCreateRequest(ctx context.Context, resourceGroupName string, hubName string, options *InteractionsClientListByHubOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CustomerInsights/hubs/{hubName}/interactions\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif hubName == \"\" {\n\t\treturn nil, errors.New(\"parameter hubName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{hubName}\", url.PathEscape(hubName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.LocaleCode != nil {\n\t\treqQP.Set(\"locale-code\", *options.LocaleCode)\n\t}\n\treqQP.Set(\"api-version\", \"2017-04-26\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *CertificateOrdersClient) listCreateRequest(ctx context.Context, options *CertificateOrdersClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.CertificateRegistration/certificateOrders\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *AccountsClient) listUsagesCreateRequest(ctx context.Context, resourceGroupName string, accountName string, options *AccountsClientListUsagesOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CognitiveServices/accounts/{accountName}/usages\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := 
runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *RegistrationDefinitionsClient) listCreateRequest(ctx context.Context, scope string, options *RegistrationDefinitionsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.ManagedServices/registrationDefinitions\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-01-preview\")\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (a *SubscriptionsApiService) CreateSubscription(ctx context.Context, id string) ApiCreateSubscriptionRequest {\n\treturn ApiCreateSubscriptionRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tid: id,\n\t}\n}", "func (client *subscriptionClient) getCreateRequest(ctx context.Context, topicName string, subscriptionName string, options *SubscriptionGetOptions) (*policy.Request, error) {\n\turlPath := \"/{topicName}/subscriptions/{subscriptionName}\"\n\tif topicName == \"\" {\n\t\treturn nil, errors.New(\"parameter topicName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{topicName}\", url.PathEscape(topicName))\n\tif subscriptionName == \"\" {\n\t\treturn nil, errors.New(\"parameter subscriptionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionName}\", url.PathEscape(subscriptionName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Enrich != nil {\n\t\treqQP.Set(\"enrich\", strconv.FormatBool(*options.Enrich))\n\t}\n\tif client.apiVersion != nil {\n\t\treqQP.Set(\"api-version\", \"2017_04\")\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/xml, application/atom+xml\")\n\treturn req, nil\n}", "func (client *PermissionsClient) listByBillingProfileCreateRequest(ctx context.Context, billingAccountName string, billingProfileName string, options *PermissionsClientListByBillingProfileOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/billingProfiles/{billingProfileName}/billingPermissions\"\n\tif billingAccountName == \"\" {\n\t\treturn nil, errors.New(\"parameter billingAccountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{billingAccountName}\", url.PathEscape(billingAccountName))\n\tif billingProfileName == \"\" {\n\t\treturn nil, errors.New(\"parameter billingProfileName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{billingProfileName}\", url.PathEscape(billingProfileName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *KpiClient) listByHubCreateRequest(ctx context.Context, resourceGroupName string, hubName string, options *KpiClientListByHubOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CustomerInsights/hubs/{hubName}/kpi\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif hubName == \"\" {\n\t\treturn nil, errors.New(\"parameter hubName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{hubName}\", url.PathEscape(hubName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-04-26\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *WebhooksClient) listCreateRequest(ctx context.Context, resourceGroupName string, registryName string, options *WebhooksClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif registryName == \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client NotificationDataPlaneClient) listSubscriptions(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/subscriptions\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response ListSubscriptionsResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (client *ReplicationsClient) 
listCreateRequest(ctx context.Context, resourceGroupName string, registryName string, options *ReplicationsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif registryName == \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client DataControllersClient) ListInSubscriptionSender(req *http.Request) (*http.Response, error) {\n\treturn client.Send(req, azure.DoRetryWithRegistration(client.Client))\n}", "func (client *PermissionsClient) listByBillingAccountCreateRequest(ctx context.Context, billingAccountName string, options *PermissionsClientListByBillingAccountOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/billingPermissions\"\n\tif billingAccountName == \"\" {\n\t\treturn nil, errors.New(\"parameter billingAccountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{billingAccountName}\", url.PathEscape(billingAccountName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (c *DefaultApiService) CreateSubscription(params *CreateSubscriptionParams) (*EventsV1Subscription, error) {\n\tpath := \"/v1/Subscriptions\"\n\n\tdata := url.Values{}\n\theaders := make(map[string]interface{})\n\n\tif params != nil && params.Description != nil {\n\t\tdata.Set(\"Description\", *params.Description)\n\t}\n\tif params != nil && params.SinkSid != nil {\n\t\tdata.Set(\"SinkSid\", *params.SinkSid)\n\t}\n\tif params != nil && params.Types != nil {\n\t\tv, err := json.Marshal(params.Types)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tdata.Set(\"Types\", string(v))\n\t}\n\n\tresp, err := c.requestHandler.Post(c.baseURL+path, data, headers)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tps := &EventsV1Subscription{}\n\tif err := json.NewDecoder(resp.Body).Decode(ps); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ps, err\n}", "func (client *ReplicationsClient) listCreateRequest(ctx context.Context, resourceGroupName string, registryName string, options *ReplicationsListOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications\"\n\tif client.subscriptionID == \"\" 
{\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif registryName == \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.URL.RawQuery = reqQP.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func NewAllowlistSubscriptionFilter(topics ...string) SubscriptionFilter {\n\tallow := make(map[string]struct{})\n\tfor _, topic := range topics {\n\t\tallow[topic] = struct{}{}\n\t}\n\n\treturn &allowlistSubscriptionFilter{allow: allow}\n}", "func (a Accessor) GetSubscriptionList(service, servicePath string, subscriptions *[]Subscription) error {\n\treturn a.access(&AccessParameter{\n\t\tEpID: EntryPointIDs.Subscriptions,\n\t\tMethod: gohttp.HttpMethods.GET,\n\t\tService: service,\n\t\tServicePath: servicePath,\n\t\tPath: \"\",\n\t\tReceivedBody: subscriptions,\n\t})\n}", "func (client *FileServicesClient) listCreateRequest(ctx context.Context, resourceGroupName string, accountName string, options *FileServicesListOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2019-06-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (a *SubscriptionsApiService) CreateSubscriptionExecute(r ApiCreateSubscriptionRequest) (*SubscriptionResponse, *http.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = http.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tformFiles []formFile\n\t\tlocalVarReturnValue *SubscriptionResponse\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"SubscriptionsApiService.CreateSubscription\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/customers/{id}/subscription\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"id\"+\"}\", url.PathEscape(parameterValueToString(r.id, \"id\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\tif r.subscriptionRequest == nil {\n\t\treturn localVarReturnValue, nil, reportError(\"subscriptionRequest is required and must be 
specified\")\n\t}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/vnd.conekta-v2.1.0+json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.acceptLanguage != nil {\n\t\tparameterAddToHeaderOrQuery(localVarHeaderParams, \"Accept-Language\", r.acceptLanguage, \"\")\n\t}\n\tif r.xChildCompanyId != nil {\n\t\tparameterAddToHeaderOrQuery(localVarHeaderParams, \"X-Child-Company-Id\", r.xChildCompanyId, \"\")\n\t}\n\t// body params\n\tlocalVarPostBody = r.subscriptionRequest\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := io.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = io.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := &GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 422 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\t\t\tnewErr.error = formatErrorMessage(localVarHTTPResponse.Status, &v)\n\t\t\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\t\t\tnewErr.error = formatErrorMessage(localVarHTTPResponse.Status, &v)\n\t\t\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\t\t\tnewErr.error = formatErrorMessage(localVarHTTPResponse.Status, &v)\n\t\t\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 500 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn 
localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\t\t\tnewErr.error = formatErrorMessage(localVarHTTPResponse.Status, &v)\n\t\t\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := &GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (client *subscriptionClient) deleteCreateRequest(ctx context.Context, topicName string, subscriptionName string, options *SubscriptionDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/{topicName}/subscriptions/{subscriptionName}\"\n\tif topicName == \"\" {\n\t\treturn nil, errors.New(\"parameter topicName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{topicName}\", url.PathEscape(topicName))\n\tif subscriptionName == \"\" {\n\t\treturn nil, errors.New(\"parameter subscriptionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionName}\", url.PathEscape(subscriptionName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif client.apiVersion != nil {\n\t\treqQP.Set(\"api-version\", \"2017_04\")\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/xml, application/atom+xml\")\n\treturn req, nil\n}", "func (client *DevicesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *DevicesClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HybridNetwork/devices\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *AvailabilitySetsClient) listCreateRequest(ctx context.Context, resourceGroupName string, options *AvailabilitySetsListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, 
http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client ServicesClient) ListBySubscriptionResponder(resp *http.Response) (result ServiceResourceList, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *APIClient) listByServiceCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, options *APIClientListByServiceOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\tif options != nil && options.Skip != nil {\n\t\treqQP.Set(\"$skip\", strconv.FormatInt(int64(*options.Skip), 10))\n\t}\n\tif options != nil && options.Tags != nil {\n\t\treqQP.Set(\"tags\", *options.Tags)\n\t}\n\tif options != nil && options.ExpandAPIVersionSet != nil {\n\t\treqQP.Set(\"expandApiVersionSet\", strconv.FormatBool(*options.ExpandAPIVersionSet))\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *SubscriptionClient) getCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, sid string, options *SubscriptionClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/subscriptions/{sid}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif sid == \"\" {\n\t\treturn nil, errors.New(\"parameter sid cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sid}\", url.PathEscape(sid))\n\tif client.subscriptionID == \"\" 
{\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ContainerGroupsClient) listCreateRequest(ctx context.Context, options *ContainerGroupsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.ContainerInstance/containerGroups\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ClustersClient) listCreateRequest(ctx context.Context, options *ClustersClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.DBforPostgreSQL/serverGroupsv2\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-11-08\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (r CreateSubscriptionRequest) ToSubscription() (Subscription, error) {\n\t_, err := url.Parse(r.URL)\n\tif err != nil {\n\t\treturn Subscription{}, errors.Wrap(err, \"failed to parse subscription URL\")\n\t}\n\tif r.EventType == \"\" {\n\t\treturn Subscription{}, errors.New(\"event type is required when registering subscription\")\n\t}\n\tif r.OwnerID == \"\" {\n\t\treturn Subscription{}, errors.New(\"owner ID is required when registering subscription\")\n\t}\n\tif r.FailureThreshold < 0 || r.FailureThreshold > 72*time.Hour {\n\t\treturn Subscription{}, errors.New(\"failure threshold need to be between 0 and 72 hours\")\n\t}\n\n\treturn Subscription{\n\t\tName: r.Name,\n\t\tURL: r.URL,\n\t\tOwnerID: r.OwnerID,\n\t\tEventType: r.EventType,\n\t\tLastDeliveryStatus: SubscriptionDeliveryNone,\n\t\tLastDeliveryAttemptAt: 0,\n\t\tFailureThreshold: r.FailureThreshold,\n\t\tHeaders: r.Headers,\n\t}, nil\n}", "func (c *SubscriptionsCollectionApiController) CreateSubscription(w http.ResponseWriter, r *http.Request) { \n\tsubscriptionData := &SubscriptionData{}\n\tif err := json.NewDecoder(r.Body).Decode(&subscriptionData); err != nil {\n\t\tw.WriteHeader(500)\n\t\treturn\n\t}\n\t\n\tresult, err := c.service.CreateSubscription(*subscriptionData)\n\tif err != nil {\n\t\tw.WriteHeader(500)\n\t\treturn\n\t}\n\t\n\tEncodeJSONResponse(result, nil, w)\n}", "func (client *ExpressRoutePortsLocationsClient) listCreateRequest(ctx 
context.Context, options *ExpressRoutePortsLocationsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePortsLocations\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *GroupClient) listByServiceCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, options *GroupListByServiceOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\tif options != nil && options.Skip != nil {\n\t\treqQP.Set(\"$skip\", strconv.FormatInt(int64(*options.Skip), 10))\n\t}\n\treqQP.Set(\"api-version\", \"2021-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *VirtualRoutersClient) listCreateRequest(ctx context.Context, options *VirtualRoutersClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualRouters\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *SubscriptionClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, sid string, parameters SubscriptionCreateParameters, options *SubscriptionClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/subscriptions/{sid}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif sid == \"\" {\n\t\treturn nil, errors.New(\"parameter sid cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sid}\", url.PathEscape(sid))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Notify != nil {\n\t\treqQP.Set(\"notify\", strconv.FormatBool(*options.Notify))\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\tif options != nil && options.AppType != nil {\n\t\treqQP.Set(\"appType\", string(*options.AppType))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Raw().Header[\"If-Match\"] = []string{*options.IfMatch}\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *OperationsClient) listCreateRequest(ctx context.Context, options *OperationsListOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.RecoveryServices/operations\"\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *RecommendationsClient) listCreateRequest(ctx context.Context, options *RecommendationsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Web/recommendations\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Featured != nil {\n\t\treqQP.Set(\"featured\", strconv.FormatBool(*options.Featured))\n\t}\n\treqQP.Set(\"api-version\", \"2021-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tunencodedParams := []string{req.Raw().URL.RawQuery}\n\tif options != nil && options.Filter != nil {\n\t\tunencodedParams = append(unencodedParams, \"$filter=\"+*options.Filter)\n\t}\n\treq.Raw().URL.RawQuery = strings.Join(unencodedParams, \"&\")\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *APIClient) listByTagsCreateRequest(ctx context.Context, 
resourceGroupName string, serviceName string, options *APIClientListByTagsOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apisByTags\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\tif options != nil && options.Skip != nil {\n\t\treqQP.Set(\"$skip\", strconv.FormatInt(int64(*options.Skip), 10))\n\t}\n\tif options != nil && options.IncludeNotTaggedApis != nil {\n\t\treqQP.Set(\"includeNotTaggedApis\", strconv.FormatBool(*options.IncludeNotTaggedApis))\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *IotSecuritySolutionClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *IotSecuritySolutionClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/iotSecuritySolutions\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-08-01\")\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *RecommendationsClient) listCreateRequest(ctx context.Context, options *RecommendationsListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Web/recommendations\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, 
runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Featured != nil {\n\t\treqQP.Set(\"featured\", strconv.FormatBool(*options.Featured))\n\t}\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tunencodedParams := []string{req.Raw().URL.RawQuery}\n\tif options != nil && options.Filter != nil {\n\t\tunencodedParams = append(unencodedParams, \"$filter=\"+*options.Filter)\n\t}\n\treq.Raw().URL.RawQuery = strings.Join(unencodedParams, \"&\")\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client DataControllersClient) ListInSubscriptionPreparer(ctx context.Context) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"subscriptionId\": autorest.Encode(\"path\", client.SubscriptionID),\n\t}\n\n\tconst APIVersion = \"2019-07-24-preview\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsGet(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPathParameters(\"/subscriptions/{subscriptionId}/providers/Microsoft.AzureData/dataControllers\", pathParameters),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func (client *PermissionBindingsClient) listByNamespaceCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, options *PermissionBindingsClientListByNamespaceOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/namespaces/{namespaceName}/permissionBindings\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-06-01-preview\")\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}" ]
[ "0.7839764", "0.7792721", "0.777648", "0.7766895", "0.7694845", "0.7682546", "0.76752776", "0.76353943", "0.76195145", "0.7615538", "0.75823325", "0.75491273", "0.7399178", "0.73666984", "0.7287603", "0.6893647", "0.63321304", "0.6311873", "0.6288278", "0.62622976", "0.62359613", "0.61689323", "0.61656594", "0.60961807", "0.6029196", "0.6009029", "0.59895754", "0.5974609", "0.58578", "0.5828821", "0.58222514", "0.58200955", "0.57782", "0.575699", "0.57015806", "0.5691091", "0.5658642", "0.56462383", "0.56397885", "0.56391746", "0.56388676", "0.5637929", "0.56377494", "0.5629929", "0.560915", "0.56089556", "0.5608414", "0.55644083", "0.5559027", "0.55588883", "0.5558289", "0.55549824", "0.55513084", "0.5549287", "0.553939", "0.5524817", "0.55123085", "0.54948753", "0.54817283", "0.54655385", "0.54572916", "0.54557496", "0.5438082", "0.54370546", "0.54280525", "0.5423964", "0.54192585", "0.54149777", "0.54129267", "0.54126644", "0.5403318", "0.5402231", "0.5390307", "0.53673106", "0.5365939", "0.5365732", "0.5360018", "0.5356137", "0.535181", "0.533805", "0.53307796", "0.53276885", "0.53237206", "0.5323682", "0.5319623", "0.53126395", "0.5311774", "0.5308943", "0.530518", "0.5297139", "0.52705383", "0.5268308", "0.5267612", "0.5258455", "0.5257728", "0.52565414", "0.52515924", "0.52494377", "0.5247053", "0.5241889" ]
0.7618742
9
listBySubscriptionHandleResponse handles the ListBySubscription response.
func (client *DataCollectionEndpointsClient) listBySubscriptionHandleResponse(resp *http.Response) (DataCollectionEndpointsListBySubscriptionResponse, error) {
	result := DataCollectionEndpointsListBySubscriptionResponse{RawResponse: resp}
	if err := runtime.UnmarshalAsJSON(resp, &result.DataCollectionEndpointResourceListResult); err != nil {
		return DataCollectionEndpointsListBySubscriptionResponse{}, runtime.NewResponseError(err, resp)
	}
	return result, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (client *RedisClient) listBySubscriptionHandleResponse(resp *http.Response) (RedisListBySubscriptionResponse, error) {\n\tresult := RedisListBySubscriptionResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RedisListResult); err != nil {\n\t\treturn RedisListBySubscriptionResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *DataCollectionEndpointsClient) listBySubscriptionHandleResponse(resp *http.Response) (DataCollectionEndpointsClientListBySubscriptionResponse, error) {\n\tresult := DataCollectionEndpointsClientListBySubscriptionResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DataCollectionEndpointResourceListResult); err != nil {\n\t\treturn DataCollectionEndpointsClientListBySubscriptionResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DevicesClient) listBySubscriptionHandleResponse(resp *http.Response) (DevicesClientListBySubscriptionResponse, error) {\n\tresult := DevicesClientListBySubscriptionResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DeviceListResult); err != nil {\n\t\treturn DevicesClientListBySubscriptionResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SpatialAnchorsAccountsClient) listBySubscriptionHandleResponse(resp *http.Response) (SpatialAnchorsAccountsClientListBySubscriptionResponse, error) {\n\tresult := SpatialAnchorsAccountsClientListBySubscriptionResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SpatialAnchorsAccountPage); err != nil {\n\t\treturn SpatialAnchorsAccountsClientListBySubscriptionResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WorkspacesClient) listBySubscriptionHandleResponse(resp *http.Response) (WorkspacesListBySubscriptionResponse, error) {\n\tresult := WorkspacesListBySubscriptionResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.WorkspaceListResult); err != nil {\n\t\treturn WorkspacesListBySubscriptionResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ServersClient) listBySubscriptionHandleResponse(resp *http.Response) (ServersClientListBySubscriptionResponse, error) {\n\tresult := ServersClientListBySubscriptionResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ServerList); err != nil {\n\t\treturn ServersClientListBySubscriptionResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *MetricAlertsClient) listBySubscriptionHandleResponse(resp *http.Response) (MetricAlertsClientListBySubscriptionResponse, error) {\n\tresult := MetricAlertsClientListBySubscriptionResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.MetricAlertResourceCollection); err != nil {\n\t\treturn MetricAlertsClientListBySubscriptionResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *CustomAssessmentAutomationsClient) listBySubscriptionHandleResponse(resp *http.Response) (CustomAssessmentAutomationsListBySubscriptionResponse, error) {\n\tresult := CustomAssessmentAutomationsListBySubscriptionResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CustomAssessmentAutomationsListResult); err != nil {\n\t\treturn CustomAssessmentAutomationsListBySubscriptionResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *AvailabilitySetsClient) listBySubscriptionHandleResponse(resp *http.Response) (AvailabilitySetsListBySubscriptionResponse, error) {\n\tresult := AvailabilitySetsListBySubscriptionResponse{RawResponse: 
resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AvailabilitySetListResult); err != nil {\n\t\treturn AvailabilitySetsListBySubscriptionResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *LocalRulestacksClient) listBySubscriptionHandleResponse(resp *http.Response) (LocalRulestacksClientListBySubscriptionResponse, error) {\n\tresult := LocalRulestacksClientListBySubscriptionResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.LocalRulestackResourceListResult); err != nil {\n\t\treturn LocalRulestacksClientListBySubscriptionResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *IotSecuritySolutionClient) listBySubscriptionHandleResponse(resp *http.Response) (IotSecuritySolutionClientListBySubscriptionResponse, error) {\n\tresult := IotSecuritySolutionClientListBySubscriptionResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.IoTSecuritySolutionsList); err != nil {\n\t\treturn IotSecuritySolutionClientListBySubscriptionResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ManagementAssociationsClient) listBySubscriptionHandleResponse(resp *http.Response) (ManagementAssociationsListBySubscriptionResponse, error) {\n\tresult := ManagementAssociationsListBySubscriptionResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagementAssociationPropertiesList); err != nil {\n\t\treturn ManagementAssociationsListBySubscriptionResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *CassandraClustersClient) listBySubscriptionHandleResponse(resp *http.Response) (CassandraClustersClientListBySubscriptionResponse, error) {\n\tresult := CassandraClustersClientListBySubscriptionResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ListClusters); err != nil {\n\t\treturn CassandraClustersClientListBySubscriptionResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ConnectedEnvironmentsClient) listBySubscriptionHandleResponse(resp *http.Response) (ConnectedEnvironmentsClientListBySubscriptionResponse, error) {\n\tresult := ConnectedEnvironmentsClientListBySubscriptionResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ConnectedEnvironmentCollection); err != nil {\n\t\treturn ConnectedEnvironmentsClientListBySubscriptionResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AlertProcessingRulesClient) listBySubscriptionHandleResponse(resp *http.Response) (AlertProcessingRulesClientListBySubscriptionResponse, error) {\n\tresult := AlertProcessingRulesClientListBySubscriptionResponse{RawResponse: resp}\n\tif val := resp.Header.Get(\"x-ms-request-id\"); val != \"\" {\n\t\tresult.XMSRequestID = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AlertProcessingRulesList); err != nil {\n\t\treturn AlertProcessingRulesClientListBySubscriptionResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client AccountClient) ListBySubscriptionResponder(resp *http.Response) (result AccountResourceDescriptionList, err error) {\n err = autorest.Respond(\n resp,\n azure.WithErrorUnlessStatusCode(http.StatusOK),\n autorest.ByUnmarshallingJSON(&result),\n autorest.ByClosing())\n result.Response = autorest.Response{Response: resp}\n return\n }", "func (client *TriggersClient) listByShareSubscriptionHandleResponse(resp *http.Response) (TriggersClientListByShareSubscriptionResponse, error) {\n\tresult := TriggersClientListByShareSubscriptionResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.TriggerList); err != nil {\n\t\treturn 
TriggersClientListByShareSubscriptionResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SubscriptionClient) listHandleResponse(resp *http.Response) (SubscriptionClientListResponse, error) {\n\tresult := SubscriptionClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SubscriptionCollection); err != nil {\n\t\treturn SubscriptionClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client ServicesClient) ListBySubscriptionResponder(resp *http.Response) (result ServiceResourceList, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client IotHubResourceClient) ListBySubscriptionResponder(resp *http.Response) (result IotHubDescriptionListResult, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client LabClient) ListBySubscriptionResponder(resp *http.Response) (result ResponseWithContinuationLab, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *ProviderShareSubscriptionsClient) listByShareHandleResponse(resp *http.Response) (ProviderShareSubscriptionsClientListByShareResponse, error) {\n\tresult := ProviderShareSubscriptionsClientListByShareResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ProviderShareSubscriptionList); err != nil {\n\t\treturn ProviderShareSubscriptionsClientListByShareResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client ServicesClient) ListBySubscription(ctx context.Context) (result ServiceResourceListPage, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/ServicesClient.ListBySubscription\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.srl.Response.Response != nil {\n\t\t\t\tsc = result.srl.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tresult.fn = client.listBySubscriptionNextResults\n\treq, err := client.ListBySubscriptionPreparer(ctx)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"appplatform.ServicesClient\", \"ListBySubscription\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListBySubscriptionSender(req)\n\tif err != nil {\n\t\tresult.srl.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"appplatform.ServicesClient\", \"ListBySubscription\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.srl, err = client.ListBySubscriptionResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"appplatform.ServicesClient\", \"ListBySubscription\", resp, \"Failure responding to request\")\n\t\treturn\n\t}\n\tif result.srl.hasNextLink() && result.srl.IsEmpty() {\n\t\terr = result.NextWithContext(ctx)\n\t\treturn\n\t}\n\n\treturn\n}", "func (client IotHubResourceClient) ListBySubscription(ctx context.Context) (result IotHubDescriptionListResultPage, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, 
fqdn+\"/IotHubResourceClient.ListBySubscription\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.ihdlr.Response.Response != nil {\n\t\t\t\tsc = result.ihdlr.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tresult.fn = client.listBySubscriptionNextResults\n\treq, err := client.ListBySubscriptionPreparer(ctx)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"devices.IotHubResourceClient\", \"ListBySubscription\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListBySubscriptionSender(req)\n\tif err != nil {\n\t\tresult.ihdlr.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"devices.IotHubResourceClient\", \"ListBySubscription\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.ihdlr, err = client.ListBySubscriptionResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"devices.IotHubResourceClient\", \"ListBySubscription\", resp, \"Failure responding to request\")\n\t\treturn\n\t}\n\tif result.ihdlr.hasNextLink() && result.ihdlr.IsEmpty() {\n\t\terr = result.NextWithContext(ctx)\n\t\treturn\n\t}\n\n\treturn\n}", "func (c *DefaultApiService) ListSubscription(params *ListSubscriptionParams) (*ListSubscriptionResponse, error) {\n\tpath := \"/v1/Subscriptions\"\n\n\tdata := url.Values{}\n\theaders := make(map[string]interface{})\n\n\tif params != nil && params.SinkSid != nil {\n\t\tdata.Set(\"SinkSid\", *params.SinkSid)\n\t}\n\tif params != nil && params.PageSize != nil {\n\t\tdata.Set(\"PageSize\", fmt.Sprint(*params.PageSize))\n\t}\n\n\tresp, err := c.requestHandler.Get(c.baseURL+path, data, headers)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tps := &ListSubscriptionResponse{}\n\tif err := json.NewDecoder(resp.Body).Decode(ps); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ps, err\n}", "func (client LabClient) ListBySubscription(ctx context.Context, filter string, top *int32, orderBy string) (result ResponseWithContinuationLabPage, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/LabClient.ListBySubscription\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.rwcl.Response.Response != nil {\n\t\t\t\tsc = result.rwcl.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tresult.fn = client.listBySubscriptionNextResults\n\treq, err := client.ListBySubscriptionPreparer(ctx, filter, top, orderBy)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"dtl.LabClient\", \"ListBySubscription\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListBySubscriptionSender(req)\n\tif err != nil {\n\t\tresult.rwcl.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"dtl.LabClient\", \"ListBySubscription\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.rwcl, err = client.ListBySubscriptionResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"dtl.LabClient\", \"ListBySubscription\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}", "func (client AccountClient) ListBySubscription(ctx context.Context) (result AccountResourceDescriptionListPage, err error) {\n if tracing.IsEnabled() {\n ctx = tracing.StartSpan(ctx, fqdn + \"/AccountClient.ListBySubscription\")\n defer func() {\n sc := -1\n if result.ardl.Response.Response != nil {\n sc = result.ardl.Response.Response.StatusCode\n }\n 
tracing.EndSpan(ctx, sc, err)\n }()\n }\n result.fn = client.listBySubscriptionNextResults\n req, err := client.ListBySubscriptionPreparer(ctx)\n if err != nil {\n err = autorest.NewErrorWithError(err, \"microsoftazuremanagementaisupercomputer.AccountClient\", \"ListBySubscription\", nil , \"Failure preparing request\")\n return\n }\n\n resp, err := client.ListBySubscriptionSender(req)\n if err != nil {\n result.ardl.Response = autorest.Response{Response: resp}\n err = autorest.NewErrorWithError(err, \"microsoftazuremanagementaisupercomputer.AccountClient\", \"ListBySubscription\", resp, \"Failure sending request\")\n return\n }\n\n result.ardl, err = client.ListBySubscriptionResponder(resp)\n if err != nil {\n err = autorest.NewErrorWithError(err, \"microsoftazuremanagementaisupercomputer.AccountClient\", \"ListBySubscription\", resp, \"Failure responding to request\")\n }\n if result.ardl.hasNextLink() && result.ardl.IsEmpty() {\n err = result.NextWithContext(ctx)\n }\n\n return\n}", "func (client AccountClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) {\n return client.Send(req, azure.DoRetryWithRegistration(client.Client))\n }", "func (client LabClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tazure.DoRetryWithRegistration(client.Client))\n}", "func (client ServicesClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) {\n\treturn client.Send(req, azure.DoRetryWithRegistration(client.Client))\n}", "func (client IotHubResourceClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) {\n\treturn client.Send(req, azure.DoRetryWithRegistration(client.Client))\n}", "func (client *VendorSKUPreviewClient) listHandleResponse(resp *http.Response) (VendorSKUPreviewClientListResponse, error) {\n\tresult := VendorSKUPreviewClientListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PreviewSubscriptionsList); err != nil {\n\t\treturn VendorSKUPreviewClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *PermissionsClient) listByBillingProfileHandleResponse(resp *http.Response) (PermissionsClientListByBillingProfileResponse, error) {\n\tresult := PermissionsClientListByBillingProfileResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PermissionsListResult); err != nil {\n\t\treturn PermissionsClientListByBillingProfileResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client DataControllersClient) ListInSubscriptionResponder(resp *http.Response) (result PageOfDataControllerResource, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client ProviderShareSubscriptionsClient) ListByShareResponder(resp *http.Response) (result ProviderShareSubscriptionList, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *PermissionsClient) listByBillingAccountHandleResponse(resp *http.Response) (PermissionsClientListByBillingAccountResponse, error) {\n\tresult := PermissionsClientListByBillingAccountResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, 
&result.PermissionsListResult); err != nil {\n\t\treturn PermissionsClientListByBillingAccountResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client AccountClient) ListBySubscriptionComplete(ctx context.Context) (result AccountResourceDescriptionListIterator, err error) {\n if tracing.IsEnabled() {\n ctx = tracing.StartSpan(ctx, fqdn + \"/AccountClient.ListBySubscription\")\n defer func() {\n sc := -1\n if result.Response().Response.Response != nil {\n sc = result.page.Response().Response.Response.StatusCode\n }\n tracing.EndSpan(ctx, sc, err)\n }()\n }\n result.page, err = client.ListBySubscription(ctx)\n return\n }", "func (client *WCFRelaysClient) listByNamespaceHandleResponse(resp *http.Response) (WCFRelaysClientListByNamespaceResponse, error) {\n\tresult := WCFRelaysClientListByNamespaceResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.WcfRelaysListResult); err != nil {\n\t\treturn WCFRelaysClientListByNamespaceResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *KpiClient) listByHubHandleResponse(resp *http.Response) (KpiClientListByHubResponse, error) {\n\tresult := KpiClientListByHubResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KpiListResult); err != nil {\n\t\treturn KpiClientListByHubResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *PermissionBindingsClient) listByNamespaceHandleResponse(resp *http.Response) (PermissionBindingsClientListByNamespaceResponse, error) {\n\tresult := PermissionBindingsClientListByNamespaceResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PermissionBindingsListResult); err != nil {\n\t\treturn PermissionBindingsClientListByNamespaceResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client NotificationDataPlaneClient) ListSubscriptions(ctx context.Context, request ListSubscriptionsRequest) (response ListSubscriptionsResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.listSubscriptions, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tresponse = ListSubscriptionsResponse{RawResponse: ociResponse.HTTPResponse()}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(ListSubscriptionsResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into ListSubscriptionsResponse\")\n\t}\n\treturn\n}", "func (s *API) ListSubscriptions(status SubscriptionStatus) (data SubscriptionsResponse, err error) {\n\tif status == \"\" {\n\t\tstatus = SubscriptionStatusAll\n\t}\n\tendpoint := zoho.Endpoint{\n\t\tName: \"subscriptions\",\n\t\tURL: fmt.Sprintf(\"https://subscriptions.zoho.%s/api/v1/subscriptions\", s.ZohoTLD),\n\t\tMethod: zoho.HTTPGet,\n\t\tResponseData: &SubscriptionsResponse{},\n\t\tURLParameters: map[string]zoho.Parameter{\n\t\t\t\"filter_by\": zoho.Parameter(status),\n\t\t},\n\t\tHeaders: map[string]string{\n\t\t\tZohoSubscriptionsEndpointHeader: s.OrganizationID,\n\t\t},\n\t}\n\n\terr = s.Zoho.HTTPRequest(&endpoint)\n\tif err != nil {\n\t\treturn SubscriptionsResponse{}, fmt.Errorf(\"Failed to retrieve subscriptions: %s\", err)\n\t}\n\n\tif v, ok := endpoint.ResponseData.(*SubscriptionsResponse); ok {\n\t\treturn *v, nil\n\t}\n\n\treturn SubscriptionsResponse{}, fmt.Errorf(\"Data retrieved was not 'SubscriptionsResponse'\")\n}", "func (client *AvailabilitySetsClient) ListBySubscription(options 
*AvailabilitySetsListBySubscriptionOptions) *AvailabilitySetsListBySubscriptionPager {\n\treturn &AvailabilitySetsListBySubscriptionPager{\n\t\tclient: client,\n\t\trequester: func(ctx context.Context) (*policy.Request, error) {\n\t\t\treturn client.listBySubscriptionCreateRequest(ctx, options)\n\t\t},\n\t\tadvancer: func(ctx context.Context, resp AvailabilitySetsListBySubscriptionResponse) (*policy.Request, error) {\n\t\t\treturn runtime.NewRequest(ctx, http.MethodGet, *resp.AvailabilitySetListResult.NextLink)\n\t\t},\n\t}\n}", "func (client NotificationDataPlaneClient) listSubscriptions(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/subscriptions\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response ListSubscriptionsResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (client *OutputsClient) listByStreamingJobHandleResponse(resp *http.Response) (OutputsClientListByStreamingJobResponse, error) {\n\tresult := OutputsClientListByStreamingJobResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.OutputListResult); err != nil {\n\t\treturn OutputsClientListByStreamingJobResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *OutputsClient) listByStreamingJobHandleResponse(resp *http.Response) (OutputsListByStreamingJobResponse, error) {\n\tresult := OutputsListByStreamingJobResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.OutputListResult); err != nil {\n\t\treturn OutputsListByStreamingJobResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *RoleAssignmentsClient) listByBillingProfileHandleResponse(resp *http.Response) (RoleAssignmentsClientListByBillingProfileResponse, error) {\n\tresult := RoleAssignmentsClientListByBillingProfileResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RoleAssignmentListResult); err != nil {\n\t\treturn RoleAssignmentsClientListByBillingProfileResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *RoleAssignmentsClient) listByBillingProfileHandleResponse(resp *http.Response) (RoleAssignmentsClientListByBillingProfileResponse, error) {\n\tresult := RoleAssignmentsClientListByBillingProfileResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RoleAssignmentListResult); err != nil {\n\t\treturn RoleAssignmentsClientListByBillingProfileResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client LabClient) ListBySubscriptionComplete(ctx context.Context, filter string, top *int32, orderBy string) (result ResponseWithContinuationLabIterator, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/LabClient.ListBySubscription\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.Response().Response.Response != nil {\n\t\t\t\tsc = result.page.Response().Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tresult.page, err = client.ListBySubscription(ctx, filter, top, orderBy)\n\treturn\n}", "func (client *SubscriptionClient) listSecretsHandleResponse(resp *http.Response) (SubscriptionClientListSecretsResponse, error) {\n\tresult := SubscriptionClientListSecretsResponse{}\n\tif val := 
resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SubscriptionKeysContract); err != nil {\n\t\treturn SubscriptionClientListSecretsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SchemaRegistryClient) listByNamespaceHandleResponse(resp *http.Response) (SchemaRegistryClientListByNamespaceResponse, error) {\n\tresult := SchemaRegistryClientListByNamespaceResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SchemaGroupListResult); err != nil {\n\t\treturn SchemaRegistryClientListByNamespaceResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client ServicesClient) ListBySubscriptionComplete(ctx context.Context) (result ServiceResourceListIterator, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/ServicesClient.ListBySubscription\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.Response().Response.Response != nil {\n\t\t\t\tsc = result.page.Response().Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tresult.page, err = client.ListBySubscription(ctx)\n\treturn\n}", "func (client *InteractionsClient) listByHubHandleResponse(resp *http.Response) (InteractionsClientListByHubResponse, error) {\n\tresult := InteractionsClientListByHubResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.InteractionListResult); err != nil {\n\t\treturn InteractionsClientListByHubResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client IotHubResourceClient) ListBySubscriptionComplete(ctx context.Context) (result IotHubDescriptionListResultIterator, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/IotHubResourceClient.ListBySubscription\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.Response().Response.Response != nil {\n\t\t\t\tsc = result.page.Response().Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tresult.page, err = client.ListBySubscription(ctx)\n\treturn\n}", "func (client *PermissionsClient) listByCustomerHandleResponse(resp *http.Response) (PermissionsClientListByCustomerResponse, error) {\n\tresult := PermissionsClientListByCustomerResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PermissionsListResult); err != nil {\n\t\treturn PermissionsClientListByCustomerResponse{}, err\n\t}\n\treturn result, nil\n}", "func (ss *SubscriptionsService) List(ctx context.Context, cID string, opts *SubscriptionListOptions) (\n\tres *Response,\n\tsl *SubscriptionList,\n\terr error,\n) {\n\tu := fmt.Sprintf(\"v2/customers/%s/subscriptions\", cID)\n\n\tres, err = ss.list(ctx, u, opts)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif err = json.Unmarshal(res.content, &sl); err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func (client *RoleAssignmentsClient) listByBillingAccountHandleResponse(resp *http.Response) (RoleAssignmentsClientListByBillingAccountResponse, error) {\n\tresult := RoleAssignmentsClientListByBillingAccountResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RoleAssignmentListResult); err != nil {\n\t\treturn RoleAssignmentsClientListByBillingAccountResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ProviderShareSubscriptionsClient) getByShareHandleResponse(resp *http.Response) (ProviderShareSubscriptionsClientGetByShareResponse, error) {\n\tresult := ProviderShareSubscriptionsClientGetByShareResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ProviderShareSubscription); err != nil 
{\n\t\treturn ProviderShareSubscriptionsClientGetByShareResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ClientImpl) ListSubscriptions(ctx context.Context, args ListSubscriptionsArgs) (*[]Subscription, error) {\n\tqueryParams := url.Values{}\n\tif args.PublisherId != nil {\n\t\tqueryParams.Add(\"publisherId\", *args.PublisherId)\n\t}\n\tif args.EventType != nil {\n\t\tqueryParams.Add(\"eventType\", *args.EventType)\n\t}\n\tif args.ConsumerId != nil {\n\t\tqueryParams.Add(\"consumerId\", *args.ConsumerId)\n\t}\n\tif args.ConsumerActionId != nil {\n\t\tqueryParams.Add(\"consumerActionId\", *args.ConsumerActionId)\n\t}\n\tlocationId, _ := uuid.Parse(\"fc50d02a-849f-41fb-8af1-0a5216103269\")\n\tresp, err := client.Client.Send(ctx, http.MethodGet, locationId, \"7.1-preview.1\", nil, queryParams, nil, \"\", \"application/json\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar responseValue []Subscription\n\terr = client.Client.UnmarshalCollectionBody(resp, &responseValue)\n\treturn &responseValue, err\n}", "func (client *ReplicationsClient) listHandleResponse(resp *azcore.Response) (ReplicationListResultResponse, error) {\n\tvar val *ReplicationListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn ReplicationListResultResponse{}, err\n\t}\n\treturn ReplicationListResultResponse{RawResponse: resp.Response, ReplicationListResult: val}, nil\n}", "func (client *RoleAssignmentsClient) listByBillingAccountHandleResponse(resp *http.Response) (RoleAssignmentsClientListByBillingAccountResponse, error) {\n\tresult := RoleAssignmentsClientListByBillingAccountResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RoleAssignmentListResult); err != nil {\n\t\treturn RoleAssignmentsClientListByBillingAccountResponse{}, err\n\t}\n\treturn result, nil\n}", "func (o *GetAllSubscriptionsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\t// return empty array\n\t\tpayload = models.SubscriptionList{}\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}", "func (a Accessor) GetSubscriptionList(service, servicePath string, subscriptions *[]Subscription) error {\n\treturn a.access(&AccessParameter{\n\t\tEpID: EntryPointIDs.Subscriptions,\n\t\tMethod: gohttp.HttpMethods.GET,\n\t\tService: service,\n\t\tServicePath: servicePath,\n\t\tPath: \"\",\n\t\tReceivedBody: subscriptions,\n\t})\n}", "func (d *DatastoreSubscription) List() ([]*Subscription, error) {\n\treturn d.collectByField(func(s *Subscription) bool {\n\t\treturn true\n\t})\n}", "func (client *ReplicationsClient) listHandleResponse(resp *http.Response) (ReplicationsClientListResponse, error) {\n\tresult := ReplicationsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ReplicationListResult); err != nil {\n\t\treturn ReplicationsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) listHybridConnectionsSlotHandleResponse(resp *http.Response) (WebAppsListHybridConnectionsSlotResponse, error) {\n\tresult := WebAppsListHybridConnectionsSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.HybridConnection); err != nil {\n\t\treturn WebAppsListHybridConnectionsSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func UnmarshalSubscriptionList(m map[string]json.RawMessage, result interface{}) (err error) 
{\n\tobj := new(SubscriptionList)\n\terr = core.UnmarshalPrimitive(m, \"total_count\", &obj.TotalCount)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"offset\", &obj.Offset)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"limit\", &obj.Limit)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"subscriptions\", &obj.Subscriptions, UnmarshalSubscriptionListItem)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"first\", &obj.First, UnmarshalPageHrefResponse)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"previous\", &obj.Previous, UnmarshalPageHrefResponse)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"next\", &obj.Next, UnmarshalPageHrefResponse)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func (client *subscriptionClient) getHandleResponse(resp *http.Response) (SubscriptionGetResponse, error) {\n\tresult := SubscriptionGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsXML(resp, &result.Object); err != nil {\n\t\treturn SubscriptionGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ProductPolicyClient) listByProductHandleResponse(resp *http.Response) (ProductPolicyClientListByProductResponse, error) {\n\tresult := ProductPolicyClientListByProductResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PolicyCollection); err != nil {\n\t\treturn ProductPolicyClientListByProductResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *APIClient) listByServiceHandleResponse(resp *http.Response) (APIClientListByServiceResponse, error) {\n\tresult := APIClientListByServiceResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.APICollection); err != nil {\n\t\treturn APIClientListByServiceResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *RegistrationDefinitionsClient) listHandleResponse(resp *http.Response) (RegistrationDefinitionsClientListResponse, error) {\n\tresult := RegistrationDefinitionsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RegistrationDefinitionList); err != nil {\n\t\treturn RegistrationDefinitionsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *NotificationRecipientEmailClient) listByNotificationHandleResponse(resp *http.Response) (NotificationRecipientEmailClientListByNotificationResponse, error) {\n\tresult := NotificationRecipientEmailClientListByNotificationResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RecipientEmailCollection); err != nil {\n\t\treturn NotificationRecipientEmailClientListByNotificationResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *GroupClient) listByServiceHandleResponse(resp *http.Response) (GroupListByServiceResponse, error) {\n\tresult := GroupListByServiceResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GroupCollection); err != nil {\n\t\treturn GroupListByServiceResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *HardwareComponentGroupsClient) listByDeviceHandleResponse(resp *http.Response) (HardwareComponentGroupsClientListByDeviceResponse, error) {\n\tresult := HardwareComponentGroupsClientListByDeviceResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.HardwareComponentGroupList); err != nil {\n\t\treturn HardwareComponentGroupsClientListByDeviceResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client DataControllersClient) 
ListInSubscription(ctx context.Context) (result PageOfDataControllerResourcePage, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/DataControllersClient.ListInSubscription\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.podcr.Response.Response != nil {\n\t\t\t\tsc = result.podcr.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tresult.fn = client.listInSubscriptionNextResults\n\treq, err := client.ListInSubscriptionPreparer(ctx)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"azuredata.DataControllersClient\", \"ListInSubscription\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListInSubscriptionSender(req)\n\tif err != nil {\n\t\tresult.podcr.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"azuredata.DataControllersClient\", \"ListInSubscription\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.podcr, err = client.ListInSubscriptionResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"azuredata.DataControllersClient\", \"ListInSubscription\", resp, \"Failure responding to request\")\n\t\treturn\n\t}\n\tif result.podcr.hasNextLink() && result.podcr.IsEmpty() {\n\t\terr = result.NextWithContext(ctx)\n\t\treturn\n\t}\n\n\treturn\n}", "func (client *DicomServicesClient) listByWorkspaceHandleResponse(resp *http.Response) (DicomServicesClientListByWorkspaceResponse, error) {\n\tresult := DicomServicesClientListByWorkspaceResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DicomServiceCollection); err != nil {\n\t\treturn DicomServicesClientListByWorkspaceResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ReplicationvCentersClient) listByReplicationFabricsHandleResponse(resp *http.Response) (ReplicationvCentersClientListByReplicationFabricsResponse, error) {\n\tresult := ReplicationvCentersClientListByReplicationFabricsResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VCenterCollection); err != nil {\n\t\treturn ReplicationvCentersClientListByReplicationFabricsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AccountsClient) listHandleResponse(resp *http.Response) (AccountsClientListResponse, error) {\n\tresult := AccountsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AccountListResult); err != nil {\n\t\treturn AccountsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) listRelayServiceConnectionsSlotHandleResponse(resp *http.Response) (WebAppsListRelayServiceConnectionsSlotResponse, error) {\n\tresult := WebAppsListRelayServiceConnectionsSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RelayServiceConnectionEntity); err != nil {\n\t\treturn WebAppsListRelayServiceConnectionsSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *PortalConfigClient) listByServiceHandleResponse(resp *http.Response) (PortalConfigClientListByServiceResponse, error) {\n\tresult := PortalConfigClientListByServiceResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PortalConfigCollection); err != nil {\n\t\treturn PortalConfigClientListByServiceResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebhooksClient) listHandleResponse(resp *http.Response) (WebhooksClientListResponse, error) {\n\tresult := WebhooksClientListResponse{}\n\tif err := 
runtime.UnmarshalAsJSON(resp, &result.WebhookListResult); err != nil {\n\t\treturn WebhooksClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ChargesClient) listHandleResponse(resp *http.Response) (ChargesClientListResponse, error) {\n\tresult := ChargesClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ChargesListResult); err != nil {\n\t\treturn ChargesClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client DataControllersClient) ListInSubscriptionSender(req *http.Request) (*http.Response, error) {\n\treturn client.Send(req, azure.DoRetryWithRegistration(client.Client))\n}", "func (client *LiveOutputsClient) listHandleResponse(resp *http.Response) (LiveOutputsClientListResponse, error) {\n\tresult := LiveOutputsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.LiveOutputListResult); err != nil {\n\t\treturn LiveOutputsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client IdentityClient) ListRegionSubscriptions(ctx context.Context, request ListRegionSubscriptionsRequest) (response ListRegionSubscriptionsResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.listRegionSubscriptions, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = ListRegionSubscriptionsResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = ListRegionSubscriptionsResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(ListRegionSubscriptionsResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into ListRegionSubscriptionsResponse\")\n\t}\n\treturn\n}", "func (client *WebAppsClient) listPublicCertificatesSlotHandleResponse(resp *http.Response) (WebAppsListPublicCertificatesSlotResponse, error) {\n\tresult := WebAppsListPublicCertificatesSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PublicCertificateCollection); err != nil {\n\t\treturn WebAppsListPublicCertificatesSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *VpnLinkConnectionsClient) listByVpnConnectionHandleResponse(resp *azcore.Response) (ListVpnSiteLinkConnectionsResultResponse, error) {\n\tvar val *ListVpnSiteLinkConnectionsResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn ListVpnSiteLinkConnectionsResultResponse{}, err\n\t}\n\treturn ListVpnSiteLinkConnectionsResultResponse{RawResponse: resp.Response, ListVpnSiteLinkConnectionsResult: val}, nil\n}", "func (client *ReplicationProtectionContainersClient) listByReplicationFabricsHandleResponse(resp *http.Response) (ReplicationProtectionContainersClientListByReplicationFabricsResponse, error) {\n\tresult := ReplicationProtectionContainersClientListByReplicationFabricsResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ProtectionContainerCollection); err != nil {\n\t\treturn ReplicationProtectionContainersClientListByReplicationFabricsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SubscriptionClient) getHandleResponse(resp 
*http.Response) (SubscriptionClientGetResponse, error) {\n\tresult := SubscriptionClientGetResponse{}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SubscriptionContract); err != nil {\n\t\treturn SubscriptionClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *CustomDomainsClient) listHandleResponse(resp *http.Response) (CustomDomainsListResponse, error) {\n\tresult := CustomDomainsListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CustomDomainResourceCollection); err != nil {\n\t\treturn CustomDomainsListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *CompliancesClient) listHandleResponse(resp *http.Response) (CompliancesClientListResponse, error) {\n\tresult := CompliancesClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ComplianceList); err != nil {\n\t\treturn CompliancesClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (r *ProjectsLocationsDataExchangesListingsService) ListSubscriptions(resource string) *ProjectsLocationsDataExchangesListingsListSubscriptionsCall {\n\tc := &ProjectsLocationsDataExchangesListingsListSubscriptionsCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.resource = resource\n\treturn c\n}", "func (client ProviderShareSubscriptionsClient) ListByShareSender(req *http.Request) (*http.Response, error) {\n\treturn client.Send(req, azure.DoRetryWithRegistration(client.Client))\n}", "func (client *AnalysisResultsClient) listHandleResponse(resp *http.Response) (AnalysisResultsClientListResponse, error) {\n\tresult := AnalysisResultsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AnalysisResultListResult); err != nil {\n\t\treturn AnalysisResultsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *APIClient) listByTagsHandleResponse(resp *http.Response) (APIClientListByTagsResponse, error) {\n\tresult := APIClientListByTagsResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.TagResourceCollection); err != nil {\n\t\treturn APIClientListByTagsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (p *Proxy) List() (data [][]string) {\n\tlumber.Trace(\"Proxy listing subscriptions...\")\n\tp.RLock()\n\tdata = p.subscriptions.ToSlice()\n\tp.RUnlock()\n\n\treturn\n}", "func (client *WebAppsClient) listWebJobsSlotHandleResponse(resp *http.Response) (WebAppsListWebJobsSlotResponse, error) {\n\tresult := WebAppsListWebJobsSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.WebJobCollection); err != nil {\n\t\treturn WebAppsListWebJobsSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) listBasicPublishingCredentialsPoliciesSlotHandleResponse(resp *http.Response) (WebAppsListBasicPublishingCredentialsPoliciesSlotResponse, error) {\n\tresult := WebAppsListBasicPublishingCredentialsPoliciesSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PublishingCredentialsPoliciesCollection); err != nil {\n\t\treturn WebAppsListBasicPublishingCredentialsPoliciesSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *AccountsClient) listSKUsHandleResponse(resp *http.Response) (AccountsClientListSKUsResponse, error) {\n\tresult := AccountsClientListSKUsResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AccountSKUListResult); err != nil 
{\n\t\treturn AccountsClientListSKUsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ReplicationvCentersClient) listHandleResponse(resp *http.Response) (ReplicationvCentersClientListResponse, error) {\n\tresult := ReplicationvCentersClientListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VCenterCollection); err != nil {\n\t\treturn ReplicationvCentersClientListResponse{}, err\n\t}\n\treturn result, nil\n}" ]
[ "0.83168864", "0.8227859", "0.81885844", "0.81504506", "0.81241256", "0.81087255", "0.809703", "0.8004202", "0.79758334", "0.79476243", "0.7904227", "0.771174", "0.76578695", "0.7624304", "0.7593285", "0.75220805", "0.74099624", "0.7322869", "0.73116237", "0.7243065", "0.70872605", "0.69773", "0.6870088", "0.685828", "0.6819468", "0.680039", "0.67371535", "0.6694174", "0.6685893", "0.66402906", "0.658273", "0.65754855", "0.6389965", "0.6314984", "0.624315", "0.6230946", "0.6225418", "0.61957043", "0.61902225", "0.6170078", "0.6166001", "0.61590093", "0.6158858", "0.61366403", "0.6109965", "0.60313", "0.603126", "0.6021949", "0.5976554", "0.5959777", "0.5951298", "0.5934279", "0.59334075", "0.5925338", "0.59096843", "0.58972335", "0.58581275", "0.5855637", "0.58533734", "0.58514977", "0.58468246", "0.58335173", "0.5824375", "0.58156157", "0.57803017", "0.5778849", "0.5764164", "0.5757568", "0.5747066", "0.5745535", "0.57236946", "0.56864893", "0.5684645", "0.5653464", "0.56485367", "0.56177855", "0.5616872", "0.56167233", "0.559794", "0.55928075", "0.55924916", "0.5589153", "0.5588475", "0.55868834", "0.5581491", "0.55785966", "0.55748004", "0.55694795", "0.556523", "0.556331", "0.556122", "0.5545853", "0.5543168", "0.5539117", "0.5515161", "0.5514001", "0.550871", "0.55054474", "0.54965156", "0.54954106" ]
0.82509714
1
updateCreateRequest creates the Update request.
func (client *DataCollectionEndpointsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, dataCollectionEndpointName string, options *DataCollectionEndpointsUpdateOptions) (*policy.Request, error) { urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionEndpoints/{dataCollectionEndpointName}" if client.subscriptionID == "" { return nil, errors.New("parameter client.subscriptionID cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) if resourceGroupName == "" { return nil, errors.New("parameter resourceGroupName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) if dataCollectionEndpointName == "" { return nil, errors.New("parameter dataCollectionEndpointName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{dataCollectionEndpointName}", url.PathEscape(dataCollectionEndpointName)) req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.ep, urlPath)) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2021-04-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header.Set("Accept", "application/json") if options != nil && options.Body != nil { return req, runtime.MarshalAsJSON(req, *options.Body) } return req, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (client *ServersClient) updateCreateRequest(ctx context.Context, resourceGroupName string, serverName string, parameters ServerUpdate, options *ServersClientBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serverName == \"\" {\n\t\treturn nil, errors.New(\"parameter serverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serverName}\", url.PathEscape(serverName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *AlertsClient) updateCreateRequest(ctx context.Context, scope string, alertID string, parameters Alert, options *AlertsClientUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.Authorization/roleManagementAlerts/{alertId}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\turlPath = strings.ReplaceAll(urlPath, \"{alertId}\", alertID)\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *FactoriesClient) updateCreateRequest(ctx context.Context, resourceGroupName string, factoryName string, factoryUpdateParameters FactoryUpdateParameters, options *FactoriesClientUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif factoryName == \"\" {\n\t\treturn nil, errors.New(\"parameter factoryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{factoryName}\", url.PathEscape(factoryName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-06-01\")\n\treq.Raw().URL.RawQuery = 
reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, factoryUpdateParameters)\n}", "func (client *CloudServicesClient) updateCreateRequest(ctx context.Context, resourceGroupName string, cloudServiceName string, parameters CloudServiceUpdate, options *CloudServicesClientBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif cloudServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter cloudServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{cloudServiceName}\", url.PathEscape(cloudServiceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-04\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *AccountsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, accountName string, account Account, options *AccountsClientBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CognitiveServices/accounts/{accountName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, account)\n}", "func (client *WorkspacesClient) updateCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, parameters WorkspaceUpdate, options *WorkspacesBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif 
workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *CertificateOrdersClient) updateCreateRequest(ctx context.Context, resourceGroupName string, certificateOrderName string, certificateDistinguishedName CertificateOrderPatchResource, options *CertificateOrdersClientUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif certificateOrderName == \"\" {\n\t\treturn nil, errors.New(\"parameter certificateOrderName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{certificateOrderName}\", url.PathEscape(certificateOrderName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, certificateDistinguishedName)\n}", "func (client *MonitorsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, monitorName string, options *MonitorsClientBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors/{monitorName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif monitorName == \"\" {\n\t\treturn nil, errors.New(\"parameter monitorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{monitorName}\", url.PathEscape(monitorName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-03-01\")\n\treq.Raw().URL.RawQuery 
= reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\tif options != nil && options.Body != nil {\n\t\treturn req, runtime.MarshalAsJSON(req, *options.Body)\n\t}\n\treturn req, nil\n}", "func (client *SQLVirtualMachinesClient) updateCreateRequest(ctx context.Context, resourceGroupName string, sqlVirtualMachineName string, parameters Update, options *SQLVirtualMachinesClientBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/{sqlVirtualMachineName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif sqlVirtualMachineName == \"\" {\n\t\treturn nil, errors.New(\"parameter sqlVirtualMachineName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sqlVirtualMachineName}\", url.PathEscape(sqlVirtualMachineName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-03-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *ServersClient) updateCreateRequest(ctx context.Context, resourceGroup string, fluidRelayServerName string, resource ServerUpdate, options *ServersClientUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.FluidRelay/fluidRelayServers/{fluidRelayServerName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroup == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroup cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroup}\", url.PathEscape(resourceGroup))\n\tif fluidRelayServerName == \"\" {\n\t\treturn nil, errors.New(\"parameter fluidRelayServerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{fluidRelayServerName}\", url.PathEscape(fluidRelayServerName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, resource)\n}", "func (client *ClustersClient) updateCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, parameters ClusterForUpdate, options *ClustersClientBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/serverGroupsv2/{clusterName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", 
url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-11-08\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *AlertProcessingRulesClient) updateCreateRequest(ctx context.Context, resourceGroupName string, alertProcessingRuleName string, alertProcessingRulePatch PatchObject, options *AlertProcessingRulesClientUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AlertsManagement/actionRules/{alertProcessingRuleName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif alertProcessingRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter alertProcessingRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{alertProcessingRuleName}\", url.PathEscape(alertProcessingRuleName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-08\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, alertProcessingRulePatch)\n}", "func (client *GroupClient) updateCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, groupID string, ifMatch string, parameters GroupUpdateParameters, options *GroupUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups/{groupId}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif groupID == \"\" {\n\t\treturn nil, errors.New(\"parameter groupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{groupId}\", url.PathEscape(groupID))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", 
url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"If-Match\", ifMatch)\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *WebAppsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, name string, siteEnvelope SitePatchResource, options *WebAppsUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, siteEnvelope)\n}", "func (client *APIClient) updateCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, apiID string, ifMatch string, parameters APIUpdateContract, options *APIClientUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif apiID == \"\" {\n\t\treturn nil, errors.New(\"parameter apiID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{apiId}\", url.PathEscape(apiID))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"If-Match\"] = []string{ifMatch}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *IotSecuritySolutionClient) updateCreateRequest(ctx 
context.Context, resourceGroupName string, solutionName string, updateIotSecuritySolutionData UpdateIotSecuritySolutionData, options *IotSecuritySolutionClientUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/iotSecuritySolutions/{solutionName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif solutionName == \"\" {\n\t\treturn nil, errors.New(\"parameter solutionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{solutionName}\", url.PathEscape(solutionName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, updateIotSecuritySolutionData)\n}", "func (client *CapacityReservationsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, parameters CapacityReservationUpdate, options *CapacityReservationsBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}/capacityReservations/{capacityReservationName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif capacityReservationGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter capacityReservationGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{capacityReservationGroupName}\", url.PathEscape(capacityReservationGroupName))\n\tif capacityReservationName == \"\" {\n\t\treturn nil, errors.New(\"parameter capacityReservationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{capacityReservationName}\", url.PathEscape(capacityReservationName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *VirtualMachinesClient) updateCreateRequest(ctx context.Context, resourceGroupName string, virtualMachineName string, body VirtualMachineUpdate, options *VirtualMachinesClientBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{virtualMachineName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif virtualMachineName == \"\" {\n\t\treturn nil, errors.New(\"parameter virtualMachineName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{virtualMachineName}\", url.PathEscape(virtualMachineName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-10-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, body)\n}", "func (client *VirtualMachineImageTemplatesClient) updateCreateRequest(ctx context.Context, resourceGroupName string, imageTemplateName string, parameters ImageTemplateUpdateParameters, options *VirtualMachineImageTemplatesClientBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.VirtualMachineImages/imageTemplates/{imageTemplateName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif imageTemplateName == \"\" {\n\t\treturn nil, errors.New(\"parameter imageTemplateName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{imageTemplateName}\", url.PathEscape(imageTemplateName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-10-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *WebhooksClient) updateCreateRequest(ctx context.Context, resourceGroupName string, registryName string, webhookName string, webhookUpdateParameters WebhookUpdateParameters, options *WebhooksClientBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", 
url.PathEscape(resourceGroupName))\n\tif registryName == \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\tif webhookName == \"\" {\n\t\treturn nil, errors.New(\"parameter webhookName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{webhookName}\", url.PathEscape(webhookName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, webhookUpdateParameters)\n}", "func (client *LocalRulestacksClient) updateCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, properties LocalRulestackResourceUpdate, options *LocalRulestacksClientUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif localRulestackName == \"\" {\n\t\treturn nil, errors.New(\"parameter localRulestackName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{localRulestackName}\", url.PathEscape(localRulestackName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-29\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, properties)\n}", "func (client *CassandraClustersClient) updateCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, body ClusterResource, options *CassandraClustersClientBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", 
\"2023-03-15-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, body)\n}", "func (client *ApplyUpdatesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, providerName string, resourceType string, resourceName string, options *ApplyUpdatesClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{providerName}/{resourceType}/{resourceName}/providers/Microsoft.Maintenance/applyUpdates/default\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif providerName == \"\" {\n\t\treturn nil, errors.New(\"parameter providerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{providerName}\", url.PathEscape(providerName))\n\tif resourceType == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceType}\", url.PathEscape(resourceType))\n\tif resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(resourceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ManagedInstancesClient) updateCreateRequest(ctx context.Context, resourceGroupName string, managedInstanceName string, parameters ManagedInstanceUpdate, options *ManagedInstancesClientBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif managedInstanceName == \"\" {\n\t\treturn nil, errors.New(\"parameter managedInstanceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedInstanceName}\", url.PathEscape(managedInstanceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, 
nil\n}", "func (client *ConnectedEnvironmentsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, connectedEnvironmentName string, options *ConnectedEnvironmentsClientUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.App/connectedEnvironments/{connectedEnvironmentName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif connectedEnvironmentName == \"\" {\n\t\treturn nil, errors.New(\"parameter connectedEnvironmentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{connectedEnvironmentName}\", url.PathEscape(connectedEnvironmentName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ClustersClient) updateCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, parameters ClusterPatch, options *ClustersUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/clusters/{clusterName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *SapMonitorsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, sapMonitorName string, tagsParameter Tags, options *SapMonitorsClientUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HanaOnAzure/sapMonitors/{sapMonitorName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, 
\"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif sapMonitorName == \"\" {\n\t\treturn nil, errors.New(\"parameter sapMonitorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sapMonitorName}\", url.PathEscape(sapMonitorName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-02-07-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, tagsParameter)\n}", "func (client *KeyVaultClient) updateKeyCreateRequest(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyUpdateParameters, options *KeyVaultClientUpdateKeyOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys/{key-name}/{key-version}\"\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-name}\", url.PathEscape(keyName))\n\tif keyVersion == \"\" {\n\t\treturn nil, errors.New(\"parameter keyVersion cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-version}\", url.PathEscape(keyVersion))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.2\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *SubscriptionClient) updateCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, sid string, ifMatch string, parameters SubscriptionUpdateParameters, options *SubscriptionClientUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/subscriptions/{sid}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif sid == \"\" {\n\t\treturn nil, errors.New(\"parameter sid cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sid}\", url.PathEscape(sid))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Notify != nil {\n\t\treqQP.Set(\"notify\", strconv.FormatBool(*options.Notify))\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\tif options != nil && options.AppType != nil {\n\t\treqQP.Set(\"appType\", string(*options.AppType))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"If-Match\"] 
= []string{ifMatch}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *AgentsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, storageMoverName string, agentName string, agent AgentUpdateParameters, options *AgentsClientUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorageMover/storageMovers/{storageMoverName}/agents/{agentName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif storageMoverName == \"\" {\n\t\treturn nil, errors.New(\"parameter storageMoverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{storageMoverName}\", url.PathEscape(storageMoverName))\n\tif agentName == \"\" {\n\t\treturn nil, errors.New(\"parameter agentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{agentName}\", url.PathEscape(agentName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-07-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, agent)\n}", "func (client *PortalConfigClient) updateCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, portalConfigID string, ifMatch string, parameters PortalConfigContract, options *PortalConfigClientUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/portalconfigs/{portalConfigId}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif portalConfigID == \"\" {\n\t\treturn nil, errors.New(\"parameter portalConfigID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{portalConfigId}\", url.PathEscape(portalConfigID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"If-Match\"] = []string{ifMatch}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := 
runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *TablesClient) updateCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, tableName string, parameters Table, options *TablesClientUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/tables/{tableName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\tif tableName == \"\" {\n\t\treturn nil, errors.New(\"parameter tableName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{tableName}\", url.PathEscape(tableName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *ReplicationvCentersClient) updateCreateRequest(ctx context.Context, fabricName string, vcenterName string, updateVCenterRequest UpdateVCenterRequest, options *ReplicationvCentersClientBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationvCenters/{vcenterName}\"\n\tif client.resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(client.resourceName))\n\tif client.resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(client.resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif fabricName == \"\" {\n\t\treturn nil, errors.New(\"parameter fabricName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{fabricName}\", url.PathEscape(fabricName))\n\tif vcenterName == \"\" {\n\t\treturn nil, errors.New(\"parameter vcenterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vcenterName}\", url.PathEscape(vcenterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", 
\"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, updateVCenterRequest)\n}", "func (client *NetworkToNetworkInterconnectsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, networkFabricName string, networkToNetworkInterconnectName string, body NetworkToNetworkInterconnectPatch, options *NetworkToNetworkInterconnectsClientBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetworkFabric/networkFabrics/{networkFabricName}/networkToNetworkInterconnects/{networkToNetworkInterconnectName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif networkFabricName == \"\" {\n\t\treturn nil, errors.New(\"parameter networkFabricName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{networkFabricName}\", url.PathEscape(networkFabricName))\n\tif networkToNetworkInterconnectName == \"\" {\n\t\treturn nil, errors.New(\"parameter networkToNetworkInterconnectName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{networkToNetworkInterconnectName}\", url.PathEscape(networkToNetworkInterconnectName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-06-15\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, body)\n}", "func (client *KeyVaultClient) updateKeyCreateRequest(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyUpdateParameters, options *KeyVaultClientUpdateKeyOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys/{key-name}/{key-version}\"\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-name}\", url.PathEscape(keyName))\n\t// if keyVersion == \"\" {\n\t// \treturn nil, errors.New(\"parameter keyVersion cannot be empty\")\n\t// }\n\turlPath = strings.ReplaceAll(urlPath, \"{key-version}\", url.PathEscape(keyVersion))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.3\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *ContainerGroupsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, containerGroupName string, resource Resource, options *ContainerGroupsClientUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerInstance/containerGroups/{containerGroupName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", 
url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif containerGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter containerGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{containerGroupName}\", url.PathEscape(containerGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, resource)\n}", "func (client *SyncGroupsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, serverName string, databaseName string, syncGroupName string, parameters SyncGroup, options *SyncGroupsClientBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/syncGroups/{syncGroupName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serverName == \"\" {\n\t\treturn nil, errors.New(\"parameter serverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serverName}\", url.PathEscape(serverName))\n\tif databaseName == \"\" {\n\t\treturn nil, errors.New(\"parameter databaseName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{databaseName}\", url.PathEscape(databaseName))\n\tif syncGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter syncGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{syncGroupName}\", url.PathEscape(syncGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *CassandraClustersClient) createUpdateCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, body ClusterResource, options *CassandraClustersClientBeginCreateUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", 
url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-03-15-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, body)\n}", "func (client *MetricAlertsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, ruleName string, parameters MetricAlertResourcePatch, options *MetricAlertsClientUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/metricAlerts/{ruleName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif ruleName == \"\" {\n\t\treturn nil, errors.New(\"parameter ruleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ruleName}\", url.PathEscape(ruleName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *AvailabilitySetsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, availabilitySetName string, parameters AvailabilitySetUpdate, options *AvailabilitySetsUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif availabilitySetName == \"\" {\n\t\treturn nil, errors.New(\"parameter availabilitySetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{availabilitySetName}\", url.PathEscape(availabilitySetName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client 
*DiskEncryptionSetsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet DiskEncryptionSetUpdate, options *DiskEncryptionSetsBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif diskEncryptionSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter diskEncryptionSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{diskEncryptionSetName}\", url.PathEscape(diskEncryptionSetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, diskEncryptionSet)\n}", "func (client *SpatialAnchorsAccountsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, accountName string, spatialAnchorsAccount SpatialAnchorsAccount, options *SpatialAnchorsAccountsClientUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MixedReality/spatialAnchorsAccounts/{accountName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-03-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, spatialAnchorsAccount)\n}", "func (client *IscsiTargetsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, diskPoolName string, iscsiTargetName string, iscsiTargetUpdatePayload IscsiTargetUpdate, options *IscsiTargetsClientBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StoragePool/diskPools/{diskPoolName}/iscsiTargets/{iscsiTargetName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, 
\"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif diskPoolName == \"\" {\n\t\treturn nil, errors.New(\"parameter diskPoolName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{diskPoolName}\", url.PathEscape(diskPoolName))\n\tif iscsiTargetName == \"\" {\n\t\treturn nil, errors.New(\"parameter iscsiTargetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{iscsiTargetName}\", url.PathEscape(iscsiTargetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, iscsiTargetUpdatePayload)\n}", "func (client *AFDOriginsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, profileName string, originGroupName string, originName string, originUpdateProperties AFDOriginUpdateParameters, options *AFDOriginsClientBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/originGroups/{originGroupName}/origins/{originName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif profileName == \"\" {\n\t\treturn nil, errors.New(\"parameter profileName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{profileName}\", url.PathEscape(profileName))\n\tif originGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter originGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{originGroupName}\", url.PathEscape(originGroupName))\n\tif originName == \"\" {\n\t\treturn nil, errors.New(\"parameter originName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{originName}\", url.PathEscape(originName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, originUpdateProperties)\n}", "func (client *RedisClient) updateCreateRequest(ctx context.Context, resourceGroupName string, name string, parameters RedisUpdateParameters, options *RedisUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redis/{name}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", 
url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-12-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *DataCollectionEndpointsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, dataCollectionEndpointName string, options *DataCollectionEndpointsClientUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionEndpoints/{dataCollectionEndpointName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif dataCollectionEndpointName == \"\" {\n\t\treturn nil, errors.New(\"parameter dataCollectionEndpointName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{dataCollectionEndpointName}\", url.PathEscape(dataCollectionEndpointName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\tif options != nil && options.Body != nil {\n\t\treturn req, runtime.MarshalAsJSON(req, *options.Body)\n\t}\n\treturn req, nil\n}", "func (client *DicomServicesClient) updateCreateRequest(ctx context.Context, resourceGroupName string, dicomServiceName string, workspaceName string, dicomservicePatchResource DicomServicePatchResource, options *DicomServicesClientBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HealthcareApis/workspaces/{workspaceName}/dicomservices/{dicomServiceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif dicomServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter dicomServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{dicomServiceName}\", url.PathEscape(dicomServiceName))\n\tif workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName 
cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, dicomservicePatchResource)\n}", "func (client *IntegrationRuntimeNodesClient) updateCreateRequest(ctx context.Context, resourceGroupName string, factoryName string, integrationRuntimeName string, nodeName string, updateIntegrationRuntimeNodeRequest UpdateIntegrationRuntimeNodeRequest, options *IntegrationRuntimeNodesClientUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/integrationRuntimes/{integrationRuntimeName}/nodes/{nodeName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif factoryName == \"\" {\n\t\treturn nil, errors.New(\"parameter factoryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{factoryName}\", url.PathEscape(factoryName))\n\tif integrationRuntimeName == \"\" {\n\t\treturn nil, errors.New(\"parameter integrationRuntimeName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{integrationRuntimeName}\", url.PathEscape(integrationRuntimeName))\n\tif nodeName == \"\" {\n\t\treturn nil, errors.New(\"parameter nodeName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{nodeName}\", url.PathEscape(nodeName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, updateIntegrationRuntimeNodeRequest)\n}", "func (client *AgentPoolsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, kubernetesClusterName string, agentPoolName string, agentPoolUpdateParameters AgentPoolPatchParameters, options *AgentPoolsClientBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetworkCloud/kubernetesClusters/{kubernetesClusterName}/agentPools/{agentPoolName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif kubernetesClusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter kubernetesClusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{kubernetesClusterName}\", 
url.PathEscape(kubernetesClusterName))\n\tif agentPoolName == \"\" {\n\t\treturn nil, errors.New(\"parameter agentPoolName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{agentPoolName}\", url.PathEscape(agentPoolName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, agentPoolUpdateParameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *ReplicationsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, registryName string, replicationName string, replicationUpdateParameters ReplicationUpdateParameters, options *ReplicationsClientBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif registryName == \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\tif replicationName == \"\" {\n\t\treturn nil, errors.New(\"parameter replicationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{replicationName}\", url.PathEscape(replicationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, replicationUpdateParameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *UserMetricsKeysClient) createOrUpdateCreateRequest(ctx context.Context, options *UserMetricsKeysClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/trafficManagerUserMetricsKeys/default\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *OutputsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, jobName string, outputName string, output Output, options *OutputsClientUpdateOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/outputs/{outputName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif jobName == \"\" {\n\t\treturn nil, errors.New(\"parameter jobName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{jobName}\", url.PathEscape(jobName))\n\tif outputName == \"\" {\n\t\treturn nil, errors.New(\"parameter outputName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{outputName}\", url.PathEscape(outputName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Raw().Header[\"If-Match\"] = []string{*options.IfMatch}\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, output)\n}", "func (client *OutputsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, jobName string, outputName string, output Output, options *OutputsUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/outputs/{outputName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif jobName == \"\" {\n\t\treturn nil, errors.New(\"parameter jobName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{jobName}\", url.PathEscape(jobName))\n\tif outputName == \"\" {\n\t\treturn nil, errors.New(\"parameter outputName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{outputName}\", url.PathEscape(outputName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-04-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Raw().Header.Set(\"If-Match\", *options.IfMatch)\n\t}\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, output)\n}", "func (client *CapacitiesClient) updateCreateRequest(ctx context.Context, resourceGroupName string, dedicatedCapacityName string, capacityUpdateParameters DedicatedCapacityUpdateParameters, options *CapacitiesClientBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBIDedicated/capacities/{dedicatedCapacityName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif dedicatedCapacityName == \"\" {\n\t\treturn nil, errors.New(\"parameter dedicatedCapacityName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{dedicatedCapacityName}\", url.PathEscape(dedicatedCapacityName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-01-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, capacityUpdateParameters)\n}", "func CreateUpdateK8sApplicationConfigRequest() (request *UpdateK8sApplicationConfigRequest) {\n\trequest = &UpdateK8sApplicationConfigRequest{\n\t\tRoaRequest: &requests.RoaRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Edas\", \"2017-08-01\", \"UpdateK8sApplicationConfig\", \"/pop/v5/k8s/acs/k8s_app_configuration\", \"Edas\", \"openAPI\")\n\trequest.Method = requests.PUT\n\treturn\n}", "func (client *DedicatedHostsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, parameters DedicatedHostUpdate, options *DedicatedHostsBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif hostGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter hostGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{hostGroupName}\", url.PathEscape(hostGroupName))\n\tif hostName == \"\" {\n\t\treturn nil, errors.New(\"parameter hostName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{hostName}\", url.PathEscape(hostName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *DedicatedHostsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, parameters DedicatedHostUpdate, options *DedicatedHostsBeginUpdateOptions) (*azcore.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{hostGroupName}\", url.PathEscape(hostGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{hostName}\", url.PathEscape(hostName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodPatch, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, req.MarshalAsJSON(parameters)\n}", "func (client *VirtualMachineScaleSetsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters VirtualMachineScaleSetUpdate, options *VirtualMachineScaleSetsBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif vmScaleSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter vmScaleSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vmScaleSetName}\", url.PathEscape(vmScaleSetName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *MonitoringSettingsClient) updatePutCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, monitoringSettingResource MonitoringSettingResource, options *MonitoringSettingsClientBeginUpdatePutOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, monitoringSettingResource)\n}", "func CreateUpdateIntegrationRequest() (request *UpdateIntegrationRequest) {\n\trequest = &UpdateIntegrationRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"ARMS\", \"2019-08-08\", \"UpdateIntegration\", \"arms\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *VideosClient) updateCreateRequest(ctx context.Context, resourceGroupName string, accountName string, videoName string, parameters VideoEntity, options *VideosClientUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/videos/{videoName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\tif videoName == \"\" {\n\t\treturn nil, errors.New(\"parameter videoName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{videoName}\", url.PathEscape(videoName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *GalleryImagesClient) updateCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage GalleryImageUpdate, options *GalleryImagesClientBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif galleryName == \"\" {\n\t\treturn nil, errors.New(\"parameter galleryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{galleryName}\", url.PathEscape(galleryName))\n\tif galleryImageName == \"\" {\n\t\treturn nil, errors.New(\"parameter galleryImageName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{galleryImageName}\", url.PathEscape(galleryImageName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, 
runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, galleryImage)\n}", "func (client *ManagedDatabasesClient) updateCreateRequest(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string, parameters ManagedDatabaseUpdate, options *ManagedDatabasesClientBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif managedInstanceName == \"\" {\n\t\treturn nil, errors.New(\"parameter managedInstanceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedInstanceName}\", url.PathEscape(managedInstanceName))\n\tif databaseName == \"\" {\n\t\treturn nil, errors.New(\"parameter databaseName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{databaseName}\", url.PathEscape(databaseName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *TaskRunsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, registryName string, taskRunName string, updateParameters TaskRunUpdateParameters, options *TaskRunsClientBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/taskRuns/{taskRunName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif registryName == \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\tif taskRunName == \"\" {\n\t\treturn nil, errors.New(\"parameter taskRunName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{taskRunName}\", url.PathEscape(taskRunName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := 
req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, updateParameters)\n}", "func (client *ReplicationsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, registryName string, replicationName string, replicationUpdateParameters ReplicationUpdateParameters, options *ReplicationsBeginUpdateOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif registryName == \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\tif replicationName == \"\" {\n\t\treturn nil, errors.New(\"parameter replicationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{replicationName}\", url.PathEscape(replicationName))\n\treq, err := azcore.NewRequest(ctx, http.MethodPatch, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.URL.RawQuery = reqQP.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, req.MarshalAsJSON(replicationUpdateParameters)\n}", "func (client *VirtualNetworkLinksClient) updateCreateRequest(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string, parameters VirtualNetworkLink, options *VirtualNetworkLinksBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/virtualNetworkLinks/{virtualNetworkLinkName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif privateZoneName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateZoneName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateZoneName}\", url.PathEscape(privateZoneName))\n\tif virtualNetworkLinkName == \"\" {\n\t\treturn nil, errors.New(\"parameter virtualNetworkLinkName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{virtualNetworkLinkName}\", url.PathEscape(virtualNetworkLinkName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", 
\"2020-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Raw().Header.Set(\"If-Match\", *options.IfMatch)\n\t}\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func CreateUpdateMessageAppRequest() (request *UpdateMessageAppRequest) {\n\trequest = &UpdateMessageAppRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"live\", \"2016-11-01\", \"UpdateMessageApp\", \"live\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateUpdateRulesAttributeRequest() (request *UpdateRulesAttributeRequest) {\n\trequest = &UpdateRulesAttributeRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Alb\", \"2020-06-16\", \"UpdateRulesAttribute\", \"alb\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateUpdateEcsImageRequest() (request *UpdateEcsImageRequest) {\n\trequest = &UpdateEcsImageRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"faas\", \"2020-02-17\", \"UpdateEcsImage\", \"faas\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateUpdateTicketRequest() (request *UpdateTicketRequest) {\n\trequest = &UpdateTicketRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"scsp\", \"2020-07-02\", \"UpdateTicket\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *DefenderSettingsClient) createOrUpdateCreateRequest(ctx context.Context, defenderSettingsModel DefenderSettingsModel, options *DefenderSettingsClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.IoTSecurity/defenderSettings/default\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, defenderSettingsModel)\n}", "func (client *ApplyUpdatesClient) listCreateRequest(ctx context.Context, options *ApplyUpdatesClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Maintenance/applyUpdates\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *GalleryImageVersionsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion GalleryImageVersionUpdate, options 
*GalleryImageVersionsBeginUpdateOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{galleryName}\", url.PathEscape(galleryName))\n\turlPath = strings.ReplaceAll(urlPath, \"{galleryImageName}\", url.PathEscape(galleryImageName))\n\turlPath = strings.ReplaceAll(urlPath, \"{galleryImageVersionName}\", url.PathEscape(galleryImageVersionName))\n\treq, err := azcore.NewRequest(ctx, http.MethodPatch, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-09-30\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, req.MarshalAsJSON(galleryImageVersion)\n}", "func (client *subscriptionClient) putCreateRequest(ctx context.Context, topicName string, subscriptionName string, requestBody map[string]interface{}, options *SubscriptionPutOptions) (*policy.Request, error) {\n\turlPath := \"/{topicName}/subscriptions/{subscriptionName}\"\n\tif topicName == \"\" {\n\t\treturn nil, errors.New(\"parameter topicName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{topicName}\", url.PathEscape(topicName))\n\tif subscriptionName == \"\" {\n\t\treturn nil, errors.New(\"parameter subscriptionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionName}\", url.PathEscape(subscriptionName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif client.apiVersion != nil {\n\t\treqQP.Set(\"api-version\", \"2017_04\")\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Raw().Header.Set(\"If-Match\", *options.IfMatch)\n\t}\n\treq.Raw().Header.Set(\"Accept\", \"application/xml, application/atom+xml\")\n\treturn req, runtime.MarshalAsXML(req, requestBody)\n}", "func NewUpdateRequest(payload *roles.Role) *rolespb.UpdateRequest {\n\tmessage := &rolespb.UpdateRequest{\n\t\tName: payload.Name,\n\t}\n\tif payload.Description != nil {\n\t\tmessage.Description = *payload.Description\n\t}\n\treturn message\n}", "func newUpdateAttestorUpdateAttestorRequest(ctx context.Context, f *Attestor, c *Client) (map[string]interface{}, error) {\n\treq := map[string]interface{}{}\n\n\treturn req, nil\n}", "func (client *KeyVaultClient) updateCertificateCreateRequest(ctx context.Context, vaultBaseURL string, certificateName string, certificateVersion string, parameters CertificateUpdateParameters, options *KeyVaultClientUpdateCertificateOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/certificates/{certificate-name}/{certificate-version}\"\n\tif certificateName == \"\" {\n\t\treturn nil, errors.New(\"parameter certificateName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{certificate-name}\", url.PathEscape(certificateName))\n\tif certificateVersion == \"\" {\n\t\treturn nil, 
errors.New(\"parameter certificateVersion cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{certificate-version}\", url.PathEscape(certificateVersion))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.2\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *KeyVaultClient) updateCertificateOperationCreateRequest(ctx context.Context, vaultBaseURL string, certificateName string, certificateOperation CertificateOperationUpdateParameter, options *KeyVaultClientUpdateCertificateOperationOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/certificates/{certificate-name}/pending\"\n\tif certificateName == \"\" {\n\t\treturn nil, errors.New(\"parameter certificateName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{certificate-name}\", url.PathEscape(certificateName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.2\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, certificateOperation)\n}", "func (client *MachineExtensionsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, name string, extensionName string, extensionParameters MachineExtensionUpdate, options *MachineExtensionsClientBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{name}/extensions/{extensionName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif extensionName == \"\" {\n\t\treturn nil, errors.New(\"parameter extensionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{extensionName}\", url.PathEscape(extensionName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-10-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, extensionParameters)\n}", "func (client *RecordSetsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, relativeRecordSetName string, parameters RecordSet, options *RecordSetsUpdateOptions) (*azcore.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/{recordType}/{relativeRecordSetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif privateZoneName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateZoneName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateZoneName}\", url.PathEscape(privateZoneName))\n\tif recordType == \"\" {\n\t\treturn nil, errors.New(\"parameter recordType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{recordType}\", url.PathEscape(string(recordType)))\n\tif relativeRecordSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter relativeRecordSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{relativeRecordSetName}\", relativeRecordSetName)\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodPatch, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Header.Set(\"If-Match\", *options.IfMatch)\n\t}\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, req.MarshalAsJSON(parameters)\n}", "func (client *APIClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, apiID string, parameters APICreateOrUpdateParameter, options *APIClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif apiID == \"\" {\n\t\treturn nil, errors.New(\"parameter apiID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{apiId}\", url.PathEscape(apiID))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Raw().Header[\"If-Match\"] = []string{*options.IfMatch}\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *WebAppsClient) 
updateConfigurationCreateRequest(ctx context.Context, resourceGroupName string, name string, siteConfig SiteConfigResource, options *WebAppsUpdateConfigurationOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/web\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, siteConfig)\n}", "func (client *ServersClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, serverName string, parameters Server, options *ServersClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serverName == \"\" {\n\t\treturn nil, errors.New(\"parameter serverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serverName}\", url.PathEscape(serverName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (_BaseLibrary *BaseLibraryTransactor) UpdateRequest(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _BaseLibrary.contract.Transact(opts, \"updateRequest\")\n}", "func (client *ApplyUpdatesClient) getCreateRequest(ctx context.Context, resourceGroupName string, providerName string, resourceType string, resourceName string, applyUpdateName string, options *ApplyUpdatesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{providerName}/{resourceType}/{resourceName}/providers/Microsoft.Maintenance/applyUpdates/{applyUpdateName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = 
strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif providerName == \"\" {\n\t\treturn nil, errors.New(\"parameter providerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{providerName}\", url.PathEscape(providerName))\n\tif resourceType == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceType}\", url.PathEscape(resourceType))\n\tif resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(resourceName))\n\tif applyUpdateName == \"\" {\n\t\treturn nil, errors.New(\"parameter applyUpdateName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{applyUpdateName}\", url.PathEscape(applyUpdateName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *CustomDomainsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, appName string, domainName string, domainResource CustomDomainResource, options *CustomDomainsBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/domains/{domainName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif appName == \"\" {\n\t\treturn nil, errors.New(\"parameter appName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{appName}\", url.PathEscape(appName))\n\tif domainName == \"\" {\n\t\treturn nil, errors.New(\"parameter domainName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{domainName}\", url.PathEscape(domainName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-09-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, domainResource)\n}", "func CreateUpdateEndpointGroupRequest() (request *UpdateEndpointGroupRequest) {\n\trequest = &UpdateEndpointGroupRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Ga\", \"2019-11-20\", \"UpdateEndpointGroup\", 
\"gaplus\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *GroupClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, groupID string, parameters GroupCreateParameters, options *GroupCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups/{groupId}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif groupID == \"\" {\n\t\treturn nil, errors.New(\"parameter groupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{groupId}\", url.PathEscape(groupID))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Raw().Header.Set(\"If-Match\", *options.IfMatch)\n\t}\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *VirtualMachineScaleSetVMRunCommandsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, runCommandName string, runCommand VirtualMachineRunCommandUpdate, options *VirtualMachineScaleSetVMRunCommandsBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands/{runCommandName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif vmScaleSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter vmScaleSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vmScaleSetName}\", url.PathEscape(vmScaleSetName))\n\tif instanceID == \"\" {\n\t\treturn nil, errors.New(\"parameter instanceID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{instanceId}\", url.PathEscape(instanceID))\n\tif runCommandName == \"\" {\n\t\treturn nil, errors.New(\"parameter runCommandName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{runCommandName}\", url.PathEscape(runCommandName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := 
req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json, text/json\")\n\treturn req, runtime.MarshalAsJSON(req, runCommand)\n}", "func (client *CloudServicesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, cloudServiceName string, parameters CloudService, options *CloudServicesClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif cloudServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter cloudServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{cloudServiceName}\", url.PathEscape(cloudServiceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-04\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *FactoriesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, factoryName string, factory Factory, options *FactoriesClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif factoryName == \"\" {\n\t\treturn nil, errors.New(\"parameter factoryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{factoryName}\", url.PathEscape(factoryName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Raw().Header[\"If-Match\"] = []string{*options.IfMatch}\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, factory)\n}", "func (client *IPAllocationsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, ipAllocationName string, parameters IPAllocation, options *IPAllocationsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations/{ipAllocationName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif ipAllocationName == \"\" {\n\t\treturn nil, errors.New(\"parameter ipAllocationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ipAllocationName}\", url.PathEscape(ipAllocationName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *CertificateOrdersClient) updateCertificateCreateRequest(ctx context.Context, resourceGroupName string, certificateOrderName string, name string, keyVaultCertificate CertificatePatchResource, options *CertificateOrdersClientUpdateCertificateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif certificateOrderName == \"\" {\n\t\treturn nil, errors.New(\"parameter certificateOrderName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{certificateOrderName}\", url.PathEscape(certificateOrderName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, keyVaultCertificate)\n}", "func (client *SubscriptionClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, sid string, parameters SubscriptionCreateParameters, options *SubscriptionClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/subscriptions/{sid}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = 
strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif sid == \"\" {\n\t\treturn nil, errors.New(\"parameter sid cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sid}\", url.PathEscape(sid))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Notify != nil {\n\t\treqQP.Set(\"notify\", strconv.FormatBool(*options.Notify))\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\tif options != nil && options.AppType != nil {\n\t\treqQP.Set(\"appType\", string(*options.AppType))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Raw().Header[\"If-Match\"] = []string{*options.IfMatch}\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *MonitoringSettingsClient) updatePatchCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, monitoringSettingResource MonitoringSettingResource, options *MonitoringSettingsClientBeginUpdatePatchOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, monitoringSettingResource)\n}", "func (client *WebAppsClient) updateMetadataCreateRequest(ctx context.Context, resourceGroupName string, name string, metadata StringDictionary, options *WebAppsUpdateMetadataOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/metadata\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == 
\"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, metadata)\n}", "func (_BaseAccessControlGroup *BaseAccessControlGroupTransactor) UpdateRequest(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _BaseAccessControlGroup.contract.Transact(opts, \"updateRequest\")\n}", "func (client *PolicyDefinitionsClient) createOrUpdateCreateRequest(ctx context.Context, policyDefinitionName string, parameters PolicyDefinition, options *PolicyDefinitionsCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}\"\n\tif policyDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter policyDefinitionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{policyDefinitionName}\", url.PathEscape(policyDefinitionName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *IPAllocationsClient) updateTagsCreateRequest(ctx context.Context, resourceGroupName string, ipAllocationName string, parameters TagsObject, options *IPAllocationsClientUpdateTagsOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations/{ipAllocationName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif ipAllocationName == \"\" {\n\t\treturn nil, errors.New(\"parameter ipAllocationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ipAllocationName}\", url.PathEscape(ipAllocationName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := 
runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *ApplyUpdatesClient) createOrUpdateParentCreateRequest(ctx context.Context, resourceGroupName string, providerName string, resourceParentType string, resourceParentName string, resourceType string, resourceName string, options *ApplyUpdatesClientCreateOrUpdateParentOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{providerName}/{resourceParentType}/{resourceParentName}/{resourceType}/{resourceName}/providers/Microsoft.Maintenance/applyUpdates/default\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif providerName == \"\" {\n\t\treturn nil, errors.New(\"parameter providerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{providerName}\", url.PathEscape(providerName))\n\tif resourceParentType == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceParentType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceParentType}\", url.PathEscape(resourceParentType))\n\tif resourceParentName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceParentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceParentName}\", url.PathEscape(resourceParentName))\n\tif resourceType == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceType}\", url.PathEscape(resourceType))\n\tif resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(resourceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}" ]
[ "0.71459115", "0.7138837", "0.7115914", "0.7081609", "0.69797033", "0.69454056", "0.6931128", "0.6921976", "0.691563", "0.6890736", "0.6885596", "0.6881603", "0.68747497", "0.6861223", "0.683294", "0.6830553", "0.6821185", "0.6766642", "0.67643875", "0.6764275", "0.67592096", "0.6733731", "0.6713006", "0.6701798", "0.66800535", "0.6671782", "0.6671563", "0.6669067", "0.6658211", "0.6656047", "0.6646883", "0.66432095", "0.664048", "0.662893", "0.6621845", "0.6616565", "0.6600466", "0.6597203", "0.6592885", "0.65786827", "0.6570652", "0.65488183", "0.65090895", "0.6507633", "0.6504223", "0.64932495", "0.6492651", "0.6458242", "0.64565694", "0.6447073", "0.64432836", "0.643522", "0.6426528", "0.6402796", "0.64007217", "0.6398606", "0.63961846", "0.6389821", "0.6376482", "0.63702714", "0.63654226", "0.6360274", "0.6356151", "0.63550663", "0.6326963", "0.6322408", "0.63146645", "0.63110167", "0.63093305", "0.62924635", "0.6285088", "0.6273257", "0.624977", "0.6249223", "0.6236574", "0.6223897", "0.6217198", "0.6214112", "0.6209436", "0.6193852", "0.61919177", "0.61868304", "0.617507", "0.6170639", "0.6147057", "0.6146385", "0.61440843", "0.61431706", "0.61429733", "0.6135712", "0.6122299", "0.61201215", "0.61033", "0.60783166", "0.6072559", "0.6046429", "0.60325843", "0.6031999", "0.60277224", "0.6024899" ]
0.64908904
47
updateHandleResponse handles the Update response.
func (client *DataCollectionEndpointsClient) updateHandleResponse(resp *http.Response) (DataCollectionEndpointsUpdateResponse, error) {
	result := DataCollectionEndpointsUpdateResponse{RawResponse: resp}
	if err := runtime.UnmarshalAsJSON(resp, &result.DataCollectionEndpointResource); err != nil {
		return DataCollectionEndpointsUpdateResponse{}, runtime.NewResponseError(err, resp)
	}
	return result, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (client *OutputsClient) updateHandleResponse(resp *http.Response) (OutputsUpdateResponse, error) {\n\tresult := OutputsUpdateResponse{RawResponse: resp}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Output); err != nil {\n\t\treturn OutputsUpdateResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *OutputsClient) updateHandleResponse(resp *http.Response) (OutputsClientUpdateResponse, error) {\n\tresult := OutputsClientUpdateResponse{}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Output); err != nil {\n\t\treturn OutputsClientUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ServersClient) updateHandleResponse(resp *http.Response) (ServersClientUpdateResponse, error) {\n\tresult := ServersClientUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Server); err != nil {\n\t\treturn ServersClientUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *APIClient) updateHandleResponse(resp *http.Response) (APIClientUpdateResponse, error) {\n\tresult := APIClientUpdateResponse{}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.APIContract); err != nil {\n\t\treturn APIClientUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *GalleryImageVersionsClient) updateHandleResponse(resp *azcore.Response) (GalleryImageVersionResponse, error) {\n\tvar val *GalleryImageVersion\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn GalleryImageVersionResponse{}, err\n\t}\n\treturn GalleryImageVersionResponse{RawResponse: resp.Response, GalleryImageVersion: val}, nil\n}", "func (client *WebAppsClient) updateHandleResponse(resp *http.Response) (WebAppsUpdateResponse, error) {\n\tresult := WebAppsUpdateResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Site); err != nil {\n\t\treturn WebAppsUpdateResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *VideosClient) updateHandleResponse(resp *http.Response) (VideosClientUpdateResponse, error) {\n\tresult := VideosClientUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VideoEntity); err != nil {\n\t\treturn VideosClientUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *TablesClient) updateHandleResponse(resp *http.Response) (TablesClientUpdateResponse, error) {\n\tresult := TablesClientUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Table); err != nil {\n\t\treturn TablesClientUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *MetricAlertsClient) updateHandleResponse(resp *http.Response) (MetricAlertsClientUpdateResponse, error) {\n\tresult := MetricAlertsClientUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.MetricAlertResource); err != nil {\n\t\treturn MetricAlertsClientUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *IotSecuritySolutionClient) updateHandleResponse(resp *http.Response) (IotSecuritySolutionClientUpdateResponse, error) {\n\tresult := IotSecuritySolutionClientUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.IoTSecuritySolutionModel); err != nil {\n\t\treturn IotSecuritySolutionClientUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AgentsClient) 
updateHandleResponse(resp *http.Response) (AgentsClientUpdateResponse, error) {\n\tresult := AgentsClientUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Agent); err != nil {\n\t\treturn AgentsClientUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *GroupClient) updateHandleResponse(resp *http.Response) (GroupUpdateResponse, error) {\n\tresult := GroupUpdateResponse{RawResponse: resp}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GroupContract); err != nil {\n\t\treturn GroupUpdateResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *DataCollectionEndpointsClient) updateHandleResponse(resp *http.Response) (DataCollectionEndpointsClientUpdateResponse, error) {\n\tresult := DataCollectionEndpointsClientUpdateResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DataCollectionEndpointResource); err != nil {\n\t\treturn DataCollectionEndpointsClientUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ClustersClient) updateHandleResponse(resp *http.Response) (ClustersUpdateResponse, error) {\n\tresult := ClustersUpdateResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Cluster); err != nil {\n\t\treturn ClustersUpdateResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *LocalRulestacksClient) updateHandleResponse(resp *http.Response) (LocalRulestacksClientUpdateResponse, error) {\n\tresult := LocalRulestacksClientUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.LocalRulestackResource); err != nil {\n\t\treturn LocalRulestacksClientUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *RedisClient) updateHandleResponse(resp *http.Response) (RedisUpdateResponse, error) {\n\tresult := RedisUpdateResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RedisResource); err != nil {\n\t\treturn RedisUpdateResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *PortalConfigClient) updateHandleResponse(resp *http.Response) (PortalConfigClientUpdateResponse, error) {\n\tresult := PortalConfigClientUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PortalConfigContract); err != nil {\n\t\treturn PortalConfigClientUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SpatialAnchorsAccountsClient) updateHandleResponse(resp *http.Response) (SpatialAnchorsAccountsClientUpdateResponse, error) {\n\tresult := SpatialAnchorsAccountsClientUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SpatialAnchorsAccount); err != nil {\n\t\treturn SpatialAnchorsAccountsClientUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AvailabilitySetsClient) updateHandleResponse(resp *http.Response) (AvailabilitySetsUpdateResponse, error) {\n\tresult := AvailabilitySetsUpdateResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AvailabilitySet); err != nil {\n\t\treturn AvailabilitySetsUpdateResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *SapMonitorsClient) updateHandleResponse(resp *http.Response) (SapMonitorsClientUpdateResponse, error) {\n\tresult := SapMonitorsClientUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SapMonitor); err != nil {\n\t\treturn SapMonitorsClientUpdateResponse{}, err\n\t}\n\treturn result, 
nil\n}", "func (client *AlertProcessingRulesClient) updateHandleResponse(resp *http.Response) (AlertProcessingRulesClientUpdateResponse, error) {\n\tresult := AlertProcessingRulesClientUpdateResponse{RawResponse: resp}\n\tif val := resp.Header.Get(\"x-ms-request-id\"); val != \"\" {\n\t\tresult.XMSRequestID = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AlertProcessingRule); err != nil {\n\t\treturn AlertProcessingRulesClientUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *RecordSetsClient) updateHandleResponse(resp *azcore.Response) (RecordSetResponse, error) {\n\tvar val *RecordSet\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn RecordSetResponse{}, err\n\t}\n\treturn RecordSetResponse{RawResponse: resp.Response, RecordSet: val}, nil\n}", "func (client *KeyVaultClient) updateKeyHandleResponse(resp *http.Response) (KeyVaultClientUpdateKeyResponse, error) {\n\tresult := KeyVaultClientUpdateKeyResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil {\n\t\treturn KeyVaultClientUpdateKeyResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SubscriptionClient) updateHandleResponse(resp *http.Response) (SubscriptionClientUpdateResponse, error) {\n\tresult := SubscriptionClientUpdateResponse{}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SubscriptionContract); err != nil {\n\t\treturn SubscriptionClientUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *KeyVaultClient) updateKeyHandleResponse(resp *http.Response) (KeyVaultClientUpdateKeyResponse, error) {\n\tresult := KeyVaultClientUpdateKeyResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil {\n\t\treturn KeyVaultClientUpdateKeyResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DedicatedHostsClient) updateHandleResponse(resp *azcore.Response) (DedicatedHostResponse, error) {\n\tvar val *DedicatedHost\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn DedicatedHostResponse{}, err\n\t}\n\treturn DedicatedHostResponse{RawResponse: resp.Response, DedicatedHost: val}, nil\n}", "func (client *WebAppsClient) updateMetadataHandleResponse(resp *http.Response) (WebAppsUpdateMetadataResponse, error) {\n\tresult := WebAppsUpdateMetadataResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.StringDictionary); err != nil {\n\t\treturn WebAppsUpdateMetadataResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *CertificateOrdersClient) updateHandleResponse(resp *http.Response) (CertificateOrdersClientUpdateResponse, error) {\n\tresult := CertificateOrdersClientUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CertificateOrder); err != nil {\n\t\treturn CertificateOrdersClientUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ContainerGroupsClient) updateHandleResponse(resp *http.Response) (ContainerGroupsClientUpdateResponse, error) {\n\tresult := ContainerGroupsClientUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ContainerGroup); err != nil {\n\t\treturn ContainerGroupsClientUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *IntegrationRuntimeNodesClient) updateHandleResponse(resp *http.Response) (IntegrationRuntimeNodesClientUpdateResponse, error) {\n\tresult := IntegrationRuntimeNodesClientUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, 
&result.SelfHostedIntegrationRuntimeNode); err != nil {\n\t\treturn IntegrationRuntimeNodesClientUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) updateSlotHandleResponse(resp *http.Response) (WebAppsUpdateSlotResponse, error) {\n\tresult := WebAppsUpdateSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Site); err != nil {\n\t\treturn WebAppsUpdateSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ConnectedEnvironmentsClient) updateHandleResponse(resp *http.Response) (ConnectedEnvironmentsClientUpdateResponse, error) {\n\tresult := ConnectedEnvironmentsClientUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ConnectedEnvironment); err != nil {\n\t\treturn ConnectedEnvironmentsClientUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) updateMetadataSlotHandleResponse(resp *http.Response) (WebAppsUpdateMetadataSlotResponse, error) {\n\tresult := WebAppsUpdateMetadataSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.StringDictionary); err != nil {\n\t\treturn WebAppsUpdateMetadataSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *FactoriesClient) updateHandleResponse(resp *http.Response) (FactoriesClientUpdateResponse, error) {\n\tresult := FactoriesClientUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Factory); err != nil {\n\t\treturn FactoriesClientUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *KeyVaultClient) updateSecretHandleResponse(resp *http.Response) (KeyVaultClientUpdateSecretResponse, error) {\n\tresult := KeyVaultClientUpdateSecretResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SecretBundle); err != nil {\n\t\treturn KeyVaultClientUpdateSecretResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) updateConfigurationHandleResponse(resp *http.Response) (WebAppsUpdateConfigurationResponse, error) {\n\tresult := WebAppsUpdateConfigurationResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SiteConfigResource); err != nil {\n\t\treturn WebAppsUpdateConfigurationResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ApplyUpdatesClient) getHandleResponse(resp *http.Response) (ApplyUpdatesClientGetResponse, error) {\n\tresult := ApplyUpdatesClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ApplyUpdate); err != nil {\n\t\treturn ApplyUpdatesClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) updateApplicationSettingsHandleResponse(resp *http.Response) (WebAppsUpdateApplicationSettingsResponse, error) {\n\tresult := WebAppsUpdateApplicationSettingsResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.StringDictionary); err != nil {\n\t\treturn WebAppsUpdateApplicationSettingsResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) updateApplicationSettingsSlotHandleResponse(resp *http.Response) (WebAppsUpdateApplicationSettingsSlotResponse, error) {\n\tresult := WebAppsUpdateApplicationSettingsSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.StringDictionary); err != nil {\n\t\treturn WebAppsUpdateApplicationSettingsSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client 
*WebAppsClient) updateConfigurationSlotHandleResponse(resp *http.Response) (WebAppsUpdateConfigurationSlotResponse, error) {\n\tresult := WebAppsUpdateConfigurationSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SiteConfigResource); err != nil {\n\t\treturn WebAppsUpdateConfigurationSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) updatePremierAddOnSlotHandleResponse(resp *http.Response) (WebAppsUpdatePremierAddOnSlotResponse, error) {\n\tresult := WebAppsUpdatePremierAddOnSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PremierAddOn); err != nil {\n\t\treturn WebAppsUpdatePremierAddOnSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) updateHybridConnectionHandleResponse(resp *http.Response) (WebAppsUpdateHybridConnectionResponse, error) {\n\tresult := WebAppsUpdateHybridConnectionResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.HybridConnection); err != nil {\n\t\treturn WebAppsUpdateHybridConnectionResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) updateDomainOwnershipIdentifierSlotHandleResponse(resp *http.Response) (WebAppsUpdateDomainOwnershipIdentifierSlotResponse, error) {\n\tresult := WebAppsUpdateDomainOwnershipIdentifierSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Identifier); err != nil {\n\t\treturn WebAppsUpdateDomainOwnershipIdentifierSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) updateHybridConnectionSlotHandleResponse(resp *http.Response) (WebAppsUpdateHybridConnectionSlotResponse, error) {\n\tresult := WebAppsUpdateHybridConnectionSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.HybridConnection); err != nil {\n\t\treturn WebAppsUpdateHybridConnectionSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) updatePremierAddOnHandleResponse(resp *http.Response) (WebAppsUpdatePremierAddOnResponse, error) {\n\tresult := WebAppsUpdatePremierAddOnResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PremierAddOn); err != nil {\n\t\treturn WebAppsUpdatePremierAddOnResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *RouteTablesClient) updateTagsHandleResponse(resp *http.Response) (RouteTablesUpdateTagsResponse, error) {\n\tresult := RouteTablesUpdateTagsResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RouteTable); err != nil {\n\t\treturn RouteTablesUpdateTagsResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) updateSourceControlSlotHandleResponse(resp *http.Response) (WebAppsUpdateSourceControlSlotResponse, error) {\n\tresult := WebAppsUpdateSourceControlSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SiteSourceControl); err != nil {\n\t\treturn WebAppsUpdateSourceControlSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) updateBackupConfigurationSlotHandleResponse(resp *http.Response) (WebAppsUpdateBackupConfigurationSlotResponse, error) {\n\tresult := WebAppsUpdateBackupConfigurationSlotResponse{RawResponse: resp}\n\tif err := 
runtime.UnmarshalAsJSON(resp, &result.BackupRequest); err != nil {\n\t\treturn WebAppsUpdateBackupConfigurationSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) updateDomainOwnershipIdentifierHandleResponse(resp *http.Response) (WebAppsUpdateDomainOwnershipIdentifierResponse, error) {\n\tresult := WebAppsUpdateDomainOwnershipIdentifierResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Identifier); err != nil {\n\t\treturn WebAppsUpdateDomainOwnershipIdentifierResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ApplyUpdatesClient) listHandleResponse(resp *http.Response) (ApplyUpdatesClientListResponse, error) {\n\tresult := ApplyUpdatesClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ListApplyUpdate); err != nil {\n\t\treturn ApplyUpdatesClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (e *Entry) updateResponse(eTag string, maxAge int, resp *ocsp.Response, respBytes []byte, write bool) error {\n\te.mu.Lock()\n\tdefer e.mu.Unlock()\n\te.eTag = eTag\n\te.maxAge = time.Second * time.Duration(maxAge)\n\te.lastSync = e.clk.Now()\n\tif resp != nil {\n\t\te.response = respBytes\n\t\te.nextUpdate = resp.NextUpdate\n\t\te.thisUpdate = resp.ThisUpdate\n\t\tif e.responseFilename != \"\" && write {\n\t\t\terr := e.writeToDisk()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (client *IPAllocationsClient) updateTagsHandleResponse(resp *http.Response) (IPAllocationsClientUpdateTagsResponse, error) {\n\tresult := IPAllocationsClientUpdateTagsResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.IPAllocation); err != nil {\n\t\treturn IPAllocationsClientUpdateTagsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client VersionsClient) UpdateResponder(resp *http.Response) (result VersionTemplatespecs, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *PublicIPAddressesClient) updateTagsHandleResponse(resp *http.Response) (PublicIPAddressesClientUpdateTagsResponse, error) {\n\tresult := PublicIPAddressesClientUpdateTagsResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PublicIPAddress); err != nil {\n\t\treturn PublicIPAddressesClientUpdateTagsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualNetworkTapsClient) updateTagsHandleResponse(resp *azcore.Response) (VirtualNetworkTapResponse, error) {\n\tvar val *VirtualNetworkTap\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn VirtualNetworkTapResponse{}, err\n\t}\n\treturn VirtualNetworkTapResponse{RawResponse: resp.Response, VirtualNetworkTap: val}, nil\n}", "func (client *DevicesClient) updateTagsHandleResponse(resp *http.Response) (DevicesClientUpdateTagsResponse, error) {\n\tresult := DevicesClientUpdateTagsResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Device); err != nil {\n\t\treturn DevicesClientUpdateTagsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) updateRelayServiceConnectionHandleResponse(resp *http.Response) (WebAppsUpdateRelayServiceConnectionResponse, error) {\n\tresult := WebAppsUpdateRelayServiceConnectionResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, 
&result.RelayServiceConnectionEntity); err != nil {\n\t\treturn WebAppsUpdateRelayServiceConnectionResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) updateRelayServiceConnectionSlotHandleResponse(resp *http.Response) (WebAppsUpdateRelayServiceConnectionSlotResponse, error) {\n\tresult := WebAppsUpdateRelayServiceConnectionSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RelayServiceConnectionEntity); err != nil {\n\t\treturn WebAppsUpdateRelayServiceConnectionSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ApplyUpdatesClient) createOrUpdateHandleResponse(resp *http.Response) (ApplyUpdatesClientCreateOrUpdateResponse, error) {\n\tresult := ApplyUpdatesClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ApplyUpdate); err != nil {\n\t\treturn ApplyUpdatesClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (ch *Channel) HandleUpdate(chUpdate pclient.ChannelUpdate, responder ChUpdateResponder) {\n\tch.Lock()\n\tdefer ch.Unlock()\n\n\tif ch.status == closed {\n\t\tch.Error(\"Unexpected HandleUpdate call for closed channel\")\n\t\treturn\n\t}\n\n\texpiry := time.Now().UTC().Add(ch.timeoutCfg.response).Unix()\n\tnotif := makeChUpdateNotif(ch.getChInfo(), chUpdate.State, expiry)\n\tentry := chUpdateResponderEntry{\n\t\tnotif: notif,\n\t\tresponder: responder,\n\t\tnotifExpiry: expiry,\n\t}\n\n\t// Need not store entries for notification with expiry = 0, as these update requests have\n\t// already been rejected by the perun node. Hence no response is expected for these notifications.\n\tif expiry != 0 {\n\t\tch.chUpdateResponders[notif.UpdateID] = entry\n\t}\n\tch.sendChUpdateNotif(notif)\n}", "func (client *WebAppsClient) updateSourceControlHandleResponse(resp *http.Response) (WebAppsUpdateSourceControlResponse, error) {\n\tresult := WebAppsUpdateSourceControlResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SiteSourceControl); err != nil {\n\t\treturn WebAppsUpdateSourceControlResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) updateSlotConfigurationNamesHandleResponse(resp *http.Response) (WebAppsUpdateSlotConfigurationNamesResponse, error) {\n\tresult := WebAppsUpdateSlotConfigurationNamesResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SlotConfigNamesResource); err != nil {\n\t\treturn WebAppsUpdateSlotConfigurationNamesResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (f5 *BigIP) handleUpdate(msg comm.Message) comm.Message {\n\tswitch f5.UpdateMode {\n\tcase vsUpdateMode:\n\t\tm := f5.HandleVSUpdate(msg)\n\t\treturn m\n\tcase policyUpdateMode:\n\t\tm := f5.handleGlobalPolicyUpdate(msg)\n\t\treturn m\n\tdefault:\n\t\tmsg.Error = fmt.Sprintf(\"unsupported updateMode %v\", f5.UpdateMode)\n\t\treturn msg\n\t}\n}", "func (client *WebAppsClient) updateAzureStorageAccountsSlotHandleResponse(resp *http.Response) (WebAppsUpdateAzureStorageAccountsSlotResponse, error) {\n\tresult := WebAppsUpdateAzureStorageAccountsSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AzureStoragePropertyDictionaryResource); err != nil {\n\t\treturn WebAppsUpdateAzureStorageAccountsSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *CertificateOrdersClient) updateCertificateHandleResponse(resp 
*http.Response) (CertificateOrdersClientUpdateCertificateResponse, error) {\n\tresult := CertificateOrdersClientUpdateCertificateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CertificateResource); err != nil {\n\t\treturn CertificateOrdersClientUpdateCertificateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *PacketCoreDataPlanesClient) updateTagsHandleResponse(resp *http.Response) (PacketCoreDataPlanesClientUpdateTagsResponse, error) {\n\tresult := PacketCoreDataPlanesClientUpdateTagsResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PacketCoreDataPlane); err != nil {\n\t\treturn PacketCoreDataPlanesClientUpdateTagsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) updateBackupConfigurationHandleResponse(resp *http.Response) (WebAppsUpdateBackupConfigurationResponse, error) {\n\tresult := WebAppsUpdateBackupConfigurationResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.BackupRequest); err != nil {\n\t\treturn WebAppsUpdateBackupConfigurationResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) updateDiagnosticLogsConfigSlotHandleResponse(resp *http.Response) (WebAppsUpdateDiagnosticLogsConfigSlotResponse, error) {\n\tresult := WebAppsUpdateDiagnosticLogsConfigSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SiteLogsConfig); err != nil {\n\t\treturn WebAppsUpdateDiagnosticLogsConfigSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) updateVnetConnectionHandleResponse(resp *http.Response) (WebAppsUpdateVnetConnectionResponse, error) {\n\tresult := WebAppsUpdateVnetConnectionResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VnetInfoResource); err != nil {\n\t\treturn WebAppsUpdateVnetConnectionResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *KeyVaultClient) updateCertificateHandleResponse(resp *http.Response) (KeyVaultClientUpdateCertificateResponse, error) {\n\tresult := KeyVaultClientUpdateCertificateResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CertificateBundle); err != nil {\n\t\treturn KeyVaultClientUpdateCertificateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) updateSitePushSettingsSlotHandleResponse(resp *http.Response) (WebAppsUpdateSitePushSettingsSlotResponse, error) {\n\tresult := WebAppsUpdateSitePushSettingsSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PushSettings); err != nil {\n\t\treturn WebAppsUpdateSitePushSettingsSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client AppsClient) UpdateResponder(resp *http.Response) (result OperationStatus, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func HandlerResultsUpdate(db *DB) func(c echo.Context) error {\n\treturn func(c echo.Context) error {\n\t\tid, err := GetPlayoutIDAndCheckToken(c, db)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tresult := new(protocol.ResultA)\n\t\tif err := c.Bind(result); err != nil {\n\t\t\treturn echo.NewHTTPError(http.StatusBadRequest, \"Json Parse Error\")\n\t\t}\n\t\terr = id.Update(*result)\n\t\tif err != nil 
{\n\t\t\treturn echo.NewHTTPError(http.StatusBadRequest, errors.Wrap(err, \"DB error on Update\"))\n\t\t}\n\t\treturn c.String(http.StatusOK, \"\")\n\t}\n}", "func (e *Entry) updateResponse(eTag string, maxAge int, resp *ocsp.Response, respBytes []byte, stableBackings []scache.Cache) {\n\te.mu.Lock()\n\tdefer e.mu.Unlock()\n\te.eTag = eTag\n\te.maxAge = time.Second * time.Duration(maxAge)\n\te.lastSync = e.clk.Now()\n\tif resp != nil {\n\t\te.info(\"Updating with new response, expires in %s\", common.HumanDuration(resp.NextUpdate.Sub(e.clk.Now())))\n\t\te.response = respBytes\n\t\te.nextUpdate = resp.NextUpdate\n\t\te.thisUpdate = resp.ThisUpdate\n\t\tfor _, s := range stableBackings {\n\t\t\ts.Write(e.name, e.response) // logging is internal\n\t\t}\n\t}\n}", "func (client *GalleryImageVersionsClient) createOrUpdateHandleResponse(resp *azcore.Response) (GalleryImageVersionResponse, error) {\n\tvar val *GalleryImageVersion\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn GalleryImageVersionResponse{}, err\n\t}\n\treturn GalleryImageVersionResponse{RawResponse: resp.Response, GalleryImageVersion: val}, nil\n}", "func (client *KeyVaultClient) updateKeyRotationPolicyHandleResponse(resp *http.Response) (KeyVaultClientUpdateKeyRotationPolicyResponse, error) {\n\tresult := KeyVaultClientUpdateKeyRotationPolicyResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyRotationPolicy); err != nil {\n\t\treturn KeyVaultClientUpdateKeyRotationPolicyResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *KeyVaultClient) updateCertificateOperationHandleResponse(resp *http.Response) (KeyVaultClientUpdateCertificateOperationResponse, error) {\n\tresult := KeyVaultClientUpdateCertificateOperationResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CertificateOperation); err != nil {\n\t\treturn KeyVaultClientUpdateCertificateOperationResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *KeyVaultClient) updateCertificatePolicyHandleResponse(resp *http.Response) (KeyVaultClientUpdateCertificatePolicyResponse, error) {\n\tresult := KeyVaultClientUpdateCertificatePolicyResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CertificatePolicy); err != nil {\n\t\treturn KeyVaultClientUpdateCertificatePolicyResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) updateAuthSettingsSlotHandleResponse(resp *http.Response) (WebAppsUpdateAuthSettingsSlotResponse, error) {\n\tresult := WebAppsUpdateAuthSettingsSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SiteAuthSettings); err != nil {\n\t\treturn WebAppsUpdateAuthSettingsSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func handleUpdate(w http.ResponseWriter, r *http.Request) {\n\tfmt.Printf(\"Received request: %v %v %v\\n\", r.Method, r.URL, r.Proto)\n\tbf.RepopulateBloomFilter()\n}", "func (client *WebAppsClient) updateDiagnosticLogsConfigHandleResponse(resp *http.Response) (WebAppsUpdateDiagnosticLogsConfigResponse, error) {\n\tresult := WebAppsUpdateDiagnosticLogsConfigResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SiteLogsConfig); err != nil {\n\t\treturn WebAppsUpdateDiagnosticLogsConfigResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) updateAuthSettingsHandleResponse(resp *http.Response) (WebAppsUpdateAuthSettingsResponse, error) {\n\tresult := 
WebAppsUpdateAuthSettingsResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SiteAuthSettings); err != nil {\n\t\treturn WebAppsUpdateAuthSettingsResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) updateAzureStorageAccountsHandleResponse(resp *http.Response) (WebAppsUpdateAzureStorageAccountsResponse, error) {\n\tresult := WebAppsUpdateAzureStorageAccountsResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AzureStoragePropertyDictionaryResource); err != nil {\n\t\treturn WebAppsUpdateAzureStorageAccountsResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) updateFtpAllowedSlotHandleResponse(resp *http.Response) (WebAppsUpdateFtpAllowedSlotResponse, error) {\n\tresult := WebAppsUpdateFtpAllowedSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CsmPublishingCredentialsPoliciesEntity); err != nil {\n\t\treturn WebAppsUpdateFtpAllowedSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) updateVnetConnectionSlotHandleResponse(resp *http.Response) (WebAppsUpdateVnetConnectionSlotResponse, error) {\n\tresult := WebAppsUpdateVnetConnectionSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VnetInfoResource); err != nil {\n\t\treturn WebAppsUpdateVnetConnectionSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *KeyVaultClient) updateStorageAccountHandleResponse(resp *http.Response) (KeyVaultClientUpdateStorageAccountResponse, error) {\n\tresult := KeyVaultClientUpdateStorageAccountResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.StorageBundle); err != nil {\n\t\treturn KeyVaultClientUpdateStorageAccountResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) updateSitePushSettingsHandleResponse(resp *http.Response) (WebAppsUpdateSitePushSettingsResponse, error) {\n\tresult := WebAppsUpdateSitePushSettingsResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PushSettings); err != nil {\n\t\treturn WebAppsUpdateSitePushSettingsResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func HandleHTTPUpdate(w http.ResponseWriter, r *http.Request) {\n\t//log.Printf(w, \"Hi there, Going to Update %s! 
Method=%s\\n\", r.URL.Path[1:], r.Method)\n\tif r.Method == \"PUT\" {\n\t\t//This can be used for updating an existing variable\n\t\tcontent, err := ioutil.ReadAll(r.Body)\n\t\tr.Body.Close()\n\t\tif err != nil {\n\t\t\t//log.Printf(w, \"Error understanding the Body %v\", err)\n\t\t\tlogger.Errorf(\"Error understanding the Body %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tvar val HTTPUpdate\n\t\tvar CurrentE Entry\n\t\tvar OK bool\n\t\terr = json.Unmarshal(content, &val)\n\t\tif err != nil {\n\t\t\t//log.Printf(w, \"Wrong json format %v\", err)\n\t\t\tlogger.Errorf(\"Wrong json format %v\", err)\n\t\t\treturn\n\t\t}\n\t\tif CurrentE, OK = ConfigMap[val.Name]; !OK {\n\t\t\tlogger.Infof(\"Error Proxy entry is incorrect / empty for %s\", val.Name)\n\t\t\t//log.Printf(w, \"Error Proxy entry is incorrect / empty for %s\", val.Name)\n\t\t\treturn\n\t\t}\n\t\tlogger.Info(\"Updating From porxy for %s From %s TO %s\", val.Name, CurrentE.Pair.To, val.Addr)\n\t\tCurrentE.Pair.To = val.Addr\n\t\tConfigMap[val.Name] = CurrentE\n\t\treturn\n\t}\n\treturn\n}", "func (o *sampleUpdateHandler) Update(rw http.ResponseWriter, req *http.Request) {\n\to.UpdateHandler.Update(rw, req)\n}", "func (client AccountClient) UpdateResponder(resp *http.Response) (result AccountResourceDescription, err error) {\n err = autorest.Respond(\n resp,\n azure.WithErrorUnlessStatusCode(http.StatusOK,http.StatusCreated,http.StatusAccepted),\n autorest.ByUnmarshallingJSON(&result),\n autorest.ByClosing())\n result.Response = autorest.Response{Response: resp}\n return\n }", "func (client *WebAppsClient) updateConnectionStringsSlotHandleResponse(resp *http.Response) (WebAppsUpdateConnectionStringsSlotResponse, error) {\n\tresult := WebAppsUpdateConnectionStringsSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ConnectionStringDictionary); err != nil {\n\t\treturn WebAppsUpdateConnectionStringsSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) updateVnetConnectionGatewaySlotHandleResponse(resp *http.Response) (WebAppsUpdateVnetConnectionGatewaySlotResponse, error) {\n\tresult := WebAppsUpdateVnetConnectionGatewaySlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VnetGateway); err != nil {\n\t\treturn WebAppsUpdateVnetConnectionGatewaySlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (h *Handler) UpdateHandler(w http.ResponseWriter, r *http.Request) {\n\tvar u, updatedUser user.User\n\terr := json.NewDecoder(r.Body).Decode(&u)\n\tif err != nil {\n\t\th.log.Error(err)\n\t\t_ = response.HTTPError(w, http.StatusBadRequest, response.ErrorParsingUser.Error())\n\t\treturn\n\t}\n\n\tid := chi.URLParam(r, \"id\")\n\n\tcu, err := auth.GetID(r)\n\tif err != nil {\n\t\t_ = response.HTTPError(w, http.StatusBadRequest, response.ErrorBadRequest.Error())\n\t\treturn\n\t}\n\trole, err := auth.GetRole(r)\n\tif err != nil {\n\t\t_ = response.HTTPError(w, http.StatusBadRequest, response.ErrorBadRequest.Error())\n\t\treturn\n\t}\n\n\tctx, cancel := context.WithCancel(r.Context())\n\tdefer cancel()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\t_ = response.HTTPError(w, http.StatusBadGateway, response.ErrTimeout.Error())\n\t\treturn\n\tdefault:\n\t\tupdatedUser, err = h.service.Update(ctx, id, cu, role, &u)\n\t}\n\n\tif err != nil {\n\t\th.log.Error(err)\n\t\t_ = response.HTTPError(w, http.StatusNotFound, err.Error())\n\t\treturn\n\t}\n\n\trender.JSON(w, r, render.M{\"user\": updatedUser})\n}", "func 
(client *WebAppsClient) updateFtpAllowedHandleResponse(resp *http.Response) (WebAppsUpdateFtpAllowedResponse, error) {\n\tresult := WebAppsUpdateFtpAllowedResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CsmPublishingCredentialsPoliciesEntity); err != nil {\n\t\treturn WebAppsUpdateFtpAllowedResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ProviderShareSubscriptionsClient) adjustHandleResponse(resp *http.Response) (ProviderShareSubscriptionsClientAdjustResponse, error) {\n\tresult := ProviderShareSubscriptionsClientAdjustResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ProviderShareSubscription); err != nil {\n\t\treturn ProviderShareSubscriptionsClientAdjustResponse{}, err\n\t}\n\treturn result, nil\n}", "func (b *Bot) HandleUpdate(u telegram.Update) {\n\tcmdParts := strings.Split(*u.Message.Text, \" \")\n\tcmdName := strings.TrimSuffix(cmdParts[0], \"@\"+b.username)\n\tswitch cmdName {\n\tcase startCmd:\n\t\tb.onStartCmd(cmdParts[1:], *u.Message)\n\tcase stopCmd:\n\t\tb.onStopCmd(cmdParts[1:], *u.Message)\n\tcase beginCmd:\n\t\tb.onBeginCmd(cmdParts[1:], *u.Message)\n\tcase bidCmd, bidButtonText:\n\t\tb.onBidCmd(cmdParts[1:], *u.Message)\n\tcase challengeCmd:\n\t\tb.onChallengeCmd(cmdParts[1:], *u.Message)\n\tdefault:\n\t\tb.telegram.SendMessage(u.Message.Chat.ID, fmt.Sprintf(\"Unknown command: %v\", cmdName))\n\t}\n}", "func (client *WebAppsClient) updateVnetConnectionGatewayHandleResponse(resp *http.Response) (WebAppsUpdateVnetConnectionGatewayResponse, error) {\n\tresult := WebAppsUpdateVnetConnectionGatewayResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VnetGateway); err != nil {\n\t\treturn WebAppsUpdateVnetConnectionGatewayResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (s *SideTwistHandler) handleResponse(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tguid, ok := vars[\"identifier\"]\n\tif !ok {\n\t\tlogger.Error(\"Identifier not included in POST request to /search/{identifier}\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(serverErrMsg))\n\t\treturn\n\t}\n\tpostBody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tlogger.Error(fmt.Sprintf(\"Failed to read POST body: %s\", err.Error()))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(serverErrMsg))\n\t\treturn\n\t}\n\tresponse, err := s.processAndForwardImplantOutput(guid, postBody)\n\tif err != nil {\n\t\tlogger.Error(fmt.Sprintf(\"Failed to process and forward task output: %s\", err.Error()))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(serverErrMsg))\n\t\treturn\n\t}\n\tlogger.Success(response)\n\tfmt.Fprint(w, \"\")\n}", "func (client MSIXPackagesClient) UpdateResponder(resp *http.Response) (result MSIXPackage, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func HandleUpdate(w http.ResponseWriter, r *http.Request) error {\n\n\t// Fetch the params\n\tparams, err := mux.Params(r)\n\tif err != nil {\n\t\treturn server.InternalError(err)\n\t}\n\n\t// Find the post\n\tpost, err := posts.Find(params.GetInt(posts.KeyName))\n\tif err != nil {\n\t\treturn server.NotFoundError(err)\n\t}\n\n\t// Check the authenticity token\n\terr = session.CheckAuthenticity(w, r)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\t// Authorise update post\n\tuser := session.CurrentUser(w, r)\n\terr = can.Update(post, user)\n\tif err != nil {\n\t\treturn server.NotAuthorizedError(err)\n\t}\n\n\t// Validate the params, removing any we don't accept\n\tpostParams := post.ValidateParams(params.Map(), posts.AllowedParams())\n\n\terr = post.Update(postParams)\n\tif err != nil {\n\t\treturn server.InternalError(err)\n\t}\n\n\t// Redirect to post\n\treturn server.Redirect(w, r, post.ShowURL())\n}" ]
[ "0.80759746", "0.8007575", "0.7810742", "0.77884024", "0.777713", "0.77637255", "0.7760966", "0.77551454", "0.7722201", "0.7672641", "0.76220673", "0.7590562", "0.75634336", "0.7550017", "0.75266266", "0.74942684", "0.7493825", "0.74926215", "0.7406175", "0.74009675", "0.73927224", "0.73470664", "0.7330143", "0.7321968", "0.73139256", "0.72939676", "0.72663194", "0.72341156", "0.7225857", "0.72234267", "0.72029555", "0.7189498", "0.7167227", "0.7164609", "0.69832766", "0.6958423", "0.6948345", "0.6942159", "0.68785745", "0.6852185", "0.6829246", "0.67754745", "0.67702043", "0.67544764", "0.67467767", "0.67424166", "0.67253685", "0.67044914", "0.6655414", "0.6651266", "0.663899", "0.6626353", "0.6618615", "0.66049945", "0.65774286", "0.65750486", "0.65731996", "0.6567166", "0.6560127", "0.6553824", "0.65528584", "0.6552333", "0.6547033", "0.6525714", "0.65237796", "0.6493352", "0.6493203", "0.64885366", "0.64762723", "0.64572936", "0.644181", "0.6435002", "0.6431398", "0.6418045", "0.64169043", "0.6414607", "0.64070475", "0.6405267", "0.6383354", "0.63779634", "0.63577557", "0.6355399", "0.63472414", "0.6339299", "0.633284", "0.6317008", "0.6306595", "0.62996376", "0.629857", "0.62898374", "0.6280789", "0.62529874", "0.62299937", "0.6229372", "0.6228506", "0.6208293", "0.6199634", "0.61956644", "0.61902165", "0.61675906" ]
0.7631488
10
Push reads supporter and donation records and pushes them downstream. Note that the records are maps of strings. Downstream will convert them to useful data.
func (rt *Runtime) Push(d chan map[string]string) (err error) { t := rt.API.NewTable("supporter(supporter_KEY)donation") offset := int32(0) count := 500 c := "donation.RESULT IN 0,-1" for count == 500 { m, err := t.LeftJoinMap(offset, count, c) if err != nil { return err } for _, r := range m { d <- r } count = len(m) log.Printf("Push: read %d from offset %d\n", count, offset) offset += int32(count) } close(d) return err }
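For context on how a Push function like the one above is typically consumed, the following is a minimal downstream sketch that drains the channel and converts the string maps into typed values. It is an illustration only: the field keys ("supporter_KEY", "amount", "Transaction_Date") and the date layout are assumptions, not column names confirmed by the source; the real keys come from the supporter/donation table join inside Push.

package main

import (
	"log"
	"strconv"
	"time"
)

// consume drains records produced by Push and converts selected string
// fields into typed values. Field keys and the date layout below are
// illustrative assumptions; adjust them to the actual table columns.
func consume(d chan map[string]string) {
	for r := range d {
		// Amount arrives as a string; parse it into a float.
		amount, err := strconv.ParseFloat(r["amount"], 64)
		if err != nil {
			log.Printf("skipping record with bad amount %q: %v", r["amount"], err)
			continue
		}
		// Dates also arrive as strings; "2006-01-02" is an assumed layout.
		when, err := time.Parse("2006-01-02", r["Transaction_Date"])
		if err != nil {
			log.Printf("skipping record with bad date %q: %v", r["Transaction_Date"], err)
			continue
		}
		log.Printf("supporter %s donated %.2f on %s",
			r["supporter_KEY"], amount, when.Format("2006-01-02"))
	}
}

// Typical wiring, assuming rt is a *Runtime as in the document above:
//   d := make(chan map[string]string, 100)
//   go rt.Push(d)
//   consume(d)
Because Push closes the channel when the table is exhausted, the range loop in consume terminates naturally once all records have been delivered.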
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Read(reader io.Reader) (rj RecordJar, err error) {\n\n\tb := bufio.NewReader(reader)\n\tr := make(Record)\n\n\tcurrentHeader := \"\"\n\tline := \"\"\n\nRECORDS:\n\tfor {\n\n\t\t// If we have a record on the go store it and allocate a new one\n\t\tif len(r) != 0 {\n\t\t\trj = append(rj, r)\n\t\t\tr = make(Record)\n\t\t}\n\n\t\t// Exit at EOF\n\t\tif err == io.EOF {\n\t\t\tbreak RECORDS\n\t\t}\n\n\t\t// Process record header lines\n\tHEADERS:\n\t\tfor {\n\t\t\tline, err = b.ReadString('\\n')\n\t\t\tline = strings.TrimSpace(line)\n\n\t\t\tswitch {\n\t\t\tcase line == HS:\n\t\t\t\tbreak HEADERS\n\t\t\tcase line == RS:\n\t\t\t\tcontinue RECORDS\n\t\t\tcase len(line) > 1 && line[0:2] == REM:\n\t\t\t\tcontinue HEADERS\n\t\t\t}\n\n\t\t\ttokens := splitHeader.FindStringSubmatch(line)\n\t\t\tnewHeader, data := strings.ToLower(tokens[1]), tokens[2]\n\n\t\t\tif newHeader != \"\" {\n\t\t\t\tcurrentHeader = newHeader\n\t\t\t}\n\n\t\t\tif _, ok := r[currentHeader]; ok {\n\t\t\t\tr[currentHeader] += \" \"\n\t\t\t}\n\t\t\tr[currentHeader] += data\n\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tcontinue RECORDS\n\t\t\t}\n\t\t}\n\n\t\t// Process free format data lines - between header separator (HS) and\n\t\t// record separator / EOF (RS).\n\t\tjoiner := \"\"\n\t\tfor {\n\t\t\tline, err = b.ReadString('\\n')\n\t\t\tline = strings.TrimRightFunc(line, unicode.IsSpace)\n\n\t\t\tif line == RS || line == \"\" && err == io.EOF {\n\t\t\t\tcontinue RECORDS\n\t\t\t}\n\n\t\t\tif line == \"\" {\n\t\t\t\tr[\":data:\"] += \"\\n\"\n\t\t\t\tjoiner = \"\\n\"\n\t\t\t} else {\n\t\t\t\tr[\":data:\"] += joiner + line\n\t\t\t\tjoiner = \" \"\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tcontinue RECORDS\n\t\t\t}\n\t\t}\n\t}\n\n\treturn rj, nil\n}", "func ReadSupporters(e *goengage.Environment, c1 chan goengage.Segment, done chan bool, id int) (err error) {\n\tlog.Printf(\"ReadSupporters %v: begin\\n\", id)\n\tfor true {\n\t\tr, ok := <-c1\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\t//Create a CSV filename for the group an see if the file exists.\n\t\tfilename := fmt.Sprintf(\"%v.csv\", r.Name)\n\t\tfilename = strings.Replace(filename, \"/\", \"-\", -1)\n\t\t_, err := os.Stat(filename)\n\t\tif err == nil || os.IsExist(err) {\n\t\t\tlog.Printf(\"ReadSupporters %v: %-32v skipped, file exists\\n\", id, r.Name)\n\t\t} else {\n\t\t\tlog.Printf(\"ReadSupporters %v: %-32v start\\n\", id, r.Name)\n\t\t\t// Create a file using the ID and write to it. 
We'll rename it to the group\n\t\t\t// when all of the supporters are gathered.\n\t\t\ttemp := fmt.Sprintf(\"%v.csv\", r.SegmentID)\n\t\t\tf, err := os.Create(temp)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tw := csv.NewWriter(f)\n\t\t\theaders := []string{\"SegmentID\", \"SegmentName\", \"SupporterID\", \"Email\"}\n\t\t\tw.Write(headers)\n\n\t\t\t// Read all supporters and write info to the group's CSV.\n\t\t\tcount := e.Metrics.MaxBatchSize\n\t\t\toffset := int32(0)\n\t\t\tfor count == e.Metrics.MaxBatchSize {\n\t\t\t\tpayload := goengage.SegmentMembershipRequestPayload{\n\t\t\t\t\tSegmentID: r.SegmentID,\n\t\t\t\t\tOffset: offset,\n\t\t\t\t\tCount: count,\n\t\t\t\t}\n\t\t\t\trqt := goengage.SegmentMembershipRequest{\n\t\t\t\t\tHeader: goengage.RequestHeader{},\n\t\t\t\t\tPayload: payload,\n\t\t\t\t}\n\t\t\t\tvar resp goengage.SegmentMembershipResponse\n\n\t\t\t\tn := goengage.NetOp{\n\t\t\t\t\tHost: e.Host,\n\t\t\t\t\tMethod: goengage.SearchMethod,\n\t\t\t\t\tEndpoint: goengage.SegmentSearchMembers,\n\t\t\t\t\tToken: e.Token,\n\t\t\t\t\tRequest: &rqt,\n\t\t\t\t\tResponse: &resp,\n\t\t\t\t}\n\t\t\t\tok := false\n\t\t\t\tfor !ok {\n\t\t\t\t\terr = n.Do()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tfmt.Printf(\"ReadSupporters %v: %-32v %v\\n\", id, r.Name, err)\n\t\t\t\t\t\ttime.Sleep(time.Second)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tok = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfor _, s := range resp.Payload.Supporters {\n\t\t\t\t\temail := goengage.FirstEmail(s)\n\t\t\t\t\tif email != nil {\n\t\t\t\t\t\ta := []string{r.SegmentID, r.Name, s.SupporterID, *email}\n\t\t\t\t\t\tw.Write(a)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tw.Flush()\n\t\t\t\tcount = resp.Payload.Count\n\t\t\t\toffset += int32(count)\n\t\t\t}\n\t\t\terr = os.Rename(temp, filename)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tlog.Printf(\"ReadSupporters %v: %-32v done\\n\", id, r.Name)\n\t\t}\n\t}\n\tdone <- true\n\tlog.Printf(\"ReadSupporters %v: end\\n\", id)\n\treturn nil\n}", "func NewPuller(\n\tctx context.Context,\n\ttempDir string,\n\tchunksPerTF int,\n\tsrcCS, sinkCS chunks.ChunkStore,\n\twalkAddrs WalkAddrs,\n\thashes []hash.Hash,\n\tstatsCh chan Stats,\n) (*Puller, error) {\n\t// Sanity Check\n\ths := hash.NewHashSet(hashes...)\n\tmissing, err := srcCS.HasMany(ctx, hs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif missing.Size() != 0 {\n\t\treturn nil, errors.New(\"not found\")\n\t}\n\n\ths = hash.NewHashSet(hashes...)\n\tmissing, err = sinkCS.HasMany(ctx, hs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif missing.Size() == 0 {\n\t\treturn nil, ErrDBUpToDate\n\t}\n\n\tif srcCS.Version() != sinkCS.Version() {\n\t\treturn nil, fmt.Errorf(\"cannot pull from src to sink; src version is %v and sink version is %v\", srcCS.Version(), sinkCS.Version())\n\t}\n\n\tsrcChunkStore, ok := srcCS.(nbs.NBSCompressedChunkStore)\n\tif !ok {\n\t\treturn nil, ErrIncompatibleSourceChunkStore\n\t}\n\n\twr, err := nbs.NewCmpChunkTableWriter(tempDir)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar pushLogger *log.Logger\n\tif dbg, ok := os.LookupEnv(\"PUSH_LOG\"); ok && strings.ToLower(dbg) == \"true\" {\n\t\tlogFilePath := filepath.Join(tempDir, \"push.log\")\n\t\tf, err := os.OpenFile(logFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, os.ModePerm)\n\n\t\tif err == nil {\n\t\t\tpushLogger = log.New(f, \"\", log.Lmicroseconds)\n\t\t}\n\t}\n\n\tp := &Puller{\n\t\twaf: walkAddrs,\n\t\tsrcChunkStore: srcChunkStore,\n\t\tsinkDBCS: sinkCS,\n\t\thashes: hash.NewHashSet(hashes...),\n\t\ttablefileSema: 
semaphore.NewWeighted(outstandingTableFiles),\n\t\ttempDir: tempDir,\n\t\twr: wr,\n\t\tchunksPerTF: chunksPerTF,\n\t\tpushLog: pushLogger,\n\t\tstatsCh: statsCh,\n\t\tstats: &stats{},\n\t}\n\n\tif lcs, ok := sinkCS.(chunks.LoggingChunkStore); ok {\n\t\tlcs.SetLogger(p)\n\t}\n\n\treturn p, nil\n}", "func (s *quicHandler) receiveDataFromZipperSenders() {\n\tfor {\n\t\tselect {\n\t\tcase receiver, ok := <-s.zipperReceiver:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tsinks := GetSinks(s.serverlessConfig, &s.connMap)\n\t\t\tif len(sinks) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tgo func() {\n\t\t\t\tfd := decoder.NewFrameDecoder(receiver)\n\t\t\t\tfor {\n\t\t\t\t\tbuf, err := fd.Read(false)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// send data to sinks\n\t\t\t\t\t\tfor _, sink := range sinks {\n\t\t\t\t\t\t\tgo sendDataToSink(sink, buf, \"[Zipper Receiver] sent frame to sink.\", \"❌ [Zipper Receiver] sent frame to sink failed.\")\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}\n}", "func (pl *Payload) push(x interface{}) {\n\tswitch x.(type) {\n\tcase DNCReport:\n\t\tpl.DNCReports = append(pl.DNCReports, x.(DNCReport))\n\tcase CNIReport:\n\t\tpl.CNIReports = append(pl.CNIReports, x.(CNIReport))\n\tcase NPMReport:\n\t\tpl.NPMReports = append(pl.NPMReports, x.(NPMReport))\n\tcase CNSReport:\n\t\tpl.CNSReports = append(pl.CNSReports, x.(CNSReport))\n\t}\n}", "func (rt *Runtime) Write(c chan Data) (err error) {\n\tf, err := os.Create(rt.Filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tw := csv.NewWriter(f)\n\ts := fmt.Sprintf(\"%v\\n%v\", SupporterFields, DonationFields)\n\ta := strings.Split(s, \"\\n\")\n\t//Headers\n\tw.Write(a)\n\tw.Flush()\n\n\tfor d := range c {\n\t\tvar r []string\n\t\tx := \"\"\n\t\tfor _, k := range a {\n\t\t\tswitch k {\n\t\t\tcase \"supporter_KEY\":\n\t\t\t\tx = d.SupporterKey\n\t\t\tcase \"First_Name\":\n\t\t\t\tx = d.FirstName\n\t\t\tcase \"Last_Name\":\n\t\t\t\tx = d.LastName\n\t\t\tcase \"Email\":\n\t\t\t\tx = d.Email\n\t\t\tcase \"Street\":\n\t\t\t\tx = d.Street\n\t\t\tcase \"Street_2\":\n\t\t\t\tx = d.Street2\n\t\t\tcase \"City\":\n\t\t\t\tx = d.City\n\t\t\tcase \"State\":\n\t\t\t\tx = d.State\n\t\t\tcase \"Zip\":\n\t\t\t\tx = d.Zip\n\t\t\tcase \"Country\":\n\t\t\t\tx = d.Country\n\t\t\tcase \"donation_KEY\":\n\t\t\t\tx = d.DonationKey\n\t\t\tcase \"Transaction_Date\":\n\t\t\t\tx = d.TransactionDate.Format(\"2006-02-01\")\n\t\t\tcase \"Tracking_Code\":\n\t\t\t\tx = d.TrackingCode\n\t\t\tcase \"Donation_Tracking_Code\":\n\t\t\t\tx = d.DonationTrackingCode\n\t\t\tcase \"Designation_Code\":\n\t\t\t\tx = d.DesignationCode\n\t\t\tcase \"Result\":\n\t\t\t\tx = d.Result\n\t\t\tcase \"TransactionType\":\n\t\t\t\tx = d.TransactionType\n\t\t\tcase \"amount\":\n\t\t\t\tx = fmt.Sprintf(\"%.2f\", d.Amount)\n\t\t\t}\n\t\t\tr = append(r, x)\n\t\t}\n\t\tw.Write(r)\n\t\tw.Flush()\n\t}\n\terr = f.Close()\n\treturn err\n}", "func writeRecords(\n\tclient *as.Client,\n\tsize int,\n) {\n var hospitals []string\n var insurers []string\n\n\tfor i := 0; i < size; i++ {\n // A new claims record\n var claim shared.Claim\n rand.Seed(time.Now().UnixNano())\n \n /*\n * Populate the fields of this claim record\n */\n claim.ClaimID = uuid.New().String()\n\n // HospitalID\n // Add entries with exisiting HospID or create a new HospID?\n if (rand.Intn(2) == 1) && len(hospitals) > 0 {\n // Pick from existing entries\n claim.HospitalID = hospitals[rand.Intn(len(hospitals))] \n } else {\n // New Hospital ID\n claim.HospitalID = 
uuid.New().String()\n hospitals = append(hospitals, claim.HospitalID)\n }\n\n // InsurerID\n // Add entries with exisiting InsurerID or create a new InsurerID?\n if (rand.Intn(2)==1) && len(insurers) > 0 {\n // Pick from existing entries\n claim.InsurerID = insurers[rand.Intn(len(insurers))] \n } else {\n // New Insurer ID\n claim.InsurerID =uuid.New().String() \n insurers = append(insurers, claim.InsurerID)\n }\n\n //ClaimFileTime - Subratract years,months and days from today\n claimTime := time.Now().AddDate(-1*rand.Intn(2), -1*rand.Intn(11), -1*rand.Intn(31))\n claim.ClaimFileTime = claimTime.Unix() \n\n //DischargeTime - a random hour between [0,25] subtracted from claim file time\n //25 is chosen so that some records wil exceed the 24 hour discharge filing period\n claim.DischargeTime = claimTime.Add(time.Duration(-1*rand.Intn(25))*time.Hour).Unix()\n \n //ClaimAmt - Minimum is Rs.10, Maximum Rs. 1cr\n minAmt := 10\n maxAmt := 10000000\n claim.ClaimAmt = float32(minAmt + rand.Intn(maxAmt)) * rand.Float32()\n\n //Penalty, to a maximum of the claimAmt\n claim.Penalty = 0.01*claim.ClaimAmt*float32(rand.Intn(100))\n\n //ClaimState\n claim.ClaimState = shared.ClaimState(int(shared.ClaimFiled) + rand.Intn(int(shared.MaxClaimState)))\n\n //ClaimType\n switch claim.ClaimState {\n case shared.ClaimFiled: \n fallthrough\n case shared.ClaimDocumented:\n fallthrough\n case shared.ClaimOnHold:\n claim.ClaimType = shared.ClaimNoActionType\n\n case shared.ClaimApproved:\n fallthrough\n case shared.ClaimPaid:\n claim.ClaimType = shared.ClaimAcceptedType\n\n case shared.ClaimRejected:\n claim.ClaimType = shared.ClaimRejectedType\n \n\n default: // ClaimAcknowledged, or ClaimContested\n if rand.Float32() >= 0.5 {\n claim.ClaimType = shared.ClaimAcceptedType\n } else {\n claim.ClaimType = shared.ClaimRejectedType\n }\n }\n\n //Audit Status\n if (claim.ClaimState != shared.ClaimFiled &&\n claim.ClaimState != shared.ClaimDocumented) {\n numAudited := float32(0.1) //10% get audited\n numFraud := float32(0.01) //1% are fradulent\n auditRand := rand.Float32()\n if auditRand < numFraud/2 {\n claim.AuditStatus = shared.AuditedAndFraud\n } else if auditRand < numFraud {\n claim.AuditStatus = shared.AuditedAndNotFraud\n } else if auditRand < numAudited {\n claim.AuditStatus = shared.AuditUnderway\n } else {\n claim.AuditStatus = shared.NotAudited\n }\n }\n\n //AuditLog\n if (claim.AuditStatus != shared.NotAudited) {\n claim.AuditLog = \"The case was audited\"\n }\n \n //logTrail - This is updated as the claim's state changes.\n // This is used in debugging, but will be left empty in this dummy DB\n\n //rejectCode\n if claim.ClaimType == shared.ClaimRejectedType { \n claim.RejectCode = shared.RejectCode(rand.Intn(int(shared.MaxRejectCodes)) + 1)\n }\n\n //TDS Head\n if claim.ClaimType == shared.ClaimAcceptedType {\n claim.TDSHead = \"Dr. 
Rajeev Kapoor\"\n }\n\n //AckTime\n ackTime := claimTime.AddDate(0, rand.Intn(2),rand.Intn(31))\n claim.AckTime = ackTime.Unix()\n\n //PaymentInfo\n paymentInfo := shared.PaymentInfo{123456.70, ackTime.Add(time.Duration(-3)*time.Hour).Unix(), \n claim.InsurerID, claim.HospitalID, \"YHO2648721KSA\", \"Paid and Approved by Admin of Insurer\" }\n\n //Marshal PaymentInfo\n var err error\n claim.PaymentInfo, err = json.Marshal(paymentInfo)\n if err != nil {\n log.Println(\"Marshalling error:\", err)\n }\n\n\t\tkey, _ := as.NewKey(*shared.Namespace, *shared.Set, claim.ClaimID)\n\n // Write all field names and values into the corresponding index of a binMap\n binsMap := make(map[string]interface{})\n\t\tval := reflect.Indirect(reflect.ValueOf(claim))\n for i := 0; i < val.NumField(); i++ {\n binName := val.Type().Field(i).Name\n binValue := val.Field(i).Interface()\n binsMap[binName] = binValue\n\t\t //log.Printf(\"Put: ns=%s set=%s key=%s bin=%s value=%s\",\n\t\t\t// key.Namespace(), key.SetName(), key.Value(), \n // binName, binsMap[binName])\n }\n\t\terr = client.Put(shared.WritePolicy, key, binsMap)\n if err != nil {\n shared.PanicOnError(err)\n }\n\t}\n}", "func (log *Logger) Records(msg string, fd interface{}) {\n\tlog.pipeline[msg] = fd\n}", "func cmdProducer(cmdFile string, recordChan chan recWrap) {\n\tdefer wg.Done()\n\tcmds, err := os.Open(cmdFile)\n\tif err != nil{\n\t\tlog.Fatal(err)\n\t}\n\tdefer cmds.Close()\n\n\tr := csv.NewReader(bufio.NewReader(cmds))\n\tfor {\n\t\trecord, err := r.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\n\t\trecordChan <- recWrap{input: record}\n\t}\n}", "func (th *DataRequestHandler) pushesToRequests() {\n\tstop := false\n\tpreReadItem := ^Index(0)\n\t// convert pushes into requests\n\tfor {\n\t\t// buffer a bunch of pushes\n\t\ttodoLen := len(th.pushes)\n\t\thitWindow := false\n\t\tcheckWindowHit := func(t Index) {\n\t\t\tif th.lastRequest == nil {\n\t\t\t\tth.lastRequest = &DataRequest{Start: 0, End: defaultWindowSpan}\n\t\t\t}\n\t\t\tif t >= th.lastRequest.Start && t < th.lastRequest.End {\n\t\t\t\thitWindow = true\n\t\t\t}\n\t\t}\n\t\t// we pre-read an item to wait for events, but we don't want to forget about this item\n\t\tif preReadItem != ^Index(0) {\n\t\t\tcheckWindowHit(preReadItem)\n\t\t}\n\t\tfor i := 0; i < todoLen; i++ {\n\t\t\tt, ok := <-th.pushes\n\t\t\tif !ok {\n\t\t\t\t// we just closed, stop processing after this\n\t\t\t\tstop = true\n\t\t\t}\n\t\t\tcheckWindowHit(t)\n\t\t}\n\t\t// if there is no work to do, wait for a bit, and check again\n\t\tif !hitWindow {\n\t\t\t// maybe we just need to stop because we can't receive pushes anymore\n\t\t\tif stop {\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// no stopping yet, but no work to do either, wait for an update, then wait another second (to batch pushes), then form a request\n\t\t\titem, ok := <-th.pushes\n\t\t\t// it may also be the last item\n\t\t\tif !ok {\n\t\t\t\tstop = true\n\t\t\t}\n\t\t\tpreReadItem = item\n\t\t\ttime.Sleep(time.Second)\n\t\t\tcontinue\n\t\t} else {\n\t\t\t// there are pushes within the client range, repeat the client request.\n\t\t\t// trigger, if it's not already.\n\t\t\tif len(th.gotRequest) == 0 {\n\t\t\t\tth.gotRequest <- true\n\t\t\t}\n\n\t\t\t// maybe we just need to stop because we can't receive pushes anymore\n\t\t\tif stop {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// continue to process more pushes, if any are remaining in the channel\n\t\t\tcontinue\n\t\t}\n\t}\n}", "func Supporter(rt *Runtime, c chan goengage.Fundraise) (err error) 
{\n\trt.Log.Println(\"Supporter: start\")\n\tfor true {\n\t\tr, ok := <-c\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tif rt.GoodYear(r.ActivityDate) {\n\t\t\trt.Log.Printf(\"%v Supporter\\n\", r.ActivityID)\n\n\t\t\ts := goengage.Supporter{\n\t\t\t\tSupporterID: r.SupporterID,\n\t\t\t}\n\t\t\trt.DB.FirstOrInit(&s, s)\n\n\t\t\t// rt.DB.Where(\"supporter_id = ?\", r.SupporterID).First(&s)\n\t\t\tif s.CreatedDate == nil {\n\t\t\t\tt, err := goengage.FetchSupporter(rt.Env, r.SupporterID)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif t == nil {\n\t\t\t\t\tx := time.Now()\n\t\t\t\t\ts.CreatedDate = &x\n\t\t\t\t} else {\n\t\t\t\t\ts = *t\n\t\t\t\t}\n\t\t\t\trt.DB.Create(&s)\n\t\t\t}\n\t\t}\n\t}\n\trt.Log.Println(\"Supporter: end\")\n\treturn nil\n}", "func (s *quicHandler) receiveDataFromSources() {\n\tfor {\n\t\tselect {\n\t\tcase item, ok := <-s.source:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// one stream for each flows/sinks.\n\t\t\tflows, sinks := Build(s.serverlessConfig, &s.connMap)\n\t\t\tstream := DispatcherWithFunc(flows, item)\n\n\t\t\tgo func() {\n\t\t\t\tfor customer := range stream.Observe(rxgo.WithErrorStrategy(rxgo.ContinueOnError)) {\n\t\t\t\t\tif customer.Error() {\n\t\t\t\t\t\tfmt.Println(customer.E.Error())\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tvalue := customer.V.([]byte)\n\n\t\t\t\t\t// sinks\n\t\t\t\t\tfor _, sink := range sinks {\n\t\t\t\t\t\tgo sendDataToSink(sink, value, \"Zipper sent frame to sink\", \"❌ Zipper sent frame to sink failed.\")\n\t\t\t\t\t}\n\n\t\t\t\t\t// Zipper-Senders\n\t\t\t\t\tfor _, sender := range s.zipperSenders {\n\t\t\t\t\t\tif sender == nil {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tgo sendDataToSink(sender, value, \"[Zipper Sender] sent frame to downstream zipper.\", \"❌ [Zipper Sender] sent frame to downstream zipper failed.\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}\n}", "func Read(delim rune) func(<-chan interface{}, chan<- interface{}, chan<- error) {\n\tif delim == rune(0) {\n\t\tdelim = ','\n\t}\n\treturn func(in <-chan interface{}, out chan<- interface{}, errs chan<- error) {\n\t\tvar header []string\n\n\t\tfor m := range in {\n\t\t\tr := csv.NewReader(strings.NewReader(m.(fmt.Stringer).String()))\n\t\t\tr.Comma = delim\n\t\t\t//r.ReuseRecord = true\n\t\t\tr.LazyQuotes = true\n\n\t\t\trecords, err := r.ReadAll()\n\t\t\tif err != nil {\n\t\t\t\terrs <- err\n\t\t\t}\n\n\t\t\tfor _, rec := range records {\n\t\t\t\tif header == nil {\n\t\t\t\t\theader = rec\n\t\t\t\t} else {\n\t\t\t\t\trow := message.NewRecord()\n\t\t\t\t\tfor i, v := range header {\n\t\t\t\t\t\trow.Set(v, rec[i])\n\t\t\t\t\t}\n\t\t\t\t\tout <- row\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\n\t}\n}", "func WorkloadPush(ctx context.Context, db *mongo.Database, w ...Workload) error {\n\tcol := db.Collection(queueCollection)\n\tdocs := make([]interface{}, 0, len(w))\n\tfor _, wl := range w {\n\t\tdocs = append(docs, wl)\n\t}\n\t_, err := col.InsertMany(ctx, docs)\n\n\treturn err\n}", "func (fprovider *fileProvider) ReadData(ch chan *Item) error {\n\tif fprovider.logFile == nil || fprovider.config == \"\" {\n\t\tpanic(fileError)\n\t}\n\tconfig := strings.NewReader(fprovider.config)\n\treader, err := gonx.NewNginxReader(fprovider.logFile, config, \"main\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"create file reader failure %w\", err)\n\t}\n\n\tfor {\n\t\trec, err := reader.Read()\n\t\tif err == io.EOF {\n\t\t\tclose(ch)\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\tch <- &Item{\n\t\t\t\tError: fmt.Errorf(\"read log 
failure\"),\n\t\t\t}\n\t\t} else {\n\t\t\trequest := readDataFromGnoxEntry(rec, \"request\")\n\t\t\tremoteAddr := readDataFromGnoxEntry(rec, \"remote_addr\")\n\t\t\tremoteUser := readDataFromGnoxEntry(rec, \"remote_user\")\n\t\t\ttimeStr := readDataFromGnoxEntry(rec, \"time_local\")\n\t\t\ttimeLocal, err := time.Parse(\"02/Jan/2006:15:04:05 -0700\", timeStr)\n\t\t\tif err != nil {\n\t\t\t\ttimeLocal = time.Now() //TODO 这里应该返回错误, 并阻止程序继续运行\n\t\t\t}\n\t\t\tstatus, err := strconv.Atoi(readDataFromGnoxEntry(rec, \"status\"))\n\t\t\tif err != nil {\n\t\t\t\tstatus = 0 //TODO 这里应该返回错误, 并阻止程序继续运行\n\t\t\t}\n\t\t\tbodyBytes, err := strconv.Atoi(readDataFromGnoxEntry(rec, \"body_bytes_sent\"))\n\t\t\tif err != nil {\n\t\t\t\tbodyBytes = 0 //TODO 这里应该返回错误, 并阻止程序继续运行\n\t\t\t}\n\t\t\thttpReferer := readDataFromGnoxEntry(rec, \"http_referer\")\n\t\t\thttpUserAgent := readDataFromGnoxEntry(rec, \"http_user_agent\")\n\n\t\t\tch <- &Item{\n\t\t\t\tLog{\n\t\t\t\t\tRequest: request,\n\t\t\t\t\tRemoteAddr: remoteAddr,\n\t\t\t\t\tRemoteUser: remoteUser,\n\t\t\t\t\tTimeLocal: timeLocal,\n\t\t\t\t\tStatus: statusCode(status),\n\t\t\t\t\tBodyBytes: uint(bodyBytes),\n\t\t\t\t\tHTTPReferer: httpReferer,\n\t\t\t\t\tHTTPUserAgent: httpUserAgent,\n\t\t\t\t},\n\t\t\t\tnil,\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func convertAndStore(related relatedArticle, googleArticle chan article) {\n\trArticle := article{Title: related.RelatedTitle, Topic: related.RelatedTopic, Author: related.RelatedAuthor, Link: related.RelatedLink, Date: related.RelatedDate}\n\tgoogleArticle <- rArticle\n\tgo articleWorker(googleArticle)\n\n}", "func (m *mapperAndPusher) MapAndPush(line string) error {\n\n\tfields := strings.Split(line, \"\\t\")\n\n\t//translate idfield into Pilosa column sequential number using levelDB translator\n\tcol, err := m.translator.GetColID(fields[logFields[\"idfield\"]])\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"getting column id from subject\")\n\t}\n\n\t//parse ts field in logs that would be used to for pilosa rows\n\tts, err := time.Parse(\"2006-01-02 15:04:05\", fields[logFields[\"ts\"]])\n\tif err != nil {\n\t\tlog.Println(errors.Wrapf(err, \"could not convert time for id %s and time %s\", fields[logFields[\"idfield\"]],\n\t\t\tfields[logFields[\"ts\"]]))\n\t}\n\trowID, err := m.translator.GetRowID(\"ofield\", fields[logFields[\"ofield\"]])\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"translating ofield\")\n\t}\n\tm.index.AddColumnNumeric(\"ofield\", col, rowID, ts.UnixNano())\n\n\trowID, err = m.translator.GetRowID(\"cfield\", fields[logFields[\"cfield\"]])\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"translating cfield\")\n\t}\n\tm.index.AddColumnNumeric(\"cfield\", col, rowID, ts.UnixNano())\n\n\tstr := fields[logFields[\"sfield_list\"]]\n\tif str == \"\" { //don't process empty list\n\t\treturn nil\n\t}\n\tsfields := strings.Split(str, \",\")\n\tfor _, v := range sfields {\n\t\trowID, err = m.translator.GetRowID(\"sfield\", v)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"translating sfield\")\n\t\t}\n\t\tm.index.AddColumnNumeric(\"sfield\", col, rowID, ts.UnixNano())\n\t}\n\n\treturn nil\n}", "func FWJRconsumer(msg *stomp.Message) ([]Lfnsite, int64, string, string, error) {\n\t//first to check to make sure there is something in msg,\n\t//otherwise we will get error:\n\t//Failed to continue - runtime error: invalid memory address or nil pointer dereference\n\t//[signal SIGSEGV: segmentation violation]\n\t//\n\tvar lfnsite []Lfnsite\n\tvar ls 
Lfnsite\n\tatomic.AddUint64(&msgreceived, 1)\n\tif msg == nil || msg.Body == nil {\n\t\treturn lfnsite, 0, \"\", \"\", errors.New(\"Empty message\")\n\t}\n\t//\n\tif Config.Verbose > 2 {\n\t\tlog.Println(\"*****************Source AMQ message of wmarchive*********************\")\n\t\tlog.Println(\"Source AMQ message of wmarchive: \", string(msg.Body))\n\t\tlog.Println(\"*******************End AMQ message of wmarchive**********************\")\n\t}\n\t// Define FWJR Recod\n\ttype MetaData struct {\n\t\tTs int64 `json:\"ts\"`\n\t\tJobType string `json:\"jobtype\"`\n\t\tWnName string `json:\"wn_name\"`\n\t}\n\n\ttype InputLst struct {\n\t\tLfn int `json:\"lfn\"`\n\t\tEvents int64 `json:\"events\"`\n\t\tGUID string `json:\"guid\"`\n\t}\n\ttype Step struct {\n\t\tInput []InputLst `json:\"input\"`\n\t\tSite string `json:\"site\"`\n\t}\n\ttype FWJRRecord struct {\n\t\tLFNArray []string\n\t\tLFNArrayRef []string\n\t\tFallbackFiles []int `json:\"fallbackFiles\"`\n\t\tMetadata MetaData `json:\"meta_data\"`\n\t\tSteps []Step `json:\"steps\"`\n\t}\n\tvar rec FWJRRecord\n\terr := json.Unmarshal(msg.Body, &rec)\n\tif err != nil {\n\t\tlog.Printf(\"Enable to Unmarchal input message. Error: %v\", err)\n\t\treturn lfnsite, 0, \"\", \"\", err\n\t}\n\tif Config.Verbose > 2 {\n\t\tlog.Printf(\"******PARSED FWJR record******: %+v\", rec)\n\t}\n\t// process received message, e.g. extract some fields\n\tvar ts int64\n\tvar jobtype string\n\tvar wnname string\n\t// Check the data\n\tif rec.Metadata.Ts == 0 {\n\t\tts = time.Now().Unix()\n\t} else {\n\t\tts = rec.Metadata.Ts\n\t}\n\n\tif len(rec.Metadata.JobType) > 0 {\n\t\tjobtype = rec.Metadata.JobType\n\t} else {\n\t\tjobtype = \"unknown\"\n\t}\n\n\tif len(rec.Metadata.WnName) > 0 {\n\t\twnname = rec.Metadata.WnName\n\t} else {\n\t\twnname = \"unknown\"\n\t}\n\t//\n\tfor _, v := range rec.Steps {\n\t\tls.site = v.Site\n\t\tvar goodlfn []string\n\t\tfor _, i := range v.Input {\n\t\t\tif len(i.GUID) > 0 && i.Events != 0 {\n\t\t\t\tlfn := i.Lfn\n\t\t\t\tif !insliceint(rec.FallbackFiles, lfn) {\n\t\t\t\t\tif inslicestr(rec.LFNArrayRef, \"lfn\") {\n\t\t\t\t\t\tif lfn < len(rec.LFNArray) {\n\t\t\t\t\t\t\tgoodlfn = append(goodlfn, rec.LFNArray[lfn])\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t\tif len(goodlfn) > 0 {\n\t\t\tls.lfn = goodlfn\n\t\t\tlfnsite = append(lfnsite, ls)\n\t\t}\n\t}\n\treturn lfnsite, ts, jobtype, wnname, nil\n}", "func (a *AggregationProcess) aggregateRecords(incomingRecord, existingRecord entities.Record, fillSrcStats, fillDstStats bool) error {\n\tif a.aggregateElements == nil {\n\t\treturn nil\n\t}\n\n\tfor _, element := range a.aggregateElements.NonStatsElements {\n\t\tif ieWithValue, exist := incomingRecord.GetInfoElementWithValue(element); exist {\n\t\t\tswitch ieWithValue.Element.Name {\n\t\t\tcase \"flowEndSeconds\":\n\t\t\t\texistingIeWithValue, _ := existingRecord.GetInfoElementWithValue(element)\n\t\t\t\t// Update flow end timestamp if it is latest.\n\t\t\t\tif ieWithValue.Value.(uint32) > existingIeWithValue.Value.(uint32) {\n\t\t\t\t\texistingIeWithValue.Value = ieWithValue.Value\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tklog.Errorf(\"Fields with name %v is not supported in aggregation fields list.\", element)\n\t\t\t}\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"element with name %v in nonStatsElements not present in the incoming record\", element)\n\t\t}\n\t}\n\n\tstatsElementList := a.aggregateElements.StatsElements\n\tantreaSourceStatsElements := 
a.aggregateElements.AggregatedSourceStatsElements\n\tantreaDestinationStatsElements := a.aggregateElements.AggregatedDestinationStatsElements\n\tfor i, element := range statsElementList {\n\t\tisDelta := false\n\t\tif strings.Contains(element, \"Delta\") {\n\t\t\tisDelta = true\n\t\t}\n\t\tif ieWithValue, exist := incomingRecord.GetInfoElementWithValue(element); exist {\n\t\t\texistingIeWithValue, _ := existingRecord.GetInfoElementWithValue(element)\n\t\t\t// Update the corresponding element in existing record.\n\t\t\tif !isDelta {\n\t\t\t\texistingIeWithValue.Value = ieWithValue.Value\n\t\t\t} else {\n\t\t\t\t// We are simply adding the delta stats now. We expect delta stats to be\n\t\t\t\t// reset after sending the record from flowKeyMap in aggregation process.\n\t\t\t\t// Delta stats from source and destination nodes are added, so we will have\n\t\t\t\t// two times the stats approximately.\n\t\t\t\t// For delta stats, it is better to use source and destination specific\n\t\t\t\t// stats.\n\t\t\t\texistingIeWithValue.Value = existingIeWithValue.Value.(uint64) + ieWithValue.Value.(uint64)\n\t\t\t}\n\t\t\t// Update the corresponding source element in antreaStatsElement list.\n\t\t\tif fillSrcStats {\n\t\t\t\texistingIeWithValue, _ := existingRecord.GetInfoElementWithValue(antreaSourceStatsElements[i])\n\t\t\t\tif !isDelta {\n\t\t\t\t\texistingIeWithValue.Value = ieWithValue.Value\n\t\t\t\t} else {\n\t\t\t\t\texistingIeWithValue.Value = existingIeWithValue.Value.(uint64) + ieWithValue.Value.(uint64)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Update the corresponding destination element in antreaStatsElement list.\n\t\t\tif fillDstStats {\n\t\t\t\texistingIeWithValue, _ := existingRecord.GetInfoElementWithValue(antreaDestinationStatsElements[i])\n\t\t\t\tif !isDelta {\n\t\t\t\t\texistingIeWithValue.Value = ieWithValue.Value\n\t\t\t\t} else {\n\t\t\t\t\texistingIeWithValue.Value = existingIeWithValue.Value.(uint64) + ieWithValue.Value.(uint64)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"element with name %v in statsElements not present in the incoming record\", element)\n\t\t}\n\t}\n\treturn nil\n}", "func (d *Release) Push(only ...string) error {\n\tif _ = d.merge(); len(d.src) == 0 {\n\t\treturn ErrMissing\n\t}\n\tif d.rebase(only); len(d.src) == 0 {\n\t\treturn ErrMissing\n\t}\n\tvar g errgroup.Group\n\tfor _, server := range d.to {\n\t\tc := server\n\t\tg.Go(func() error {\n\t\t\treturn c.Bulk(d.src)\n\t\t})\n\t}\n\td.err = g.Wait()\n\treturn d.err\n}", "func Produce(delay time.Duration, filename string) error {\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\tlog.Fatalf(\"unable to open file %s, got error: %s\", filename, err)\n\t}\n\trecords := make(chan models.Record)\n\tgo func() {\n\t\tlog.Println(\"extraction begun\")\n\t\trabbit.ExtractCsvRecords(file, records)\n\t}()\n\n\tmq, err := rabbit.Init(RabbitURI, \"example\")\n\tif err != nil {\n\t\tlog.Fatalf(\"unable to init rabbitmq: %s\", err)\n\t}\n\tdefer mq.Close()\n\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < WORKERS; i++ {\n\t\twg.Add(1)\n\t\tgo rabbit.Publisher(i, mq, records, &wg)\n\t}\n\tlog.Println(\"waiting on workers...\")\n\twg.Wait()\n\treturn nil\n}", "func (prod *Firehose) Produce(workers *sync.WaitGroup) {\n\tprod.AddMainWorker(workers)\n\n\tprod.client = firehose.New(session.New(prod.config))\n\tprod.TickerMessageControlLoop(prod.bufferMessage, prod.flushFrequency, prod.sendBatchOnTimeOut)\n}", "func (t *Timestream) buildWriteRecords(point telegraf.Metric) []types.Record {\n\tif t.UseMultiMeasureRecords 
{\n\t\treturn t.buildMultiMeasureWriteRecords(point)\n\t}\n\treturn t.buildSingleWriteRecords(point)\n}", "func produceAPIRecord(apiURL string, recordChan chan<- *gosince.APIRecord) {\n\tresp, err := http.Get(apiURL)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\tapiVersion := filepath.Base(apiURL)\n\tapiVersion = strings.TrimPrefix(apiVersion, \"go\")\n\tapiVersion = strings.TrimSuffix(apiVersion, \".txt\")\n\tscanner := bufio.NewScanner(resp.Body)\n\tfor scanner.Scan() {\n\t\ts := strings.TrimSpace(scanner.Text())\n\t\t// ingore empty lines and comment lines.\n\t\tif len(s) == 0 || strings.HasPrefix(s, \"#\") {\n\t\t\tcontinue\n\t\t}\n\t\tif r, err := getAPIRecord(s); err != nil {\n\t\t\tlog.Println(\"ERROR: \", err)\n\t\t} else {\n\t\t\tr.Version = apiVersion\n\t\t\tr.GolangURL = constructGolangURL(r.Category, r.Name, r.PackageName, r.Description)\n\t\t\trecordChan <- r\n\t\t}\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\tlog.Println(\"ERROR: \", err)\n\t}\n}", "func DispatchPush(config Config, results map[int][]string) {\n\tif config.StorageDestination == \"postgres\" {\n\t\t// Push CDRs to PostgreSQL\n\t\tpc := new(PGPusher)\n\t\tpc.Init(config.PGDatasourcename, config.CDRFields, config.SwitchIP, config.CDRSourceType, config.TableDestination)\n\t\terr := pc.Push(results)\n\t\tif err != nil {\n\t\t\tlog.Error(err.Error())\n\t\t}\n\t} else if config.StorageDestination == \"riak\" {\n\t\t// Push CDRs to Riak\n\t\trc := new(RiakPusher)\n\t\trc.Init(config.RiakConnect, config.CDRFields, config.SwitchIP, config.CDRSourceType, config.RiakBucket)\n\t\terr := rc.Push(results)\n\t\tif err != nil {\n\t\t\tlog.Error(err.Error())\n\t\t}\n\t}\n}", "func (a *kinesisFirehoseWriter) toRecords(msg message.Batch) ([]*firehose.Record, error) {\n\tentries := make([]*firehose.Record, msg.Len())\n\n\terr := msg.Iter(func(i int, p *message.Part) error {\n\t\tentry := firehose.Record{\n\t\t\tData: p.AsBytes(),\n\t\t}\n\n\t\tif len(entry.Data) > mebibyte {\n\t\t\ta.log.Errorf(\"part %d exceeds the maximum Kinesis Firehose payload limit of 1 MiB\\n\", i)\n\t\t\treturn component.ErrMessageTooLarge\n\t\t}\n\n\t\tentries[i] = &entry\n\t\treturn nil\n\t})\n\n\treturn entries, err\n}", "func (c ProwlClient) Push(n Notification) error {\n\n\tkeycsv := strings.Join(n.apikeys, \",\")\n\n\tvals := url.Values{\n\t\t\"apikey\": []string{keycsv},\n\t\t\"application\": []string{n.Application},\n\t\t\"description\": []string{n.Description},\n\t\t\"event\": []string{n.Event},\n\t\t\"priority\": []string{string(n.Priority)},\n\t}\n\n\tif n.URL != \"\" {\n\t\tvals[\"url\"] = []string{n.URL}\n\t}\n\n\tif c.ProviderKey != \"\" {\n\t\tvals[\"providerkey\"] = []string{c.ProviderKey}\n\t}\n\n\tr, err := http.PostForm(apiURL+\"/add\", vals)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer r.Body.Close()\n\n\tif r.StatusCode != 200 {\n\t\terr = decodeError(r.Status, r.Body)\n\t}\n\n\treturn err\n}", "func (log *LogFile) tailPush(sym *SymFile, newRecSize uint32) error {\n\n\t// lap >= 2\n\t// |-----|-----|------|------|.......|\n\t// ^ ^ ^\n\t// head tail maxsize\n\t// |-gap-|\n\t// loop reading in records from the tail\n\t// until the accumulated size (including head-tail gap is greater than the new rec size\n\t// tail points to oldest complete record\n\n\ttailGap := log.tailOffset - log.headOffset\n\tsizeAvailable := tailGap\n\tstdlog.Printf(\"Tail push for new rec size of %v - avail of %v\\n\", newRecSize, sizeAvailable)\n\tif tailGap >= 0 { // tail in front of head\n\t // set read 
pos to the tail\n\t stdlog.Printf(\"Seek to tail %v\", log.tailOffset)\n\t\tlog.entryReadFile.Seek(int64(log.tailOffset), 0)\n\t\tfor {\n\t\t\tif uint64(newRecSize) <= sizeAvailable {\n\t\t\t\t// moved tail far enough and we have space for a new record\n\t\t\t\tstdlog.Printf(\"Moved tail far enough. available=%v, newRecSize=%v\\n\", sizeAvailable, newRecSize)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tvar err error\n\t\t\tvar entry LogEntry\n\t\t\t\n\t\t\t// read entry from tail - so need to set read file pos to the tail offset\n\t\t\tif log.tailOffset >= log.numSizeBytes {\n\t\t\t\tstdlog.Printf(\"Tail has reached end of data %v, equivalent to EOF\", log.numSizeBytes)\n\t\t\t\terr = io.EOF\n\t\t\t} else {\n\t\t\t\tentry, err = log.ReadEntryData(sym)\n\t\t\t}\n\t\t\tif err == nil {\n\t\t\t\treclen := entry.SizeBytes()\n\t\t\t\tsizeAvailable += uint64(reclen) // size engulfs old tail record\n\t\t\t\tlog.tailOffset += uint64(reclen) // tail moves forward to newer record\n\t\t\t\tlog.NumEntries--\n\t\t\t\tstdlog.Printf(\"Move tail over 1 record, tail=%v, avail=%v, numRecs=%v\\n\", log.tailOffset, sizeAvailable, log.NumEntries)\n\t\t\t} else if err == io.EOF {\n\t\t\t\tstdlog.Printf(\"We hit EOF, no more tail entries to read\\n\")\n\t\t\t\t// we hit the end and no more tail entries to read\n\t\t\t\t// BUT if there is a gap at the end it might be\n\t\t\t\t// big enough for the head entry\n\t\t\t\t// compare tailOffset with maxsize\n\t\t\t\t// |------|000000000|\n\t\t\t\t// ^ ^---------^\n\t\t\t\t// tail maxsize\n\t\t\t\tendGap := log.maxSizeBytes - log.tailOffset\n\t\t\t\tif uint64(newRecSize) <= sizeAvailable+endGap {\n\t\t\t\t\t// then fit in the end gap\n\t\t\t\t\tstdlog.Printf(\"Fit into end gap\\n\")\n\t\t\t\t\tsizeAvailable += endGap\n\t\t\t\t\t\n\t\t\t\t\tlog.numSizeBytes = log.headOffset + uint64(newRecSize)\n\t\t\t\t\tstdlog.Printf(\"Update numSizeBytes as we have moved into gap: %v\", log.numSizeBytes)\n\t\t\t\t} else {\n\t\t\t\t\t// zero out where head is and move head around\n\t\t\t\t\tstdlog.Printf(\"Zero out where head is and move head around to the start (wrap)\\n\")\n\t\t\t\t\tsizeAvailable = 0\n\t\t\t\t\tlog.numSizeBytes = log.headOffset\n\t\t\t\t\tstdlog.Printf(\"Update numSizeBytes to head %v before moving head to zero\", log.numSizeBytes)\n\t\t\t\t\tlog.headOffset = 0\n\t\t\t\t\tlog.wrapNum++\n\t\t\t\t\tlog.setWriteZeroPos()\n\t\t\t\t\tlog.nextLogID.wrap()\n\t\t\t\t}\n\t\t\t\tlog.tailOffset = 0 // wrap around tail\n\t\t\t\terr = log.setReadZeroPos()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} // for each tail record\n\t} // if\n\treturn nil\n}", "func (s *sink) drain() {\n\t// If not lingering, before we begin draining, sleep a tiny bit. 
This\n\t// helps when a high volume new sink began draining with no linger;\n\t// rather than immediately eating just one record, we allow it to\n\t// buffer a bit before we loop draining.\n\tif s.cl.cfg.linger == 0 && !s.cl.cfg.manualFlushing {\n\t\ttime.Sleep(5 * time.Millisecond)\n\t}\n\n\ts.cl.producer.incDrains()\n\tdefer s.cl.producer.decDrains()\n\n\tagain := true\n\tfor again {\n\t\tif s.cl.producer.isAborting() {\n\t\t\ts.drainState.hardFinish()\n\t\t\treturn\n\t\t}\n\n\t\ts.maybeBackoff()\n\n\t\tsem := s.inflightSem.Load().(chan struct{})\n\t\tsem <- struct{}{}\n\n\t\tvar req *produceRequest\n\t\tvar txnReq *kmsg.AddPartitionsToTxnRequest\n\t\treq, txnReq, again = s.createReq()\n\n\t\t// If we created a request with no batches, everything may be\n\t\t// failing or lingering. Release the sem and continue.\n\t\tif len(req.batches) == 0 {\n\t\t\tagain = s.drainState.maybeFinish(again)\n\t\t\t<-sem\n\t\t\tcontinue\n\t\t}\n\n\t\t// At this point, we need our producer ID.\n\t\tid, epoch, err := s.cl.producerID()\n\t\tif err == nil && txnReq != nil {\n\t\t\ttxnReq.ProducerID = id\n\t\t\ttxnReq.ProducerEpoch = epoch\n\t\t\terr = s.doTxnReq(req, txnReq)\n\t\t}\n\n\t\t// If the producer ID fn or txn req errored, we fail everything.\n\t\t// The error is unrecoverable except in some specific instances.\n\t\t// We do not need to clear the addedToTxn flag for any recBuf\n\t\t// it was set on, since producer id recovery resets the flag.\n\t\tif err != nil {\n\t\t\ts.cl.cfg.logger.Log(LogLevelInfo, \"InitProducerID or AddPartitionsToTxn, failing producer id\",\n\t\t\t\t\"err\", err,\n\t\t\t)\n\t\t\ts.cl.failProducerID(req.producerID, req.producerEpoch, err)\n\t\t\tfor _, partitions := range req.batches {\n\t\t\t\tfor _, batch := range partitions {\n\t\t\t\t\tbatch.owner.failAllRecords(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tagain = s.drainState.maybeFinish(again)\n\t\t\t<-sem\n\t\t\tcontinue\n\t\t}\n\n\t\t// Again we check if there are any batches to send: our txn req\n\t\t// could have had some non-fatal partition errors that removed\n\t\t// partitions from our req, such as unknown topic.\n\t\tif len(req.batches) == 0 {\n\t\t\tagain = s.drainState.maybeFinish(again)\n\t\t\t<-sem\n\t\t\tcontinue\n\t\t}\n\n\t\t// Finally, set our final fields in the req struct.\n\t\treq.producerID = id\n\t\treq.producerEpoch = epoch\n\t\treq.backoffSeq = s.backoffSeq // safe to read outside mu since we are in drain loop\n\n\t\ts.doSequenced(req, func(resp kmsg.Response, err error) {\n\t\t\ts.handleReqResp(req, resp, err)\n\t\t\t<-sem\n\t\t})\n\n\t\tagain = s.drainState.maybeFinish(again)\n\t}\n}", "func (prod *InfluxDB) Produce(workers *sync.WaitGroup) {\n\tprod.BatchMessageLoop(workers, prod.sendBatch)\n}", "func setReaderWriters(d4 *d4S, force bool) bool {\n\t//TODO implement other destination file, fifo unix_socket ...\n\tswitch (*d4).conf.source {\n\tcase \"stdin\":\n\t\t(*d4).src = os.Stdin\n\tcase \"pcap\":\n\t\tf, _ := os.Open(\"capture.pcap\")\n\t\t(*d4).src = f\n\tcase \"d4server\":\n\t\t// Create a new redis connection pool\n\t\t(*d4).redisInputPool = newPool((*d4).conf.redisHost+\":\"+(*d4).conf.redisPort, 16)\n\t\tvar err error\n\t\t(*d4).redisCon, err = (*d4).redisInputPool.Dial()\n\t\tif err != nil {\n\t\t\tlogger.Println(\"Could not connect to d4 Redis\")\n\t\t\treturn false\n\t\t}\n\t\t(*d4).src, err = inputreader.NewLPOPReader(&(*d4).redisCon, (*d4).conf.redisDB, (*d4).conf.redisQueue)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Could not create d4 Redis Descriptor %q \\n\", err)\n\t\t\treturn 
false\n\t\t}\n\tcase \"folder\":\n\t\tvar err error\n\t\t(*d4).src, err = inputreader.NewFileWatcherReader((*d4).conf.folderstr, (*d4).json, (*d4).daily, logger)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Could not create File Watcher %q \\n\", err)\n\t\t\treturn false\n\t\t}\n\t}\n\tisn, dstnet := config.IsNet((*d4).conf.destination)\n\tif isn {\n\t\t// We test whether a connection already exist\n\t\t// (case where the reader run out of data)\n\t\t// force forces to reset the connections after\n\t\t// failure to reuse it\n\t\tif _, ok := (*d4).dst.w.(net.Conn); !ok || force {\n\t\t\tif (*d4).tor {\n\t\t\t\tdialer := net.Dialer{\n\t\t\t\t\tTimeout: (*d4).ct,\n\t\t\t\t\tKeepAlive: (*d4).cka,\n\t\t\t\t\tFallbackDelay: 0,\n\t\t\t\t}\n\t\t\t\tdial, err := proxy.SOCKS5(\"tcp\", \"127.0.0.1:9050\", nil, &dialer)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\t\t\t\ttlsc := tls.Config{\n\t\t\t\t\tInsecureSkipVerify: true,\n\t\t\t\t}\n\t\t\t\tif (*d4).cc {\n\t\t\t\t\ttlsc = tls.Config{\n\t\t\t\t\t\tInsecureSkipVerify: false,\n\t\t\t\t\t\tRootCAs: &(*d4).ca,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tconn, errc := dial.Dial(\"tcp\", dstnet)\n\t\t\t\tif errc != nil {\n\t\t\t\t\tlogger.Println(errc)\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\tif (*d4).ce == true {\n\t\t\t\t\tconn = tls.Client(conn, &tlsc) // use tls\n\t\t\t\t}\n\t\t\t\t(*d4).dst = newD4Writer(conn, (*d4).conf.key)\n\t\t\t} else {\n\t\t\t\tdial := net.Dialer{\n\t\t\t\t\tTimeout: (*d4).ct,\n\t\t\t\t\tKeepAlive: (*d4).cka,\n\t\t\t\t\tFallbackDelay: 0,\n\t\t\t\t}\n\t\t\t\ttlsc := tls.Config{\n\t\t\t\t\tInsecureSkipVerify: true,\n\t\t\t\t}\n\t\t\t\tif (*d4).cc {\n\t\t\t\t\ttlsc = tls.Config{\n\t\t\t\t\t\tInsecureSkipVerify: false,\n\t\t\t\t\t\tRootCAs: &(*d4).ca,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif (*d4).ce == true {\n\t\t\t\t\tconn, errc := tls.DialWithDialer(&dial, \"tcp\", dstnet, &tlsc)\n\t\t\t\t\tif errc != nil {\n\t\t\t\t\t\tlogger.Println(errc)\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t\t(*d4).dst = newD4Writer(conn, (*d4).conf.key)\n\t\t\t\t} else {\n\t\t\t\t\tconn, errc := dial.Dial(\"tcp\", dstnet)\n\t\t\t\t\tif errc != nil {\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t\t(*d4).dst = newD4Writer(conn, (*d4).conf.key)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\tswitch (*d4).conf.destination {\n\t\tcase \"stdout\":\n\t\t\t(*d4).dst = newD4Writer(os.Stdout, (*d4).conf.key)\n\t\tcase \"file\":\n\t\t\tf, _ := os.Create(\"test.txt\")\n\t\t\t(*d4).dst = newD4Writer(f, (*d4).conf.key)\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"No suitable destination found, given :%q\", (*d4).conf.destination))\n\t\t}\n\t}\n\n\t// Create the copy buffer\n\t(*d4).dst.fb = make([]byte, HDR_SIZE+(*d4).conf.snaplen)\n\t(*d4).dst.pb = make([]byte, (*d4).conf.snaplen)\n\n\treturn true\n}", "func ReadRecords(f *os.File, delim rune, output chan<- Record) (n int, err error) {\n\tif output == nil {\n\t\tpanic(\"ReadRecords output channel argument is nil.\")\n\t}\n\tfor reader := NewLazyCSVReader(f, delim); row, err := reader.Read(); n++ {\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\t\trecord, err := NewRecord(row, f.Name(), n)\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\t\toutput <- record\n\t}\n\treturn\n}", "func (buf *Buffer) push(x interface{}) {\n\tmetadata, err := getHostMetadata()\n\tif err != nil {\n\t\tlog.Logf(\"Error getting metadata %v\", err)\n\t} else {\n\t\terr = saveHostMetadata(metadata)\n\t\tif err != nil {\n\t\t\tlog.Logf(\"saving host metadata failed with :%v\", err)\n\t\t}\n\t}\n\n\tswitch x.(type) {\n\tcase 
DNCReport:\n\t\tif len(buf.DNCReports) >= MaxNumReports {\n\t\t\treturn\n\t\t}\n\t\tdncReport := x.(DNCReport)\n\t\tdncReport.Metadata = metadata\n\t\tbuf.DNCReports = append(buf.DNCReports, dncReport)\n\tcase CNIReport:\n\t\tif len(buf.CNIReports) >= MaxNumReports {\n\t\t\treturn\n\t\t}\n\t\tcniReport := x.(CNIReport)\n\t\tcniReport.Metadata = metadata\n\t\tbuf.CNIReports = append(buf.CNIReports, cniReport)\n\tcase NPMReport:\n\t\tif len(buf.NPMReports) >= MaxNumReports {\n\t\t\treturn\n\t\t}\n\t\tnpmReport := x.(NPMReport)\n\t\tnpmReport.Metadata = metadata\n\t\tbuf.NPMReports = append(buf.NPMReports, npmReport)\n\tcase CNSReport:\n\t\tif len(buf.CNSReports) >= MaxNumReports {\n\t\t\treturn\n\t\t}\n\t\tcnsReport := x.(CNSReport)\n\t\tcnsReport.Metadata = metadata\n\t\tbuf.CNSReports = append(buf.CNSReports, cnsReport)\n\t}\n}", "func (ml *mergeList) drain(mergeChan chan todo) todo {\n\tfor {\n\t\tselect {\n\t\tcase td := <-mergeChan:\n\t\t\tif td.isZero() { // channel closed\n\t\t\t\treturn todo{}\n\t\t\t}\n\t\t\tif td.ret == nil && ml.meta.SameSchemaAs(td.meta) {\n\t\t\t\tml.add(td.tables)\n\t\t\t} else {\n\t\t\t\treturn td // not added to merge (sync or persist)\n\t\t\t}\n\t\tdefault: // channel empty\n\t\t\treturn todo{}\n\t\t}\n\t}\n}", "func (jr *joinReader) Run(wg *sync.WaitGroup) {\n\tif wg != nil {\n\t\tdefer wg.Done()\n\t}\n\n\tctx := log.WithLogTagInt(jr.flowCtx.Ctx, \"JoinReader\", int(jr.desc.ID))\n\tctx, span := processorSpan(ctx, \"join reader\")\n\tdefer tracing.FinishSpan(span)\n\n\terr := jr.mainLoop(ctx)\n\tif err != nil {\n\t\tDrainAndClose(ctx, jr.out.output, err /* cause */, jr.pushTrailingMeta, jr.input)\n\t}\n}", "func receiveAndManageRollups(ctx context.Context, client *influxdb.Client, ch <-chan kubernetes.ConfigMapUpdate) {\n\tfor {\n\t\tselect {\n\t\tcase update := <-ch:\n\t\t\tlog := log.WithField(\"configmap\", update.ResourceUpdate.Meta())\n\t\t\tfor _, v := range update.Data {\n\t\t\t\tvar rollups []influxdb.Rollup\n\t\t\t\terr := json.Unmarshal([]byte(v), &rollups)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Errorf(\"failed to unmarshal %s: %v\", update.Data, trace.DebugReport(err))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tfor _, rollup := range rollups {\n\t\t\t\t\tswitch update.EventType {\n\t\t\t\t\tcase watch.Added:\n\t\t\t\t\t\terr := client.CreateRollup(rollup)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Errorf(\"failed to create rollup %v: %v\", rollup, trace.DebugReport(err))\n\t\t\t\t\t\t}\n\t\t\t\t\tcase watch.Deleted:\n\t\t\t\t\t\terr := client.DeleteRollup(rollup)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Errorf(\"failed to delete rollup %v: %v\", rollup, trace.DebugReport(err))\n\t\t\t\t\t\t}\n\t\t\t\t\tcase watch.Modified:\n\t\t\t\t\t\terr := client.UpdateRollup(rollup)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Errorf(\"failed to alter rollup %v: %v\", rollup, trace.DebugReport(err))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}", "func (r record) MarshalRecords(records []common.Record) ([]byte, error) {\n\trecordBytes := make([]byte, len(records)*recordLength)\n\n\terr := r.MarshalRecordsToBuffer(records, recordBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn recordBytes, nil\n}", "func (mr *mirrorRouter) Push(\n\trow rowenc.EncDatumRow, meta *execinfrapb.ProducerMetadata,\n) execinfra.ConsumerStatus {\n\taggStatus := mr.aggStatus()\n\tif meta != nil {\n\t\tmr.fwdMetadata(meta)\n\t\t// fwdMetadata can change the status, re-read it.\n\t\treturn 
mr.aggStatus()\n\t}\n\tif aggStatus != execinfra.NeedMoreRows {\n\t\treturn aggStatus\n\t}\n\n\tuseSema := mr.shouldUseSemaphore()\n\tif useSema {\n\t\tmr.semaphore <- struct{}{}\n\t}\n\n\tfor i := range mr.outputs {\n\t\tro := &mr.outputs[i]\n\t\tro.mu.Lock()\n\t\terr := ro.addRowLocked(context.TODO(), row)\n\t\tro.mu.Unlock()\n\t\tif err != nil {\n\t\t\tif useSema {\n\t\t\t\t<-mr.semaphore\n\t\t\t}\n\t\t\tmr.fwdMetadata(&execinfrapb.ProducerMetadata{Err: err})\n\t\t\tatomic.StoreUint32(&mr.aggregatedStatus, uint32(execinfra.ConsumerClosed))\n\t\t\treturn execinfra.ConsumerClosed\n\t\t}\n\t\tro.mu.cond.Signal()\n\t}\n\tif useSema {\n\t\t<-mr.semaphore\n\t}\n\treturn aggStatus\n}", "func (h *HashesReader) Reader(errChan chan error, hashesChans map[int]chan domains.Hash) {\n\n\tdefer h.ProgressBarHashes.Finish()\n\n\t// Init provider\n\tif err := h.HashesProvider.Prepare(); err != nil {\n\t\terrChan <- errors.Wrap(err, \"unable to prepare dictionary provider\")\n\t\treturn\n\t}\n\n\th.ProgressBarHashes.SetTotal(h.HashesProvider.GetTotal())\n\tvar current int64\n\n\t// Read values and sent them to workers\n\tfor h.HashesProvider.Next() {\n\t\thash := domains.Hash{Hash: h.HashesProvider.Value()}\n\n\t\t// Send the same hash to all workers\n\t\tfor workerID := range hashesChans {\n\t\t\thashesChans[workerID] <- hash\n\t\t}\n\n\t\th.ProgressBarHashes.Increment()\n\t\tcurrent++\n\t\th.ProgressBarCracked.SetTotal(current)\n\t}\n\n\t// Last provider error\n\tif h.HashesProvider.Err() != nil {\n\t\terrChan <- errors.Wrap(h.HashesProvider.Err(), \"dictionary provider error\")\n\t\treturn\n\t}\n\n\t// Close provider\n\tif err := h.HashesProvider.Close(); err != nil {\n\t\terrChan <- errors.Wrap(err, \"unable to close dictionary provider\")\n\t\treturn\n\t}\n\n\tcloseWorkers(hashesChans)\n}", "func worker(queue chan []string, out chan []byte, opts options, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\tfor batch := range queue {\n\tLoop:\n\t\tfor _, s := range batch {\n\t\t\tvar err error\n\t\t\tis := finc.IntermediateSchema{}\n\t\t\terr = json.Unmarshal([]byte(s), &is)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\t// Skip things, e.g. 
blacklisted DOIs.\n\t\t\tfor _, f := range opts.filters {\n\t\t\t\tif !f.Apply(is) {\n\t\t\t\t\tcontinue Loop\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Get export format.\n\t\t\tschema := opts.exportSchemaFunc()\n\t\t\terr = schema.Convert(is)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\t// Get list of ISILs to attach.\n\t\t\tschema.Attach(opts.tagger.Tags(is))\n\n\t\t\t// TODO(miku): maybe move marshalling into Exporter, if we have\n\t\t\t// anything else than JSON - function could be somethings like\n\t\t\t// func Marshal() ([]byte, error)\n\t\t\tb, err := json.Marshal(schema)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tout <- b\n\t\t}\n\t}\n}", "func (p *Sink) Listen(s chan qpid.GrillStatus) {\n\n\tfor status := range s {\n\t\tfor _, s := range status.GrillSensors {\n\t\t\tt, err := s.Temperature()\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(errors.Wrap(err, \"get temperature\"))\n\t\t\t}\n\n\t\t\ttm := messages.GrillTemp{\n\t\t\t\tTemp: t,\n\t\t\t}\n\n\t\t\tb, err := json.Marshal(tm)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Err marshaling Grill Temp\", err)\n\t\t\t}\n\n\t\t\tp.service.Publish(p.GrillTopic(), b, 0, false)\n\n\t\t\tset, err := s.Setpoint()\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(errors.Wrap(err, \"get setpoint\"))\n\t\t\t}\n\n\t\t\tgtm := messages.GrillTarget{\n\t\t\t\tTemp: set,\n\t\t\t}\n\n\t\t\tb, err = json.Marshal(gtm)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Err marshaling Grill Setpoint\", err)\n\t\t\t}\n\n\t\t\tp.service.Publish(p.SetTopic(), b, 0, false)\n\n\t\t\tfsm := messages.FanStatus{\n\t\t\t\tFanOn: status.FanOn,\n\t\t\t}\n\n\t\t\tb, err = json.Marshal(fsm)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Err marshaling Fan Status\", err)\n\t\t\t}\n\n\t\t\tp.service.Publish(p.FanTopic(), b, 0, false)\n\t\t}\n\t}\n\n}", "func (c *cloudflareClient) uploadRecords(name string, records map[string]string) error {\n\t// Convert all names to lowercase.\n\tlrecords := make(map[string]string, len(records))\n\tfor name, r := range records {\n\t\tlrecords[strings.ToLower(name)] = r\n\t}\n\trecords = lrecords\n\n\tlog.Info(fmt.Sprintf(\"Retrieving existing TXT records on %s\", name))\n\tentries, err := c.DNSRecords(c.zoneID, cloudflare.DNSRecord{Type: \"TXT\"})\n\tif err != nil {\n\t\treturn err\n\t}\n\texisting := make(map[string]cloudflare.DNSRecord)\n\tfor _, entry := range entries {\n\t\tif !strings.HasSuffix(entry.Name, name) {\n\t\t\tcontinue\n\t\t}\n\t\texisting[strings.ToLower(entry.Name)] = entry\n\t}\n\n\t// Iterate over the new records and inject anything missing.\n\tfor path, val := range records {\n\t\told, exists := existing[path]\n\t\tif !exists {\n\t\t\t// Entry is unknown, push a new one to Cloudflare.\n\t\t\tlog.Info(fmt.Sprintf(\"Creating %s = %q\", path, val))\n\t\t\tttl := 1\n\t\t\tif path != name {\n\t\t\t\tttl = 2147483647 // Max TTL permitted by Cloudflare\n\t\t\t}\n\t\t\t_, err = c.CreateDNSRecord(c.zoneID, cloudflare.DNSRecord{Type: \"TXT\", Name: path, Content: val, TTL: ttl})\n\t\t} else if old.Content != val {\n\t\t\t// Entry already exists, only change its content.\n\t\t\tlog.Info(fmt.Sprintf(\"Updating %s from %q to %q\", path, old.Content, val))\n\t\t\told.Content = val\n\t\t\terr = c.UpdateDNSRecord(c.zoneID, old.ID, old)\n\t\t} else {\n\t\t\tlog.Info(fmt.Sprintf(\"Skipping %s = %q\", path, val))\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to publish %s: %v\", path, err)\n\t\t}\n\t}\n\n\t// Iterate over the old records and delete anything stale.\n\tfor path, entry := range 
existing {\n\t\tif _, ok := records[path]; ok {\n\t\t\tcontinue\n\t\t}\n\t\t// Stale entry, nuke it.\n\t\tlog.Info(fmt.Sprintf(\"Deleting %s = %q\", path, entry.Content))\n\t\tif err := c.DeleteDNSRecord(c.zoneID, entry.ID); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to delete %s: %v\", path, err)\n\t\t}\n\t}\n\treturn nil\n}", "func Sink(zipkinSpans <-chan proxy.Span) {\n\tfor span := range zipkinSpans {\n\t\tsinkSpan(span)\n\t}\n}", "func (w *WarcFileWriter) Write(record ...WarcRecord) []WriteResponse {\n\tselect {\n\tcase <-w.closed:\n\t\treturn nil\n\tdefault:\n\t}\n\n\tjob, result := w.createWriteJob(record...)\n\tselect {\n\tcase <-w.closed:\n\t\treturn nil\n\tcase w.middleCh <- job:\n\t\treturn <-result\n\t}\n}", "func (p *Puller) Pull(ctx context.Context) error {\n\tif p.statsCh != nil {\n\t\tc := emitStats(p.stats, p.statsCh)\n\t\tdefer c()\n\t}\n\n\teg, ctx := errgroup.WithContext(ctx)\n\n\tcompletedTables := make(chan FilledWriters, 8)\n\n\teg.Go(func() error {\n\t\treturn p.processCompletedTables(ctx, completedTables)\n\t})\n\n\teg.Go(func() (err error) {\n\t\tif err = p.tablefileSema.Acquire(ctx, 1); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tconst batchSize = 64 * 1024\n\t\t// refs are added to |visited| on first sight\n\t\tvisited := p.hashes\n\t\t// |absent| are visited, un-batched refs\n\t\tabsent := p.hashes.Copy()\n\t\t// |batches| are visited, un-fetched refs\n\t\tbatches := make([]hash.HashSet, 0, 64)\n\n\t\tfor absent.Size() > 0 || len(batches) > 0 {\n\t\t\tif absent.Size() >= batchSize {\n\t\t\t\tvar bb []hash.HashSet\n\t\t\t\tabsent, bb = batchNovel(absent, batchSize)\n\t\t\t\tbatches = append(batches, bb...)\n\t\t\t}\n\t\t\tif len(batches) == 0 {\n\t\t\t\tbatches = append(batches, absent)\n\t\t\t\tabsent = make(hash.HashSet)\n\t\t\t}\n\n\t\t\tb := batches[len(batches)-1]\n\t\t\tbatches = batches[:len(batches)-1]\n\n\t\t\tb, err = p.sinkDBCS.HasMany(ctx, b)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t} else if b.Size() == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\terr = p.getCmp(ctx, b, absent, visited, completedTables)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif p.wr != nil && p.wr.ChunkCount() > 0 {\n\t\t\tselect {\n\t\t\tcase completedTables <- FilledWriters{p.wr}:\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn ctx.Err()\n\t\t\t}\n\t\t}\n\t\tclose(completedTables)\n\t\treturn nil\n\t})\n\n\treturn eg.Wait()\n}", "func RetrieveNotificationRecords(appid uint16, owner string, channelInfo *saver.RetrieveChannel,\n\ttraceSn string, resp *saver.RetrieveMessagesResponse) error {\n\t// count request response time if need\n\tif netConf().StatResponseTime {\n\t\tcountFunc := countP2pResponseTime(owner, traceSn, \"RetrieveNotificationRecords\", appid)\n\t\tdefer countFunc()\n\t}\n\n\tif sessions, err := GetMessageMongoStore(owner, appid, traceSn); err != nil {\n\t\tLogger.Error(owner, appid, traceSn, \"RetrieveNotificationRecords\",\n\t\t\t\"Retrieve message from storage failed\", err)\n\t\treturn err\n\t} else if len(sessions) == 0 {\n\t\tLogger.Error(owner, appid, traceSn, \"RetrieveNotificationRecords\",\n\t\t\t\"Retrieve message from storage failed\", \"no available mongo connection\")\n\t\treturn errors.New(\"no available mongo connection\")\n\t} else {\n\t\t// need to close sessions to release socket connection\n\t\tdefer CloseMgoSessions(sessions)\n\n\t\tsortField := FieldMsgId\n\t\tif channelInfo.MaxCount == 0 {\n\t\t\tchannelInfo.MaxCount = DefaultReturnCount\n\t\t} else if channelInfo.MaxCount < 0 {\n\t\t\t// if channelInfo.StartMsgId 
and channelInfo.MaxCount both are negative, we set sort field later\n\t\t\tif channelInfo.StartMsgId > 0 {\n\t\t\t\tsortField = \"-\" + sortField\n\t\t\t}\n\t\t\tchannelInfo.MaxCount = -1 * channelInfo.MaxCount\n\t\t}\n\n\t\tif channelInfo.StartMsgId == 0 {\n\t\t\tif lastRead, err := GetLastRead(appid, owner, channelInfo.Channel, sessions); err == nil && lastRead > 0 {\n\t\t\t\tchannelInfo.StartMsgId = lastRead + 1\n\t\t\t}\n\t\t}\n\n\t\t// makes complex query:\n\t\t// condition is that:\n\t\t// owner is specified owner,\n\t\t// no recall field or recall field value is not 1\n\t\t// sorted field is message id field\n\t\tvar query *bson.D\n\t\tif channelInfo.StartMsgId >= 0 {\n\t\t\tif strings.HasPrefix(sortField, \"-\") {\n\t\t\t\t// query condition: message id is less than start id\n\t\t\t\t// retrieves messages in descending order of message id,\n\t\t\t\t// result does not include message which id is channelInfo.StartMsgId.\n\t\t\t\tquery = &bson.D{bson.DocElem{FieldJid, owner},\n\t\t\t\t\tbson.DocElem{FieldMsgId, bson.D{bson.DocElem{OpLt, channelInfo.StartMsgId}}},\n\t\t\t\t\tbson.DocElem{OpOr, []bson.D{\n\t\t\t\t\t\t{bson.DocElem{FieldRecalled, bson.D{bson.DocElem{OpExists, false}}}},\n\t\t\t\t\t\t{bson.DocElem{FieldRecalled, bson.D{bson.DocElem{OpNe, 1}}}}}}}\n\t\t\t} else {\n\t\t\t\t// query condition: message id is equal or lager than start id,\n\t\t\t\t// retrieves messages in ascending order of message id,\n\t\t\t\t// result includes message which id is channelInfo.StartMsgId if there is.\n\t\t\t\tquery = &bson.D{bson.DocElem{FieldJid, owner},\n\t\t\t\t\tbson.DocElem{FieldMsgId, bson.D{bson.DocElem{OpGte, channelInfo.StartMsgId}}},\n\t\t\t\t\tbson.DocElem{OpOr, []bson.D{\n\t\t\t\t\t\t{bson.DocElem{FieldRecalled, bson.D{bson.DocElem{OpExists, false}}}},\n\t\t\t\t\t\t{bson.DocElem{FieldRecalled, bson.D{bson.DocElem{OpNe, 1}}}}}}}\n\t\t\t}\n\t\t} else {\n\t\t\t// retrieve latest messages in descending order from latest message\n\t\t\tquery = &bson.D{bson.DocElem{FieldJid, owner},\n\t\t\t\tbson.DocElem{OpOr, []bson.D{\n\t\t\t\t\t{bson.DocElem{FieldRecalled, bson.D{bson.DocElem{OpExists, false}}}},\n\t\t\t\t\t{bson.DocElem{FieldRecalled, bson.D{bson.DocElem{OpNe, 1}}}}}}}\n\t\t\tsortField = \"-\" + sortField\n\t\t}\n\n\t\tmessages := make([]*saver.ChatMessage, 0, channelInfo.MaxCount)\n\t\tif records, err := find(query, channelInfo.MaxCount, sessions, NotificationMsgDB,\n\t\t\tFormatCollection(NotificationMsgCol, appid), sortField); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\tfor _, record := range records {\n\t\t\t\t// translate a record to saver.ChatMessage object; if an error occurred, translated message will not\n\t\t\t\t// return to caller but only log it to error log\n\t\t\t\tif message, err := TranslateBsonM2Message(&record, channelInfo.Channel); err != nil {\n\t\t\t\t\tLogger.Error(owner, appid, traceSn, \"RetrieveNotificationRecords\",\n\t\t\t\t\t\t\"Retrieve message from storage failed\",\n\t\t\t\t\t\tfmt.Sprint(\"error:[\", err.Error(), \"] translated message:[\", message, \"]\"))\n\t\t\t\t\tcontinue\n\t\t\t\t} else {\n\t\t\t\t\tmessages = append(messages, message)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tresp.Inbox[channelInfo.Channel] = messages\n\n\t\t// get max message id/count retrieved\n\t\tmaxMsgId := uint64(0)\n\t\tqueriedCount := len(resp.Inbox[channelInfo.Channel])\n\t\tif queriedCount > 0 {\n\t\t\tmaxMsgId = resp.Inbox[channelInfo.Channel][queriedCount-1].MsgId\n\t\t\tif resp.Inbox[channelInfo.Channel][0].MsgId > maxMsgId {\n\t\t\t\tmaxMsgId = 
resp.Inbox[channelInfo.Channel][0].MsgId\n\t\t\t}\n\t\t}\n\n\t\tLogger.Debug(owner, appid, traceSn, \"RetrieveNotificationRecords\", \"Retrieved notification records\",\n\t\t\tfmt.Sprintf(\"Retrieved messages of %s, start: %v, request len: %v, return: %v, retrun max id: %v\",\n\t\t\t\towner, channelInfo.StartMsgId, channelInfo.MaxCount, queriedCount, maxMsgId))\n\n\t\t// get latest id of channel\n\t\tlatest, err := GetLatestMessageId(appid, owner, channelInfo.Channel, sessions)\n\t\tif err != nil {\n\t\t\tLogger.Error(owner, appid, traceSn, \"RetrieveNotificationRecords\",\n\t\t\t\t\"get latest id failed\", err)\n\t\t} else {\n\t\t\tresp.LatestID[channelInfo.Channel] = latest\n\t\t}\n\n\t\t// try to update last read id, update will failed if new last read is less than db record\n\t\tlastRead, _ := GetLastRead(appid, owner, channelInfo.Channel, sessions)\n\t\tif lastRead < (channelInfo.StartMsgId-1) && latest > 0 {\n\t\t\tlastRead = channelInfo.StartMsgId - 1\n\t\t\tif lastRead > int64(latest) {\n\t\t\t\tlastRead = int64(latest) // 确保最后读取ID是一个有效的ID\n\t\t\t}\n\n\t\t\tUpdateLastRead(appid, owner, channelInfo.Channel, sessions, lastRead)\n\t\t}\n\t\tresp.LastReadID[channelInfo.Channel] = uint64(lastRead)\n\t}\n\treturn nil\n}", "func (c *Client) addReaders(op errors.Op, entry *upspin.DirEntry, packer upspin.Packer, readers []upspin.UserName) error {\n\tif packer.Packing() != upspin.EEPack {\n\t\treturn nil\n\t}\n\n\tname := entry.Name\n\n\t// Add other readers to Packdata.\n\treadersPublicKey := make([]upspin.PublicKey, 0, len(readers)+2)\n\tf := c.config.Factotum()\n\tif f == nil {\n\t\treturn errors.E(op, name, errors.Permission, \"no factotum available\")\n\t}\n\treadersPublicKey = append(readersPublicKey, f.PublicKey())\n\tall := access.IsAccessControlFile(entry.Name)\n\tfor _, r := range readers {\n\t\tif r == access.AllUsers {\n\t\t\tall = true\n\t\t\tcontinue\n\t\t}\n\t\tkey, err := bind.KeyServer(c.config, c.config.KeyEndpoint())\n\t\tif err != nil {\n\t\t\treturn errors.E(op, err)\n\t\t}\n\t\tu, err := key.Lookup(r)\n\t\tif err != nil || len(u.PublicKey) == 0 {\n\t\t\t// TODO warn that we can't process one of the readers?\n\t\t\tcontinue\n\t\t}\n\t\tif u.PublicKey != readersPublicKey[0] { // don't duplicate self\n\t\t\t// TODO(ehg) maybe should check for other duplicates?\n\t\t\treadersPublicKey = append(readersPublicKey, u.PublicKey)\n\t\t}\n\t}\n\tif all {\n\t\treadersPublicKey = append(readersPublicKey, upspin.AllUsersKey)\n\t}\n\n\tpackdata := make([]*[]byte, 1)\n\tpackdata[0] = &entry.Packdata\n\tpacker.Share(c.config, readersPublicKey, packdata)\n\treturn nil\n}", "func ExampleMultiStreamer() {\n\t// Init OAuth2/JWT. 
This is required for authenticating with BigQuery.\n\t// https://cloud.google.com/bigquery/authorization\n\t// https://developers.google.com/console/help/new/#generatingoauth2\n\tjwtConfig, err := NewJWTConfig(\"path_to_key.json\")\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\t// Set MultiStreamer configuration.\n\tnumStreamers := 10 // Number of concurrent sub-streamers (workers) to use.\n\tmaxRows := 500 // Amount of rows queued before forcing insert to BigQuery.\n\tmaxDelay := 1 * time.Second // Time to pass between forcing insert to BigQuery.\n\tsleepBeforeRetry := 1 * time.Second // Time to wait between failed insert retries.\n\tmaxRetryInsert := 10 // Maximum amount of failed insert retries before discarding rows and moving on.\n\n\t// Init a new multi-streamer.\n\tms, err := NewMultiStreamer(\n\t\tjwtConfig, numStreamers, maxRows, maxDelay, sleepBeforeRetry, maxRetryInsert)\n\n\t// Start multi-streamer and workers.\n\tms.Start()\n\tdefer ms.Stop()\n\n\t// Worker errors are reported to MultiStreamer.Errors channel.\n\t// This inits a goroutine the reads from this channel and logs errors.\n\t//\n\t// It can be closed by sending \"true\" to the shutdownErrorChan channel.\n\tshutdownErrorChan := make(chan bool)\n\tgo func() {\n\t\tvar err error\n\n\t\treadErrors := true\n\t\tfor readErrors {\n\t\t\tselect {\n\t\t\tcase <-shutdownErrorChan:\n\t\t\t\treadErrors = false\n\t\t\tcase err = <-ms.Errors:\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t}\n\t}()\n\tdefer func() { shutdownErrorChan <- true }()\n\n\t// Queue a single row.\n\t// Insert will happen once maxDelay time has passed,\n\t// or maxRows rows have been queued.\n\tms.QueueRow(\n\t\t\"project-id\", \"dataset-id\", \"table-id\",\n\t\tmap[string]bigquery.JsonValue{\"key\": \"value\"},\n\t)\n}", "func pushMetrics(producer *sarama.Producer, mode string) {\n\n\t// The list of metrics we want to filter out of the total stats dump from haproxy\n\twantedMetrics := []string{ \"Scur\", \"Qcur\",\"Smax\",\"Slim\",\"Weight\",\"Qtime\",\"Ctime\",\"Rtime\",\"Ttime\",\"Req_rate\",\"Req_rate_max\",\"Req_tot\",\"Rate\",\"Rate_lim\",\"Rate_max\" }\n\n\t// get metrics every second, for ever.\n\tfor {\n\n\t\t\tstats, _ := GetStats(\"all\")\n\t\t localTime := int64(time.Now().Unix())\n\n\n\t\t// for each proxy in the stats dump, pick out the wanted metrics, parse them and send 'm to Kafka\n\t\t\tfor _,proxy := range stats {\n\n\t\t\t\t// filter out the metrics for haproxy's own stats page\n\t\t\t\tif (proxy.Pxname != \"stats\") {\n\n\t\t\t\t\t// loop over all wanted metrics for the current proxy\n\t\t\t\t\tfor _,metric := range wantedMetrics {\n\n\t\t\t\t\t\tfullMetricName := proxy.Pxname + \".\" + strings.ToLower(proxy.Svname) + \".\" + strings.ToLower(metric)\n\t\t\t\t\t\tfield := reflect.ValueOf(proxy).FieldByName(metric).String()\n\t\t\t\t\t\tif (field != \"\") {\n\n\t\t\t\t\t\t\tmetricValue,_ := strconv.Atoi(field)\n\n\t\t\t\t\t\t\tmetricObj := Metric{fullMetricName, metricValue, localTime}\n\t\t\t\t\t\t\tjsonObj, _ := json.MarshalIndent(metricObj, \"\", \" \")\n\n\t\t\t\t\t\t\terr := producer.SendMessage(mode+\".\"+\"all\", sarama.StringEncoder(\"lbmetrics\"), sarama.StringEncoder(jsonObj))\n\t\t\t\t\t\t\tif err != nil {\n\n\t\t\t\t\t\t\t\tlog.Error(\"Error sending message to Kafka \" + err.Error())\n\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tlog.Debug(\"Successfully sent message to Kafka on topic: \" + mode + \".\" + \"all\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\ttime.Sleep(3000 * time.Millisecond)\n\t}\n}", 
"func NewRecordWriter(w io.Writer) (rec io.Writer, err error) {\n\t// TODO 2 - complete implementation\n}", "func (tc *TrafficCaptureReader) Read(ready chan struct{}) {\n\ttc.Lock()\n\ttc.Done = make(chan struct{})\n\ttc.fuse = make(chan struct{})\n\tdefer close(tc.Done)\n\n\tlog.Debugf(\"Processing capture file of size: %d\", len(tc.Contents))\n\n\t// skip header\n\ttc.offset = uint32(len(datadogHeader))\n\n\tvar tsResolution time.Duration\n\tif tc.Version < minNanoVersion {\n\t\ttsResolution = time.Second\n\t} else {\n\t\ttsResolution = time.Nanosecond\n\t}\n\ttc.Unlock()\n\n\tlast := int64(0)\n\n\t// we are all ready to go - let the caller know\n\tready <- struct{}{}\n\n\t// The state must be read out of band, it makes zero sense in the context\n\t// of the replaying process, it must be pushed to the agent. We just read\n\t// and submit the packets here.\n\tfor {\n\t\tmsg, err := tc.ReadNext()\n\t\tif err != nil && err == io.EOF {\n\t\t\tlog.Debugf(\"Done reading capture file...\")\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\tlog.Errorf(\"Error processing: %v\", err)\n\t\t\tbreak\n\t\t}\n\n\t\tif last != 0 {\n\t\t\tif msg.Timestamp > last {\n\t\t\t\tutil.Wait(tsResolution * time.Duration(msg.Timestamp-last))\n\t\t\t}\n\t\t}\n\n\t\tlast = msg.Timestamp\n\t\ttc.Traffic <- msg\n\n\t\tselect {\n\t\tcase <-tc.fuse:\n\t\t\treturn\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t}\n}", "func Fetch(stream *health.Stream, conf Config, writers ...Writer) error {\n\tjob := stream.NewJob(\"fetch\")\n\n\t// Fetch data from each provider\n\tallRates := []exchangeRates{{\"BTC\": {Ask: \"1\", Bid: \"1\", Last: \"1\", Type: exchangeRateTypeCrypto.String()}}}\n\tfor _, f := range []fetchFn{\n\t\tNewBTCAVGFetcher(conf.BTCAVGPubkey, conf.BTCAVGPrivkey),\n\t\tNewCMCFetcher(conf.CMCEnv, conf.CMCAPIKey),\n\t} {\n\t\trates, err := f()\n\t\tif err != nil {\n\t\t\tjob.EventErr(\"fetch_data\", err)\n\t\t\tjob.Complete(health.Error)\n\t\t\treturn err\n\t\t}\n\t\tallRates = append(allRates, rates)\n\t}\n\n\tfullRates := mergeRates(allRates)\n\n\t// Ensure the final payload passes correctness checks\n\terr := validateRates(fullRates)\n\tif err != nil {\n\t\tjob.EventErr(\"validate_rates\", err)\n\t\tjob.Complete(health.Error)\n\t\treturn err\n\t}\n\n\t// Serialize responses\n\tresponseBytes, err := json.Marshal(fullRates)\n\tif err != nil {\n\t\tjob.EventErr(\"marshal\", err)\n\t\tjob.Complete(health.Error)\n\t\treturn err\n\t}\n\n\t// Write\n\tfor _, writer := range writers {\n\t\terr := writer(job, responseBytes)\n\t\tif err != nil {\n\t\t\tjob.EventErr(\"write\", err)\n\t\t\tjob.Complete(health.Error)\n\t\t\treturn err\n\t\t}\n\t}\n\n\tjob.Complete(health.Success)\n\treturn nil\n}", "func (rw *ReadWorker) Run(reader *bufio.Reader) {\n\tisLastBufferSeparated := false\n\tlastWriteBytes := 0\n\tfor {\n\t\tbuf := make([]byte, readBufferSize)\n\t\tn, err := reader.Read(buf)\n\t\tif err == io.EOF {\n\t\t\tif n == 0 {\n\t\t\t\tif !isLastBufferSeparated && lastWriteBytes != 0 {\n\t\t\t\t\trw.sendSpool(1, []byte{separator})\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\trw.sendSpool(n, buf)\n\t\t\t}\n\t\t} else if err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif n != 0 {\n\t\t\tisLastBufferSeparated = buf[n-1] == separator\n\t\t\tlastWriteBytes = n\n\t\t\trw.sendSpool(n, buf)\n\t\t}\n\t}\n}", "func (c *connection) process(pushCounter *uint64, inputs input) error {\n\n\treply, err := c.redis.Do(howMap[inputs.how].pop, inputs.source)\n\n\tif reply == nil {\n\t\treturn nil\n\t}\n\n\tpopData, err := redis.Bytes(reply, err)\n\n\tif 
err != nil {\n\t\tlog.Printf(\"Popped : %v\", string(popData))\n\t\tlog.Printf(\"Error occured while popping data , : %v\", err)\n\t\treturn err\n\t}\n\n\tif _, err := redis.Int(c.redis.Do(howMap[inputs.how].push, inputs.destination, popData)); err != nil {\n\t\tlog.Printf(\"Error occured while pushing data, : %v\", err)\n\t\treturn err\n\t}\n\n\t*pushCounter++\n\n\treturn nil\n\n}", "func (p *Provider) record(tx *lease.Tx) {\n\tvar ops uint64 // Total number of consumptive ops (really op effects)\n\n\tfor _, op := range tx.Ops() {\n\t\tif op.Type == lease.Update && op.UpdateType() == lease.Renew {\n\t\t\t// Don't record renewals\n\t\t\tcontinue\n\t\t}\n\t\tfor _, effect := range op.Effects() {\n\t\t\tif !effect.Consumptive() {\n\t\t\t\t// Only record effects that affect consumption\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tp.log.Printf(\"TX %s\", effect.String())\n\t\t\tops++\n\t\t}\n\t}\n\n\tp.add(ops)\n}", "func (s *sink) createReq() (*produceRequest, *kmsg.AddPartitionsToTxnRequest, bool) {\n\treq := &produceRequest{\n\t\ttxnID: s.cl.cfg.txnID,\n\t\tacks: s.cl.cfg.acks.val,\n\t\ttimeout: int32(s.cl.cfg.produceTimeout.Milliseconds()),\n\t\tbatches: make(seqRecBatches, 5),\n\n\t\tcompressor: s.cl.compressor,\n\t}\n\n\tvar (\n\t\t// We use non-flexible lengths for what follows. These will be\n\t\t// strictly larger (unless we are creating a produce request\n\t\t// with >16K partitions for a single topic...), so using\n\t\t// non-flexible makes calculations simpler.\n\t\twireLength = s.cl.baseProduceRequestLength()\n\t\twireLengthLimit = s.cl.cfg.maxBrokerWriteBytes\n\n\t\tmoreToDrain bool\n\n\t\ttransactional = req.txnID != nil\n\t\ttxnReq *kmsg.AddPartitionsToTxnRequest\n\t\ttxnAddedTopics map[string]int // topic => index in txnReq\n\t)\n\n\ts.mu.Lock() // prevent concurrent modification to recBufs\n\tdefer s.mu.Unlock()\n\n\t// Over every record buffer, check to see if the first batch is not\n\t// backing off and that it can can fit in our request.\n\trecBufsIdx := s.recBufsStart\n\tfor i := 0; i < len(s.recBufs); i++ {\n\t\trecBuf := s.recBufs[recBufsIdx]\n\t\trecBufsIdx = (recBufsIdx + 1) % len(s.recBufs)\n\n\t\trecBuf.mu.Lock()\n\t\tif recBuf.failing || len(recBuf.batches) == recBuf.batchDrainIdx {\n\t\t\trecBuf.mu.Unlock()\n\t\t\tcontinue\n\t\t}\n\n\t\tbatch := recBuf.batches[recBuf.batchDrainIdx]\n\t\tbatchWireLength := 4 + batch.wireLength // partition, batch len\n\n\t\tif atomic.LoadUint32(&s.produceVersionKnown) == 0 {\n\t\t\tv1BatchWireLength := 4 + batch.v1wireLength\n\t\t\tif v1BatchWireLength > batchWireLength {\n\t\t\t\tbatchWireLength = v1BatchWireLength\n\t\t\t}\n\t\t} else {\n\t\t\tswitch s.produceVersion {\n\t\t\tcase 0, 1:\n\t\t\t\tbatchWireLength = 4 + batch.v0wireLength()\n\t\t\tcase 2:\n\t\t\t\tbatchWireLength = 4 + batch.v1wireLength\n\t\t\t}\n\t\t}\n\n\t\tif _, exists := req.batches[recBuf.topic]; !exists {\n\t\t\tbatchWireLength += 2 + int32(len(recBuf.topic)) + 4 // string len, topic, partition array len\n\t\t}\n\t\tif wireLength+batchWireLength > wireLengthLimit {\n\t\t\trecBuf.mu.Unlock()\n\t\t\tmoreToDrain = true\n\t\t\tcontinue\n\t\t}\n\n\t\tbatch.tries++\n\t\trecBuf.batchDrainIdx++\n\n\t\trecBuf.lockedStopLinger()\n\n\t\t// If lingering is configured, there is some logic around\n\t\t// whether there is more to drain. If this recbuf has more than\n\t\t// one batch ready, then yes, more to drain. 
Otherwise, we\n\t\t// re-linger unless we are flushing.\n\t\tif s.cl.cfg.linger > 0 {\n\t\t\tif len(recBuf.batches) > recBuf.batchDrainIdx+1 {\n\t\t\t\tmoreToDrain = true\n\t\t\t} else if len(recBuf.batches) == recBuf.batchDrainIdx+1 {\n\t\t\t\tif !recBuf.lockedMaybeStartLinger() {\n\t\t\t\t\tmoreToDrain = true\n\t\t\t\t}\n\t\t\t}\n\t\t} else { // no linger, easier\n\t\t\tmoreToDrain = len(recBuf.batches) > recBuf.batchDrainIdx || moreToDrain\n\t\t}\n\n\t\tseq := recBuf.seq\n\t\trecBuf.seq += int32(len(batch.records))\n\n\t\trecBuf.mu.Unlock()\n\n\t\tif transactional && !recBuf.addedToTxn {\n\t\t\trecBuf.addedToTxn = true\n\t\t\tif txnReq == nil {\n\t\t\t\ttxnReq = &kmsg.AddPartitionsToTxnRequest{\n\t\t\t\t\tTransactionalID: *req.txnID,\n\t\t\t\t}\n\t\t\t}\n\t\t\tif txnAddedTopics == nil {\n\t\t\t\ttxnAddedTopics = make(map[string]int, 10)\n\t\t\t}\n\t\t\tidx, exists := txnAddedTopics[recBuf.topic]\n\t\t\tif !exists {\n\t\t\t\tidx = len(txnReq.Topics)\n\t\t\t\ttxnAddedTopics[recBuf.topic] = idx\n\t\t\t\ttxnReq.Topics = append(txnReq.Topics, kmsg.AddPartitionsToTxnRequestTopic{\n\t\t\t\t\tTopic: recBuf.topic,\n\t\t\t\t})\n\t\t\t}\n\t\t\ttxnReq.Topics[idx].Partitions = append(txnReq.Topics[idx].Partitions, recBuf.partition)\n\t\t}\n\n\t\twireLength += batchWireLength\n\t\treq.batches.addBatch(\n\t\t\trecBuf.topic,\n\t\t\trecBuf.partition,\n\t\t\tseq,\n\t\t\tbatch,\n\t\t)\n\t}\n\n\t// We could have lost our only record buffer just before we grabbed the\n\t// lock above, so we have to check there are recBufs.\n\tif len(s.recBufs) > 0 {\n\t\ts.recBufsStart = (s.recBufsStart + 1) % len(s.recBufs)\n\t}\n\treturn req, txnReq, moreToDrain\n}", "func (p *Sink) Listen(s chan qpid.GrillStatus) {\n\tfor status := range s {\n\t\tvar fst int\n\t\tif status.FanOn {\n\t\t\tfst = 1\n\t\t}\n\t\tfsm := messages.FanStatus{\n\t\t\tFanOn: fst,\n\t\t\tTime: time.Now(),\n\t\t}\n\n\t\tb, err := json.Marshal(fsm)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Err marshaling Fan Status\", err)\n\t\t}\n\t\tfut := p.service.Publish(p.FanTopic(), b, 0, false)\n\n\t\terr = fut.Wait(5 * time.Second)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfor _, s := range status.GrillSensors {\n\t\t\tt, err := s.Temperature()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Err getting temp\", err)\n\t\t\t}\n\n\t\t\ttm := messages.GrillTemp{\n\t\t\t\tTemp: t,\n\t\t\t\tTime: time.Now(),\n\t\t\t}\n\n\t\t\tb, err := json.Marshal(tm)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Err marshaling Grill Temp\", err)\n\t\t\t}\n\n\t\t\tp.service.Publish(p.GrillTopic(), b, 0, false)\n\n\t\t\tset, err := s.Setpoint()\n\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Err getting temp\", err)\n\t\t\t}\n\t\t\tgtm := messages.GrillTarget{\n\t\t\t\tTemp: set,\n\t\t\t\tTime: time.Now(),\n\t\t\t}\n\n\t\t\tb, err = json.Marshal(gtm)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Err marshaling Grill Setpoint\", err)\n\t\t\t}\n\n\t\t\tp.service.Publish(p.SetTopic(), b, 0, false)\n\t\t}\n\t}\n\n}", "func AddFromReader(r io.Reader, store *Store, name string) error {\n\ts := bufio.NewScanner(r)\n\tfor s.Scan() {\n\t\trt := []rune(s.Text())\n\t\tif len(rt) > 0 {\n\t\t\tstore.Add(name, rt)\n\t\t}\n\t}\n\treturn s.Err()\n}", "func (jr *joinReader) mainLoop(ctx context.Context) error {\n\tprimaryKeyPrefix := sqlbase.MakeIndexKeyPrefix(&jr.desc, jr.index.ID)\n\n\tvar alloc sqlbase.DatumAlloc\n\tspans := make(roachpb.Spans, 0, joinReaderBatchSize)\n\n\ttxn := jr.flowCtx.txn\n\tif txn == nil {\n\t\tlog.Fatalf(ctx, \"joinReader outside of 
txn\")\n\t}\n\n\tlog.VEventf(ctx, 1, \"starting\")\n\tif log.V(1) {\n\t\tdefer log.Infof(ctx, \"exiting\")\n\t}\n\n\tspanToRows := make(map[string][]sqlbase.EncDatumRow)\n\tfor {\n\t\t// TODO(radu): figure out how to send smaller batches if the source has\n\t\t// a soft limit (perhaps send the batch out if we don't get a result\n\t\t// within a certain amount of time).\n\t\tfor spans = spans[:0]; len(spans) < joinReaderBatchSize; {\n\t\t\trow, meta := jr.input.Next()\n\t\t\tif meta != nil {\n\t\t\t\tif meta.Err != nil {\n\t\t\t\t\treturn meta.Err\n\t\t\t\t}\n\t\t\t\tif !emitHelper(ctx, &jr.out, nil /* row */, meta, jr.pushTrailingMeta, jr.input) {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif row == nil {\n\t\t\t\tif len(spans) == 0 {\n\t\t\t\t\t// No fetching needed since we have collected no spans and\n\t\t\t\t\t// the input has signaled that no more records are coming.\n\t\t\t\t\tjr.out.Close()\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tkey, err := jr.generateKey(row, &alloc, primaryKeyPrefix, jr.lookupCols)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tspan := roachpb.Span{\n\t\t\t\tKey: key,\n\t\t\t\tEndKey: key.PrefixEnd(),\n\t\t\t}\n\t\t\tif jr.isLookupJoin() {\n\t\t\t\tif spanToRows[key.String()] == nil {\n\t\t\t\t\tspans = append(spans, span)\n\t\t\t\t}\n\t\t\t\tspanToRows[key.String()] = append(spanToRows[key.String()], row)\n\t\t\t} else {\n\t\t\t\tspans = append(spans, span)\n\t\t\t}\n\t\t}\n\n\t\t// TODO(radu): we are consuming all results from a fetch before starting\n\t\t// the next batch. We could start the next batch early while we are\n\t\t// outputting rows.\n\t\tif earlyExit, err := jr.indexLookup(ctx, txn, spans, spanToRows); err != nil {\n\t\t\treturn err\n\t\t} else if earlyExit {\n\t\t\treturn nil\n\t\t}\n\n\t\tif len(spans) != joinReaderBatchSize {\n\t\t\t// This was the last batch.\n\t\t\tjr.pushTrailingMeta(ctx)\n\t\t\tjr.out.Close()\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func (s *S3Sink) drainEvents(events []EventData) {\n\tvar written int64\n\tfor _, evt := range events {\n\t\tswitch s.outputFormat {\n\t\tcase \"rfc5424\":\n\t\t\tw, err := evt.WriteRFC5424(s.bodyBuf)\n\t\t\twritten += w\n\t\t\tif err != nil {\n\t\t\t\tglog.Warningf(\"Could not write to event request body (wrote %v) bytes: %v\", written, err)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase \"flatjson\":\n\t\t\tw, err := evt.WriteFlattenedJSON(s.bodyBuf)\n\t\t\twritten += w\n\t\t\tif err != nil {\n\t\t\t\tglog.Warningf(\"Could not write to event request body (wrote %v) bytes: %v\", written, err)\n\t\t\t\treturn\n\t\t\t}\n\t\tdefault:\n\t\t\terr := errors.New(\"Invalid Sink Output Format specified\")\n\t\t\tpanic(err.Error())\n\t\t}\n\t\ts.bodyBuf.Write([]byte{'\\n'})\n\t\twritten++\n\t}\n\n\tif s.canUpload() == false {\n\t\treturn\n\t}\n\n\ts.upload()\n}", "func (record) MarshalRecordsToBuffer(records []common.Record, buffer []byte) error {\n\tif len(records)*recordLength > len(buffer) {\n\t\treturn fmt.Errorf(\"buffer %d is not big enough for records %d\", len(buffer), len(records)*recordLength)\n\t}\n\n\tfor i, r := range records {\n\t\tbuff := buffer[i*recordLength : (i+1)*recordLength]\n\n\t\tif !validation.ValidTraceID(r.ID) { // todo: remove this check. 
maybe have a max id size of 128 bits?\n\t\t\treturn errors.New(\"ids must be 128 bit\")\n\t\t}\n\n\t\tmarshalRecord(r, buff)\n\t}\n\n\treturn nil\n}", "func (self *BinaryPushRequestProcessor) pushMux() {\n\tconnMap := make(map[string]*pushWorkerGroupInfo, 10)\n\tfor req := range self.reqChan {\n\t\tif req == nil {\n\t\t\tbreak\n\t\t}\n\t\tpsp := req.PSP\n\t\tworkerGroup, ok := connMap[psp.Name()]\n\n\t\tneedAdd := false\n\t\tif !ok {\n\t\t\tneedAdd = true\n\t\t} else {\n\t\t\tif !push.IsSamePSP(workerGroup.psp, psp) {\n\t\t\t\tclose(workerGroup.ch)\n\t\t\t\tneedAdd = true\n\t\t\t}\n\t\t}\n\n\t\tif needAdd {\n\t\t\tworkerGroup = &pushWorkerGroupInfo{\n\t\t\t\tpsp: psp,\n\t\t\t\tch: make(chan *common.PushRequest),\n\t\t\t}\n\t\t\tconnMap[psp.Name()] = workerGroup\n\t\t\tself.wgFinalize.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer self.wgFinalize.Done()\n\t\t\t\tself.pushWorkerGroup(psp, workerGroup.ch)\n\t\t\t}()\n\t\t}\n\n\t\tif workerGroup != nil {\n\t\t\tworkerGroup.ch <- req\n\t\t}\n\t}\n\tfor _, workerGroup := range connMap {\n\t\tif workerGroup == nil || workerGroup.ch == nil {\n\t\t\tcontinue\n\t\t}\n\t\tclose(workerGroup.ch)\n\t}\n}", "func (_IUniswapV2Pair *IUniswapV2PairFilterer) WatchBurn(opts *bind.WatchOpts, sink chan<- *IUniswapV2PairBurn, sender []common.Address, to []common.Address) (event.Subscription, error) {\r\n\r\n\tvar senderRule []interface{}\r\n\tfor _, senderItem := range sender {\r\n\t\tsenderRule = append(senderRule, senderItem)\r\n\t}\r\n\r\n\tvar toRule []interface{}\r\n\tfor _, toItem := range to {\r\n\t\ttoRule = append(toRule, toItem)\r\n\t}\r\n\r\n\tlogs, sub, err := _IUniswapV2Pair.contract.WatchLogs(opts, \"Burn\", senderRule, toRule)\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\r\n\t\tdefer sub.Unsubscribe()\r\n\t\tfor {\r\n\t\t\tselect {\r\n\t\t\tcase log := <-logs:\r\n\t\t\t\t// New log arrived, parse the event and forward to the user\r\n\t\t\t\tevent := new(IUniswapV2PairBurn)\r\n\t\t\t\tif err := _IUniswapV2Pair.contract.UnpackLog(event, \"Burn\", log); err != nil {\r\n\t\t\t\t\treturn err\r\n\t\t\t\t}\r\n\t\t\t\tevent.Raw = log\r\n\r\n\t\t\t\tselect {\r\n\t\t\t\tcase sink <- event:\r\n\t\t\t\tcase err := <-sub.Err():\r\n\t\t\t\t\treturn err\r\n\t\t\t\tcase <-quit:\r\n\t\t\t\t\treturn nil\r\n\t\t\t\t}\r\n\t\t\tcase err := <-sub.Err():\r\n\t\t\t\treturn err\r\n\t\t\tcase <-quit:\r\n\t\t\t\treturn nil\r\n\t\t\t}\r\n\t\t}\r\n\t}), nil\r\n}", "func (d *Desync) readChan(wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\tfor m := range d.q {\n\t\tgo m.send()\n\t}\n}", "func (is *InputSteerer) Push(w io.WriteCloser) {\n\tis.mu.Lock()\n\tdefer is.mu.Unlock()\n\tis.ws = append(is.ws, w)\n}", "func (e *PostfixExporter) Collect(ch chan<- prometheus.Metric) {\n\terr := CollectShowqFromSocket(e.showqPath, ch)\n\tif err == nil {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tpostfixUpDesc,\n\t\t\tprometheus.GaugeValue,\n\t\t\t1.0,\n\t\t\te.showqPath)\n\t} else {\n\t\tlog.Printf(\"Failed to scrape showq socket: %s\", err)\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tpostfixUpDesc,\n\t\t\tprometheus.GaugeValue,\n\t\t\t0.0,\n\t\t\te.showqPath)\n\t}\n\n\terr = e.CollectLogfileFromFile(e.logfilePath)\n\tif err == nil {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tpostfixUpDesc,\n\t\t\tprometheus.GaugeValue,\n\t\t\t1.0,\n\t\t\te.logfilePath)\n\t} else {\n\t\tlog.Printf(\"Failed to scrape logfile: %s\", err)\n\t\tch <- 
prometheus.MustNewConstMetric(\n\t\t\tpostfixUpDesc,\n\t\t\tprometheus.GaugeValue,\n\t\t\t0.0,\n\t\t\te.logfilePath)\n\t}\n\n\tch <- e.cleanupProcesses\n\tch <- e.cleanupRejects\n\te.lmtpDelays.Collect(ch)\n\te.pipeDelays.Collect(ch)\n\tch <- e.qmgrInsertsNrcpt\n\tch <- e.qmgrInsertsSize\n\tch <- e.qmgrRemoves\n\te.smtpDelays.Collect(ch)\n\te.smtpTLSConnects.Collect(ch)\n\tch <- e.smtpdConnects\n\tch <- e.smtpdDisconnects\n\tch <- e.smtpdFCrDNSErrors\n\te.smtpdLostConnections.Collect(ch)\n\te.smtpdProcesses.Collect(ch)\n\te.smtpdRejects.Collect(ch)\n\tch <- e.smtpdSASLAuthenticationFailures\n\te.smtpdTLSConnects.Collect(ch)\n\te.unsupportedLogEntries.Collect(ch)\n}", "func (pl *Payload) push(x interface{}) {\n\tmetadata, err := getHostMetadata()\n\tif err != nil {\n\t\ttelemetryLogger.Printf(\"Error getting metadata %v\", err)\n\t} else {\n\t\terr = saveHostMetadata(metadata)\n\t\tif err != nil {\n\t\t\ttelemetryLogger.Printf(\"saving host metadata failed with :%v\", err)\n\t\t}\n\t}\n\n\tif pl.len() < MaxPayloadSize {\n\t\tswitch x.(type) {\n\t\tcase DNCReport:\n\t\t\tdncReport := x.(DNCReport)\n\t\t\tdncReport.Metadata = metadata\n\t\t\tpl.DNCReports = append(pl.DNCReports, dncReport)\n\t\tcase CNIReport:\n\t\t\tcniReport := x.(CNIReport)\n\t\t\tcniReport.Metadata = metadata\n\t\t\tpl.CNIReports = append(pl.CNIReports, cniReport)\n\t\tcase NPMReport:\n\t\t\tnpmReport := x.(NPMReport)\n\t\t\tnpmReport.Metadata = metadata\n\t\t\tpl.NPMReports = append(pl.NPMReports, npmReport)\n\t\tcase CNSReport:\n\t\t\tcnsReport := x.(CNSReport)\n\t\t\tcnsReport.Metadata = metadata\n\t\t\tpl.CNSReports = append(pl.CNSReports, cnsReport)\n\t\t}\n\t}\n}", "func (q *Queue) transfer() {\n\tfor q.s1.Peek() != nil {\n\t\tq.s2.Push(q.s1.Pop())\n\t}\n}", "func main() {\n\tfmt.Println(\"Start Test....!\")\n\tinputDB := setupDB(\"mysql\", \"root:root123@tcp(127.0.0.1:13306)/srcDB\")\n\textractDP := processors.NewSQLReader(inputDB, mypkg.Query(5))\n\n\ttransformDP := mypkg.NewMyTransformer()\n\tfmt.Println(transformDP)\n\n\toutputDB := setupDB(\"mysql\", \"root:root123@tcp(127.0.0.1:13306)/dstDB\")\n\toutputTable := \"krew_info\"\n\tloadDP := processors.NewSQLWriter(outputDB, outputTable)\n\n\tpipeline := ratchet.NewPipeline(extractDP, transformDP, loadDP)\n\tpipeline.Name = \"My Pipeline\"\n\n\terr := <-pipeline.Run()\n\tif err != nil {\n\t\tlogger.ErrorWithoutTrace(pipeline.Name, \":\", err)\n\t\tlogger.ErrorWithoutTrace(pipeline.Stats())\n\t} else {\n\t\tlogger.Info(pipeline.Name, \": Completed successfully.\")\n\t}\n}", "func (r *PopRow) ReceiveRow() []interface{} {\n\treturn []interface{}{&r.Data.Name, &r.Data.Year, &r.Data.Description}\n}", "func (p *Provider) AppendRecords(ctx context.Context, zone string, records []libdns.Record) ([]libdns.Record, error) {\n\treturn p.updateRecords(ctx, zone, records)\n}", "func (r *RecordSlice) Push(rec interface{}) {\n\tr.zvals = append(r.zvals, *rec.(*zed.Value))\n}", "func Reader(filenames <-chan string, itemsToHash chan<- ItemToHash, bufMan *bpipe.BufMan) {\n\tfor file := range filenames {\n\t\tdoRead(file, itemsToHash, bufMan)\n\t}\n}", "func main() {\n\trecordChan := make(chan recWrap)\n\tdefer close(recordChan)\n\tif len(os.Args) != 2{\n\t\tfmt.Println(\"Use format: ./commandExecuter <command_file.txt>\")\n\t\tlog.Fatal()\n\t}\n\tcmdFile := os.Args[1]\n\twg.Add(2)\n\tgo cmdProducer(cmdFile, recordChan)\n\tgo cmdConsumer(recordChan)\n\twg.Wait()\n}", "func (h component) consumeDown(appEUI []byte, devEUI []byte, region dutycycle.Region, dataRate string, bundles 
[]bundle) {\n\tstats.UpdateHistogram(\"handler.uplink.duplicate.count\", int64(len(bundles)))\n\tvar metadata []*core.Metadata\n\tvar fPort uint32\n\tvar fCnt uint32\n\tvar payload []byte\n\tvar firstTime time.Time\n\n\tcomputer, scores, err := dutycycle.NewScoreComputer(region, dataRate)\n\tif err != nil {\n\t\th.abortConsume(err, bundles)\n\t\treturn\n\t}\n\n\tfor i, bundle := range bundles {\n\t\t// We only decrypt the payload of the first bundle's packet.\n\t\t// We assume all the other to be equal and we'll merely collect\n\t\t// metadata from other bundle.\n\t\tpacket := bundle.Packet.(*core.DataUpHandlerReq)\n\t\tif i == 0 {\n\t\t\tvar err error\n\t\t\tvar devAddr lorawan.DevAddr\n\t\t\tcopy(devAddr[:], bundle.Entry.DevAddr)\n\t\t\tpayload, err = lorawan.EncryptFRMPayload(\n\t\t\t\tbundle.Entry.AppSKey,\n\t\t\t\ttrue,\n\t\t\t\tdevAddr,\n\t\t\t\tpacket.FCnt,\n\t\t\t\tpacket.Payload,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\th.abortConsume(err, bundles)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfirstTime = bundle.Time\n\t\t\tfPort = packet.FPort\n\t\t\tfCnt = packet.FCnt\n\t\t\tstats.MarkMeter(\"handler.uplink.in.unique\")\n\t\t} else {\n\t\t\tdiff := bundle.Time.Sub(firstTime).Nanoseconds()\n\t\t\tstats.UpdateHistogram(\"handler.uplink.duplicate.delay\", diff/1000)\n\t\t}\n\n\t\t// Append metadata for each of them\n\t\tmetadata = append(metadata, packet.Metadata)\n\t\tscores = computer.Update(scores, i, *packet.Metadata) // Nil check already done\n\t}\n\n\t// Then create an application-level packet and send it to the wild open\n\t// we don't expect a response from the adapter, end of the chain.\n\t_, err = h.AppAdapter.HandleData(context.Background(), &core.DataAppReq{\n\t\tAppEUI: appEUI,\n\t\tDevEUI: devEUI,\n\t\tFPort: fPort,\n\t\tFCnt: fCnt,\n\t\tPayload: payload,\n\t\tMetadata: metadata,\n\t})\n\tif err != nil {\n\t\th.abortConsume(errors.New(errors.Operational, err), bundles)\n\t\treturn\n\t}\n\n\tstats.MarkMeter(\"handler.uplink.out\")\n\n\t// Now handle the downlink and respond to node\n\tbest := computer.Get(scores, false)\n\tvar downlink pktEntry\n\tif best != nil { // Avoid pulling when there's no gateway available for an answer\n\t\tdownlink, err = h.PktStorage.dequeue(appEUI, devEUI)\n\t}\n\tif err != nil && err.(errors.Failure).Nature != errors.NotFound {\n\t\th.abortConsume(err, bundles)\n\t\treturn\n\t}\n\n\t// One of those bundle might be available for a response\n\tupType := lorawan.MType(bundles[0].Packet.(*core.DataUpHandlerReq).MType)\n\tfor i, bundle := range bundles {\n\t\tif best != nil && best.ID == i && (downlink.Payload != nil || upType == lorawan.ConfirmedDataUp) {\n\t\t\tstats.MarkMeter(\"handler.downlink.pull\")\n\t\t\tdownType := lorawan.UnconfirmedDataDown\n\t\t\tack := (upType == lorawan.ConfirmedDataUp)\n\t\t\tif bundle.Packet.(*core.DataUpHandlerReq).FCntUpReset {\n\t\t\t\tbundle.Entry.FCntDown = 0\n\t\t\t}\n\t\t\tdownlink, err := h.buildDownlink(downlink.Payload, downType, ack, *bundle.Packet.(*core.DataUpHandlerReq), bundle.Entry, best)\n\t\t\tif err != nil {\n\t\t\t\th.abortConsume(errors.New(errors.Structural, err), bundles)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tbundle.Entry.FCntDown = downlink.Payload.MACPayload.FHDR.FCnt\n\t\t\tbundle.Entry.FCntUp = bundle.Packet.(*core.DataUpHandlerReq).FCnt\n\t\t\terr = h.DevStorage.upsert(bundle.Entry)\n\t\t\tif err != nil {\n\t\t\t\th.abortConsume(err, bundles)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tbundle.Chresp <- downlink\n\t\t} else {\n\t\t\tbundle.Chresp <- nil\n\t\t}\n\t}\n\n\t// Then, if there was no downlink, we still 
update the Frame Counter Up in the storage\n\tif best == nil || downlink.Payload == nil && upType != lorawan.ConfirmedDataUp {\n\t\tbundles[0].Entry.FCntUp = bundles[0].Packet.(*core.DataUpHandlerReq).FCnt\n\t\tif err := h.DevStorage.upsert(bundles[0].Entry); err != nil {\n\t\t\th.Ctx.WithError(err).Debug(\"Unable to update Frame Counter Up\")\n\t\t}\n\t}\n}", "func feedByRelayLog(r relay.Reader, ld loader.Loader, cp checkpoint.CheckPoint, cfg *Config) error {\n\tcheckpointTS := cp.TS()\n\tlastSuccessTS := checkpointTS\n\tr.Run()\n\n\tloaderQuit := make(chan struct{})\n\tvar loaderErr error\n\tgo func() {\n\t\tld.SetSafeMode(true)\n\t\tloaderErr = ld.Run()\n\t\tclose(loaderQuit)\n\t}()\n\n\tvar readerTxnsC <-chan *obinlog.Binlog\n\tvar toPushLoaderTxn *loader.Txn\n\tvar loaderInputC chan<- *loader.Txn\n\tsuccessTxnC := ld.Successes()\n\n\treaderTxnsC = r.Binlogs()\n\treaderTxnsCClosed := false\n\n\tloaderClosed := false\n\n\tvar tableRouter *router.Table = nil\n\tupperColName := false\n\tvar routerErr error\n\tif cfg.SyncerCfg.DestDBType == \"oracle\" {\n\t\tupperColName = true\n\t\ttableRouter, _, routerErr = genRouterAndBinlogEvent(cfg.SyncerCfg)\n\t\tif routerErr != nil {\n\t\t\treturn errors.Annotate(routerErr, \"when feed by relay log, gen router and filter failed\")\n\t\t}\n\t}\n\n\tfor {\n\t\t// when reader is drained and all txn has been push into loader\n\t\t// we close cloader.\n\t\tif readerTxnsC == nil && loaderInputC == nil && !loaderClosed {\n\t\t\tld.Close()\n\t\t\tloaderClosed = true\n\t\t}\n\n\t\t// break once we drainer the success items return by loader.\n\t\tif loaderClosed && successTxnC == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tselect {\n\t\tcase sbinlog, ok := <-readerTxnsC:\n\t\t\tif !ok {\n\t\t\t\tlog.Info(\"readerTxnsC closed\")\n\t\t\t\treaderTxnsC = nil\n\t\t\t\treaderTxnsCClosed = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif sbinlog.CommitTs <= checkpointTS {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvar txn *loader.Txn\n\t\t\tvar err error\n\t\t\ttxn, err = loader.SecondaryBinlogToTxn(sbinlog, tableRouter, upperColName)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Trace(err)\n\t\t\t}\n\n\t\t\treaderTxnsC = nil\n\t\t\ttxn.Metadata = sbinlog.CommitTs\n\t\t\ttoPushLoaderTxn = txn\n\t\t\tloaderInputC = ld.Input()\n\t\tcase loaderInputC <- toPushLoaderTxn:\n\t\t\tloaderInputC = nil\n\t\t\ttoPushLoaderTxn = nil\n\t\t\tif !readerTxnsCClosed {\n\t\t\t\treaderTxnsC = r.Binlogs()\n\t\t\t}\n\t\tcase success, ok := <-successTxnC:\n\t\t\tif !ok {\n\t\t\t\tsuccessTxnC = nil\n\t\t\t\tlog.Info(\"success closed\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlastSuccessTS = success.Metadata.(int64)\n\t\tcase <-loaderQuit:\n\t\t\tif loaderErr != nil {\n\t\t\t\treturn errors.Trace(loaderErr)\n\t\t\t}\n\t\t}\n\t}\n\n\tlog.Info(\"finish feed by relay log\")\n\n\treaderErr := <-r.Error()\n\n\tif readerErr != nil {\n\t\treturn errors.Trace(readerErr)\n\t}\n\n\terr := cp.Save(lastSuccessTS, 0 /* secondaryTS */, true /*consistent*/, 0)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\tlog.Info(\"update status as normal\", zap.Int64(\"ts\", lastSuccessTS))\n\n\treturn nil\n}", "func main() {\n\tfabricFileCh := readEtlFiles(\"fabric_events.etl\")\n\tktlFileCh := readEtlFiles(\"ktl_events.etl\")\n\tleaseFileCh := readEtlFiles(\"lease_events.etl\")\n\n\tfabricEventsOutCh1 := make(chan event)\n\tfabricEventsOutCh2 := make(chan event)\n\tfabricEventsInCh := parseEtlFiles(fabricFileCh)\n\tfanOut(fabricEventsInCh, fabricEventsOutCh1, fabricEventsOutCh2)\n\tfabricEventsCh := make(chan 
event)\n\tfanIn(fabricEventsCh, fabricEventsOutCh1, fabricEventsOutCh2)\n\n\tktlEventsCh := parseEtlFiles(ktlFileCh)\n\tleaseEventsCh := parseEtlFiles(leaseFileCh)\n\n\tazureUploaderCh := make(chan event)\n\tinMemoryProducerCh := make(chan event)\n\tgo publishEtlEvents(fabricEventsCh, azureUploaderCh, inMemoryProducerCh)\n\tgo publishEtlEvents(ktlEventsCh, azureUploaderCh, inMemoryProducerCh)\n\n\tazureCsvUploaderCh := make(chan csvevent)\n\tgo publishEtlToCsvEvents(leaseEventsCh, azureCsvUploaderCh)\n\n\tprintEvtToScreen := func(name string, achan chan event) {\n\t\tfor {\n\t\t\te := <-achan\n\t\t\tfmt.Printf(\"%s : %+v\\n\", name, e)\n\t\t}\n\t}\n\n\tprintCsvToScreen := func(name string, achan chan csvevent) {\n\t\tfor {\n\t\t\te := <-achan\n\t\t\tfmt.Printf(\"%s : %+v\\n\", name, e)\n\t\t}\n\t}\n\n\tgo printEvtToScreen(\"azureuploader\", azureUploaderCh)\n\tgo printEvtToScreen(\"inmemoryproducer\", inMemoryProducerCh)\n\tgo printCsvToScreen(\"azurecsvuploader\", azureCsvUploaderCh)\n\n\ttime.Sleep(1000 * time.Millisecond)\n}", "func Push(at *auth.Token, wr *prompbmarshal.WriteRequest) {\n\tif at == nil && len(*remoteWriteMultitenantURLs) > 0 {\n\t\t// Write data to default tenant if at isn't set while -remoteWrite.multitenantURL is set.\n\t\tat = defaultAuthToken\n\t}\n\tvar rwctxs []*remoteWriteCtx\n\tif at == nil {\n\t\trwctxs = rwctxsDefault\n\t} else {\n\t\tif len(*remoteWriteMultitenantURLs) == 0 {\n\t\t\tlogger.Panicf(\"BUG: -remoteWrite.multitenantURL command-line flag must be set when __tenant_id__=%q label is set\", at)\n\t\t}\n\t\trwctxsMapLock.Lock()\n\t\ttenantID := tenantmetrics.TenantID{\n\t\t\tAccountID: at.AccountID,\n\t\t\tProjectID: at.ProjectID,\n\t\t}\n\t\trwctxs = rwctxsMap[tenantID]\n\t\tif rwctxs == nil {\n\t\t\trwctxs = newRemoteWriteCtxs(at, *remoteWriteMultitenantURLs)\n\t\t\trwctxsMap[tenantID] = rwctxs\n\t\t}\n\t\trwctxsMapLock.Unlock()\n\t}\n\n\tvar rctx *relabelCtx\n\trcs := allRelabelConfigs.Load()\n\tpcsGlobal := rcs.global\n\tif pcsGlobal.Len() > 0 {\n\t\trctx = getRelabelCtx()\n\t}\n\ttss := wr.Timeseries\n\trowsCount := getRowsCount(tss)\n\tglobalRowsPushedBeforeRelabel.Add(rowsCount)\n\tmaxSamplesPerBlock := *maxRowsPerBlock\n\t// Allow up to 10x of labels per each block on average.\n\tmaxLabelsPerBlock := 10 * maxSamplesPerBlock\n\tfor len(tss) > 0 {\n\t\t// Process big tss in smaller blocks in order to reduce the maximum memory usage\n\t\tsamplesCount := 0\n\t\tlabelsCount := 0\n\t\ti := 0\n\t\tfor i < len(tss) {\n\t\t\tsamplesCount += len(tss[i].Samples)\n\t\t\tlabelsCount += len(tss[i].Labels)\n\t\t\ti++\n\t\t\tif samplesCount >= maxSamplesPerBlock || labelsCount >= maxLabelsPerBlock {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\ttssBlock := tss\n\t\tif i < len(tss) {\n\t\t\ttssBlock = tss[:i]\n\t\t\ttss = tss[i:]\n\t\t} else {\n\t\t\ttss = nil\n\t\t}\n\t\tif rctx != nil {\n\t\t\trowsCountBeforeRelabel := getRowsCount(tssBlock)\n\t\t\ttssBlock = rctx.applyRelabeling(tssBlock, pcsGlobal)\n\t\t\trowsCountAfterRelabel := getRowsCount(tssBlock)\n\t\t\trowsDroppedByGlobalRelabel.Add(rowsCountBeforeRelabel - rowsCountAfterRelabel)\n\t\t}\n\t\tsortLabelsIfNeeded(tssBlock)\n\t\ttssBlock = limitSeriesCardinality(tssBlock)\n\t\tpushBlockToRemoteStorages(rwctxs, tssBlock)\n\t\tif rctx != nil {\n\t\t\trctx.reset()\n\t\t}\n\t}\n\tif rctx != nil {\n\t\tputRelabelCtx(rctx)\n\t}\n}", "func (e *PostfixExporter) CollectLogfileFromReader(file io.Reader) error {\n\tscanner := bufio.NewScanner(file)\n\tscanner.Split(bufio.ScanLines)\n\n\t// Patterns for parsing log 
messages.\n\tlogLine := regexp.MustCompile(\" postfix/(\\\\w+)\\\\[\\\\d+\\\\]: (.*)\")\n\tlmtpPipeSMTPLine := regexp.MustCompile(\", relay=(\\\\S+), .*, delays=([0-9\\\\.]+)/([0-9\\\\.]+)/([0-9\\\\.]+)/([0-9\\\\.]+), \")\n\tqmgrInsertLine := regexp.MustCompile(\":.*, size=(\\\\d+), nrcpt=(\\\\d+) \")\n\tsmtpTLSLine := regexp.MustCompile(\"^(\\\\S+) TLS connection established to \\\\S+: (\\\\S+) with cipher (\\\\S+) \\\\((\\\\d+)/(\\\\d+) bits\\\\)$\")\n\tsmtpdFCrDNSErrorsLine := regexp.MustCompile(\"^warning: hostname \\\\S+ does not resolve to address \")\n\tsmtpdProcessesSASLLine := regexp.MustCompile(\": client=.*, sasl_username=(\\\\S+)\")\n\tsmtpdRejectsLine := regexp.MustCompile(\"^NOQUEUE: reject: RCPT from \\\\S+: ([0-9]+) \")\n\tsmtpdLostConnectionLine := regexp.MustCompile(\"^lost connection after (\\\\w+) from \")\n\tsmtpdSASLAuthenticationFailuresLine := regexp.MustCompile(\"^warning: \\\\S+: SASL \\\\S+ authentication failed: \")\n\tsmtpdTLSLine := regexp.MustCompile(\"^(\\\\S+) TLS connection established from \\\\S+: (\\\\S+) with cipher (\\\\S+) \\\\((\\\\d+)/(\\\\d+) bits\\\\)$\")\n\n\tfor scanner.Scan() {\n\t\t// Strip off timestamp, hostname, etc.\n\t\tif logMatches := logLine.FindStringSubmatch(scanner.Text()); logMatches != nil {\n\t\t\t// Group patterns to check by Postfix service.\n\t\t\tif logMatches[1] == \"cleanup\" {\n\t\t\t\tif strings.Contains(logMatches[2], \": message-id=<\") {\n\t\t\t\t\te.cleanupProcesses.Inc()\n\t\t\t\t} else if strings.Contains(logMatches[2], \": reject: \") {\n\t\t\t\t\te.cleanupRejects.Inc()\n\t\t\t\t} else {\n\t\t\t\t\te.unsupportedLogEntries.WithLabelValues(logMatches[1]).Inc()\n\t\t\t\t}\n\t\t\t} else if logMatches[1] == \"lmtp\" {\n\t\t\t\tif lmtpMatches := lmtpPipeSMTPLine.FindStringSubmatch(logMatches[2]); lmtpMatches != nil {\n\t\t\t\t\tpdelay, _ := strconv.ParseFloat(lmtpMatches[2], 64)\n\t\t\t\t\te.lmtpDelays.WithLabelValues(\"before_queue_manager\").Observe(pdelay)\n\t\t\t\t\tadelay, _ := strconv.ParseFloat(lmtpMatches[3], 64)\n\t\t\t\t\te.lmtpDelays.WithLabelValues(\"queue_manager\").Observe(adelay)\n\t\t\t\t\tsdelay, _ := strconv.ParseFloat(lmtpMatches[4], 64)\n\t\t\t\t\te.lmtpDelays.WithLabelValues(\"connection_setup\").Observe(sdelay)\n\t\t\t\t\txdelay, _ := strconv.ParseFloat(lmtpMatches[5], 64)\n\t\t\t\t\te.lmtpDelays.WithLabelValues(\"transmission\").Observe(xdelay)\n\t\t\t\t} else {\n\t\t\t\t\te.unsupportedLogEntries.WithLabelValues(logMatches[1]).Inc()\n\t\t\t\t}\n\t\t\t} else if logMatches[1] == \"pipe\" {\n\t\t\t\tif pipeMatches := lmtpPipeSMTPLine.FindStringSubmatch(logMatches[2]); pipeMatches != nil {\n\t\t\t\t\tpdelay, _ := strconv.ParseFloat(pipeMatches[2], 64)\n\t\t\t\t\te.pipeDelays.WithLabelValues(pipeMatches[1], \"before_queue_manager\").Observe(pdelay)\n\t\t\t\t\tadelay, _ := strconv.ParseFloat(pipeMatches[3], 64)\n\t\t\t\t\te.pipeDelays.WithLabelValues(pipeMatches[1], \"queue_manager\").Observe(adelay)\n\t\t\t\t\tsdelay, _ := strconv.ParseFloat(pipeMatches[4], 64)\n\t\t\t\t\te.pipeDelays.WithLabelValues(pipeMatches[1], \"connection_setup\").Observe(sdelay)\n\t\t\t\t\txdelay, _ := strconv.ParseFloat(pipeMatches[5], 64)\n\t\t\t\t\te.pipeDelays.WithLabelValues(pipeMatches[1], \"transmission\").Observe(xdelay)\n\t\t\t\t} else {\n\t\t\t\t\te.unsupportedLogEntries.WithLabelValues(logMatches[1]).Inc()\n\t\t\t\t}\n\t\t\t} else if logMatches[1] == \"qmgr\" {\n\t\t\t\tif qmgrInsertMatches := qmgrInsertLine.FindStringSubmatch(logMatches[2]); qmgrInsertMatches != nil {\n\t\t\t\t\tsize, _ := 
strconv.ParseFloat(qmgrInsertMatches[1], 64)\n\t\t\t\t\te.qmgrInsertsSize.Observe(size)\n\t\t\t\t\tnrcpt, _ := strconv.ParseFloat(qmgrInsertMatches[2], 64)\n\t\t\t\t\te.qmgrInsertsNrcpt.Observe(nrcpt)\n\t\t\t\t} else if strings.HasSuffix(logMatches[2], \": removed\") {\n\t\t\t\t\te.qmgrRemoves.Inc()\n\t\t\t\t} else {\n\t\t\t\t\te.unsupportedLogEntries.WithLabelValues(logMatches[1]).Inc()\n\t\t\t\t}\n\t\t\t} else if logMatches[1] == \"smtp\" {\n\t\t\t\tif smtpMatches := lmtpPipeSMTPLine.FindStringSubmatch(logMatches[2]); smtpMatches != nil {\n\t\t\t\t\tpdelay, _ := strconv.ParseFloat(smtpMatches[2], 64)\n\t\t\t\t\te.smtpDelays.WithLabelValues(\"before_queue_manager\").Observe(pdelay)\n\t\t\t\t\tadelay, _ := strconv.ParseFloat(smtpMatches[3], 64)\n\t\t\t\t\te.smtpDelays.WithLabelValues(\"queue_manager\").Observe(adelay)\n\t\t\t\t\tsdelay, _ := strconv.ParseFloat(smtpMatches[4], 64)\n\t\t\t\t\te.smtpDelays.WithLabelValues(\"connection_setup\").Observe(sdelay)\n\t\t\t\t\txdelay, _ := strconv.ParseFloat(smtpMatches[5], 64)\n\t\t\t\t\te.smtpDelays.WithLabelValues(\"transmission\").Observe(xdelay)\n\t\t\t\t} else if smtpTLSMatches := smtpTLSLine.FindStringSubmatch(logMatches[2]); smtpTLSMatches != nil {\n\t\t\t\t\te.smtpTLSConnects.WithLabelValues(smtpTLSMatches[1:]...).Inc()\n\t\t\t\t} else {\n\t\t\t\t\te.unsupportedLogEntries.WithLabelValues(logMatches[1]).Inc()\n\t\t\t\t}\n\t\t\t} else if logMatches[1] == \"smtpd\" {\n\t\t\t\tif strings.HasPrefix(logMatches[2], \"connect from \") {\n\t\t\t\t\te.smtpdConnects.Inc()\n\t\t\t\t} else if strings.HasPrefix(logMatches[2], \"disconnect from \") {\n\t\t\t\t\te.smtpdDisconnects.Inc()\n\t\t\t\t} else if smtpdFCrDNSErrorsLine.MatchString(logMatches[2]) {\n\t\t\t\t\te.smtpdFCrDNSErrors.Inc()\n\t\t\t\t} else if smtpdLostConnectionMatches := smtpdLostConnectionLine.FindStringSubmatch(logMatches[2]); smtpdLostConnectionMatches != nil {\n\t\t\t\t\te.smtpdLostConnections.WithLabelValues(smtpdLostConnectionMatches[1]).Inc()\n\t\t\t\t} else if smtpdProcessesSASLMatches := smtpdProcessesSASLLine.FindStringSubmatch(logMatches[2]); smtpdProcessesSASLMatches != nil {\n\t\t\t\t\te.smtpdProcesses.WithLabelValues(smtpdProcessesSASLMatches[1]).Inc()\n\t\t\t\t} else if strings.Contains(logMatches[2], \": client=\") {\n\t\t\t\t\te.smtpdProcesses.WithLabelValues(\"\").Inc()\n\t\t\t\t} else if smtpdRejectsMatches := smtpdRejectsLine.FindStringSubmatch(logMatches[2]); smtpdRejectsMatches != nil {\n\t\t\t\t\te.smtpdRejects.WithLabelValues(smtpdRejectsMatches[1]).Inc()\n\t\t\t\t} else if smtpdSASLAuthenticationFailuresLine.MatchString(logMatches[2]) {\n\t\t\t\t\te.smtpdSASLAuthenticationFailures.Inc()\n\t\t\t\t} else if smtpdTLSMatches := smtpdTLSLine.FindStringSubmatch(logMatches[2]); smtpdTLSMatches != nil {\n\t\t\t\t\te.smtpdTLSConnects.WithLabelValues(smtpdTLSMatches[1:]...).Inc()\n\t\t\t\t} else {\n\t\t\t\t\te.unsupportedLogEntries.WithLabelValues(logMatches[1]).Inc()\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Unknown Postfix service.\n\t\t\t\te.unsupportedLogEntries.WithLabelValues(logMatches[1]).Inc()\n\t\t\t}\n\t\t} else {\n\t\t\t// Unknown log entry format.\n\t\t\te.unsupportedLogEntries.WithLabelValues(\"\").Inc()\n\t\t}\n\t}\n\n\treturn scanner.Err()\n}", "func CollectAndRecordGatherer(\n\tctx context.Context,\n\tgatherer gatherers.Interface,\n\trec recorder.Interface,\n\tconfigurator configobserver.Configurator,\n) ([]GathererFunctionReport, error) {\n\tresultsChan, err := startGatheringConcurrently(ctx, gatherer, configurator.Config().Gather)\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\n\tgathererName := gatherer.GetName()\n\n\tvar errs []error\n\tvar functionReports []GathererFunctionReport\n\n\tfor result := range resultsChan {\n\t\tif result.Panic != nil {\n\t\t\tklog.Error(fmt.Errorf(\n\t\t\t\t\"gatherer %v's function %v panicked with error: %v\",\n\t\t\t\tgathererName, result.FunctionName, result.Panic,\n\t\t\t))\n\t\t\tresult.Errs = append(result.Errs, fmt.Errorf(\"%v\", result.Panic))\n\t\t}\n\n\t\tfor _, err := range result.Errs {\n\t\t\terrStr := fmt.Sprintf(\n\t\t\t\t\"gatherer %v's function %v failed with error: %v\",\n\t\t\t\tgathererName, result.FunctionName, err,\n\t\t\t)\n\n\t\t\tif result.IgnoreErrors {\n\t\t\t\tklog.Error(errStr)\n\t\t\t} else {\n\t\t\t\terrs = append(errs, fmt.Errorf(errStr))\n\t\t\t}\n\t\t}\n\t\trecordedRecs := 0\n\t\tfor _, r := range result.Records {\n\t\t\tif err := rec.Record(r); err != nil {\n\t\t\t\trecErr := fmt.Errorf(\n\t\t\t\t\t\"unable to record gatherer %v function %v' result %v because of error: %v\",\n\t\t\t\t\tgathererName, result.FunctionName, r.Name, err,\n\t\t\t\t)\n\t\t\t\tresult.Errs = append(result.Errs, recErr)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\trecordedRecs++\n\t\t}\n\n\t\tklog.Infof(\n\t\t\t\"Gather %v's function %v took %v to process %v records\",\n\t\t\tgathererName, result.FunctionName, result.TimeElapsed, len(result.Records),\n\t\t)\n\n\t\tfunctionReports = append(functionReports, GathererFunctionReport{\n\t\t\tFuncName: fmt.Sprintf(\"%v/%v\", gathererName, result.FunctionName),\n\t\t\tDuration: result.TimeElapsed.Milliseconds(),\n\t\t\tRecordsCount: recordedRecs,\n\t\t\tErrors: errorsToStrings(result.Errs),\n\t\t\tPanic: result.Panic,\n\t\t})\n\t}\n\treturn functionReports, sumErrors(errs)\n}", "func buildReadingsIndexData(mapReadings []DbMapReading) (*MapReadingBuoyGroupsWrapper, error) {\n\t// Ensure a buoy group is not duplicated\n\tbuoyGroups := make(map[int]*MapReadingBuoyGroup)\n\n\t// For each row in the SQL result\n\tfor _, reading := range mapReadings {\n\t\tvar group *MapReadingBuoyGroup\n\t\tvar exists bool\n\t\t// If the Buoy Group doesn't exist, add it\n\t\tif group, exists = buoyGroups[reading.BuoyGroupId]; !exists {\n\t\t\tgroup = &MapReadingBuoyGroup{Id: reading.BuoyGroupId, Name: reading.BuoyGroupName}\n\t\t\tgroup.BuoyInstanceMap = make(map[int]*MapReadingBuoyInstance)\n\t\t\tbuoyGroups[reading.BuoyGroupId] = group\n\t\t}\n\n\t\tvar buoyInstance *MapReadingBuoyInstance\n\t\t// If the Buoy Instance doesn't exist within the Buoy Group, add it\n\t\tif buoyInstance, exists = group.BuoyInstanceMap[reading.BuoyInstanceId]; !exists {\n\t\t\tbuoyInstance = &MapReadingBuoyInstance{Id: reading.BuoyInstanceId, Name: reading.BuoyInstanceName}\n\t\t\tbuoyInstance.ReadingMap = make(map[int]*MapReading)\n\t\t\tgroup.BuoyInstanceMap[reading.BuoyInstanceId] = buoyInstance\n\t\t}\n\n\t\tvar mapReading *MapReading\n\t\t// If a Map Reading for the given buoy instance with the given timestamp doesn't already exist, add it\n\t\tif mapReading, exists = buoyInstance.ReadingMap[reading.ReadingId]; !exists {\n\t\t\t// Construct Reading for Buoy Instance\n\t\t\tmapReading = &MapReading{\n\t\t\t\tId: reading.ReadingId,\n\t\t\t\tLatitude: reading.Latitude,\n\t\t\t\tLongitude: reading.Longitude,\n\t\t\t\tAltitude: reading.Altitude,\n\t\t\t\tSpeedOG: reading.SpeedOG,\n\t\t\t\tCourse: reading.Course,\n\t\t\t\tTimestamp: reading.Timestamp.Unix(),\n\t\t\t}\n\t\t\t// Store the reading in the map\n\t\t\tbuoyInstance.ReadingMap[reading.ReadingId] = mapReading\n\t\t}\n\n\t\tsensorReading := 
MapSensorReading{Value: reading.Value, SensorTypeId: reading.SensorTypeId}\n\t\tmapReading.SensorReadings = append(mapReading.SensorReadings, sensorReading)\n\t}\n\n\t// All Buoy Groups which will be returned to the client\n\tbuoyGroupsWrapper := &MapReadingBuoyGroupsWrapper{BuoyGroups: make([]MapReadingBuoyGroup, 0)}\n\tfor _, buoyGroup := range buoyGroups {\n\t\tfor _, buoyInstance := range buoyGroup.BuoyInstanceMap {\n\t\t\tfor _, reading := range buoyInstance.ReadingMap {\n\t\t\t\tbuoyInstance.Readings = append(buoyInstance.Readings, *reading)\n\t\t\t}\n\t\t\tsort.Sort(byTimestamp(buoyInstance.Readings))\n\t\t\tbuoyGroup.BuoyInstances = append(buoyGroup.BuoyInstances, *buoyInstance)\n\t\t}\n\t\tbuoyGroupsWrapper.BuoyGroups = append(buoyGroupsWrapper.BuoyGroups, *buoyGroup)\n\t}\n\n\treturn buoyGroupsWrapper, nil\n}", "func ReadTestRecords(reader io.Reader) ([]*TestRecord, error) {\n\tret := []*TestRecord{}\n\tif err := json.NewDecoder(reader).Decode(&ret); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n}", "func BrokerDLQTestImpl(t *testing.T, brokerName, triggerName string) {\n\tsendCount := 5\n\topts := []rigging.Option{}\n\n\trig, err := rigging.NewInstall(opts, []string{\"rabbitmq\", \"dlq\", \"recorder\"}, map[string]string{\n\t\t\"brokerName\": brokerName,\n\t\t\"triggerName\": triggerName,\n\t\t\"producerCount\": fmt.Sprint(sendCount),\n\t})\n\tif err != nil {\n\t\tt.Fatal(\"failed to create rig, \", err)\n\t}\n\tt.Logf(\"Created a new testing rig at namespace %s.\", rig.Namespace())\n\n\t// Uninstall deferred.\n\tdefer func() {\n\t\tif err := rig.Uninstall(); err != nil {\n\t\t\tt.Error(\"failed to uninstall, \", err)\n\t\t}\n\t}()\n\n\trefs := rig.Objects()\n\tfor _, r := range refs {\n\t\tif !strings.Contains(r.APIVersion, \"knative.dev\") {\n\t\t\t// Let's not care so much about checking the status of non-knative\n\t\t\t// resources.\n\t\t\tcontinue\n\t\t}\n\t\t_, err := rig.WaitForReadyOrDone(r, 5*time.Minute)\n\t\tif err != nil {\n\t\t\tt.Fatal(\"failed to wait for ready or done, \", err)\n\t\t}\n\t}\n\n\t// TODO: we want a wait for events for x time in the future.\n\ttime.Sleep(1 * time.Minute)\n\n\t// TODO: need to validate set events.\n\tctx := Context() // TODO: there needs to be a better way to do this.\n\tc := recorder_collector.New(ctx)\n\n\tfrom := duckv1.KReference{\n\t\tKind: \"Namespace\",\n\t\tName: \"default\",\n\t\tAPIVersion: \"v1\",\n\t}\n\n\tobsName := \"recorder-\" + rig.Namespace()\n\tevents, err := c.List(ctx, from, func(ob observer.Observed) bool {\n\t\treturn ob.Observer == obsName\n\t})\n\tif err != nil {\n\t\tt.Fatal(\"failed to list observed events, \", err)\n\t}\n\n\tfor i, e := range events {\n\t\tfmt.Printf(\"[%d]: seen by %q\\n%s\\n\", i, e.Observer, e.Event)\n\t}\n\n\tgot := len(events)\n\twant := sendCount\n\tif want != got {\n\t\tt.Errorf(\"failed to observe the correct number of events, want: %d, got: %d\", want, got)\n\t}\n\n\t// Pass!\n}", "func (rf *Raft) sendAppendEntriesToMultipleFollowers() {\n for !rf.killed() {\n rf.mu.Lock()\n if rf.state != \"Leader\" {\n DLCPrintf(\"Server (%d) is no longer Leader and stop sending Heart Beat\", rf.me)\n rf.mu.Unlock()\n return\n }\n\n for i := 0; i < len(rf.peers) && rf.state == \"Leader\"; i++ {\n if i == rf.me {\n continue\n }else{\n if rf.nextIndex[i] <= rf.snapshottedIndex {\n go rf.sendInstallSnapshotToOneFollower(i, rf.log[0].Term)\n }else{\n go rf.sendAppendEntriesToOneFollower(i)\n }\n }\n }\n if rf.state != \"Leader\" {\n DLCPrintf(\"Server (%d) is no longer Leader and stop 
sending Heart Beat\", rf.me)\n rf.mu.Unlock()\n return\n }\n rf.commitEntries()\n rf.mu.Unlock()\n\n time.Sleep(100 * time.Millisecond)\n }\n}", "func SumReader(f io.Reader, rollover uint64) []byte {\n\t//jobs = append(jobs, extra...)\n\t// I've made a /guess/ about the runtime required for each thread, and sorted them in the /HOPE/ that golang will wake the low indexed goroutines first...\n\t// If I blocked the writer thread, handed back a buffered channel wakeup to it, and then continued I think I could FORCE sync... but I'm not sure that would be better, and it sounds WAY more complex.\n\tjobs := make([]parallel.SyncedWorker, 0)\n\tjobs = append(jobs, HashWorker{hash: sha3.New512(), name: \"sha3-512\"})\n\tjobs = append(jobs, HashWorker{hash: sha3.New256(), name: \"sha3-256\"})\n\tjobs = append(jobs, HashWorker{hash: sha256.New(), name: \"sha256\"})\n\tjobs = append(jobs, HashSegsWorker{hash: sha1.New(), rollover: rollover, name: \"sha1segs\"})\n\tjobs = append(jobs, HashWorker{hash: sha1.New(), name: \"sha1\"})\n\tjobs = append(jobs, HashWorker{hash: md5.New(), name: \"md5\"})\n\tjobs = append(jobs, HashSegsWorker{hash: md5.New(), rollover: rollover, name: \"md5segs\"})\n\t//const bufsize = 32 * 1024\n\n\treturn parallel.ConductorReaderWorker(f, jobs, 32*1024)\n}", "func (me *I16HEXFile) AddRecords(r ...Record) error {\n\tfor _, record := range r {\n\t\tif err := me.Add(record); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (r *distSQLReceiver) PushRow(row sqlbase.EncDatumRow) bool {\n\tif r.err != nil {\n\t\treturn false\n\t}\n\tif r.rows == nil {\n\t\tr.numRows++\n\t\treturn true\n\t}\n\tif r.row == nil {\n\t\tr.row = make(parser.DTuple, len(r.resultToStreamColMap))\n\t}\n\tfor i, resIdx := range r.resultToStreamColMap {\n\t\terr := row[resIdx].EnsureDecoded(&r.alloc)\n\t\tif err != nil {\n\t\t\tr.err = err\n\t\t\treturn false\n\t\t}\n\t\tr.row[i] = row[resIdx].Datum\n\t}\n\t// Note that AddRow accounts for the memory used by the Datums.\n\tif _, err := r.rows.AddRow(r.row); err != nil {\n\t\tr.err = err\n\t\treturn false\n\t}\n\treturn true\n}", "func readFromLowKeyRecords(records map[string][]string, keys map[string]int64, rdrs map[string]*bufio.Reader, varids map[string]string) (map[string][]string, map[string]int64, map[string]string) {\n\tlowKeys := getLowKeys(keys)\n\tfor assaytype := range lowKeys {\n\t\trecords[assaytype], keys[assaytype], varids[assaytype] = getNextRecordSlice(rdrs[assaytype])\n\t}\n\treturn records, keys, varids\n}", "func AddToZip(zip *zip.Writer, zipQ chan FetchedStream) error {\n\tfetched := <-zipQ\n\tif fetched.Err != nil {\n\t\tfmt.Printf(\"Error in fetching stream , stream name %s, err %s\", fetched.Name, fetched.Err.Error())\n\t\treturn fetched.Err\n\t}\n\tdefer fetched.Stream.Close()\n\turlEntry, err := zip.Create(fetched.Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Printf(\"Adding stream entry to zip %s\\n\", fetched.Name)\n\tdefer fetched.Stream.Close()\n\tio.Copy(urlEntry, fetched.Stream)\n\treturn nil\n}", "func ReaderReadUpToV2(scope *Scope, reader_handle tf.Output, queue_handle tf.Output, num_records tf.Output) (keys tf.Output, values tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"ReaderReadUpToV2\",\n\t\tInput: []tf.Input{\n\t\t\treader_handle, queue_handle, num_records,\n\t\t},\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0), op.Output(1)\n}", "func (jr *joinReader) mainLoop() error {\n\tprimaryKeyPrefix := sqlbase.MakeIndexKeyPrefix(&jr.desc, 
jr.index.ID)\n\n\tvar alloc sqlbase.DatumAlloc\n\tspans := make(sqlbase.Spans, 0, joinReaderBatchSize)\n\n\tif log.V(2) {\n\t\tlog.Infof(jr.ctx, \"starting (filter: %s)\", jr.filter)\n\t\tdefer log.Infof(jr.ctx, \"exiting\")\n\t}\n\n\tfor {\n\t\t// TODO(radu): figure out how to send smaller batches if the source has\n\t\t// a soft limit (perhaps send the batch out if we don't get a result\n\t\t// within a certain amount of time).\n\t\tfor spans = spans[:0]; len(spans) < joinReaderBatchSize; {\n\t\t\trow, err := jr.input.NextRow()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif row == nil {\n\t\t\t\tif len(spans) == 0 {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tkey, err := jr.generateKey(row, &alloc, primaryKeyPrefix)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tspans = append(spans, sqlbase.Span{\n\t\t\t\tStart: key,\n\t\t\t\tEnd: key.PrefixEnd(),\n\t\t\t})\n\t\t}\n\n\t\terr := jr.fetcher.StartScan(jr.flowCtx.txn, spans, 0)\n\t\tif err != nil {\n\t\t\tlog.Errorf(jr.ctx, \"scan error: %s\", err)\n\t\t\treturn err\n\t\t}\n\n\t\t// TODO(radu): we are consuming all results from a fetch before starting\n\t\t// the next batch. We could start the next batch early while we are\n\t\t// outputting rows.\n\t\tfor {\n\t\t\toutRow, err := jr.nextRow()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif outRow == nil {\n\t\t\t\t// Done.\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif log.V(3) {\n\t\t\t\tlog.Infof(jr.ctx, \"pushing row %s\\n\", outRow)\n\t\t\t}\n\t\t\t// Push the row to the output RowReceiver; stop if they don't need more\n\t\t\t// rows.\n\t\t\tif !jr.output.PushRow(outRow) {\n\t\t\t\tif log.V(2) {\n\t\t\t\t\tlog.Infof(jr.ctx, \"no more rows required\")\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\tif len(spans) != joinReaderBatchSize {\n\t\t\t// This was the last batch.\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func (s *sender) sendMetrics(ctx context.Context, flds fields) ([]metricPair, error) {\n\tvar (\n\t\tbody strings.Builder\n\t\terrs error\n\t\tdroppedRecords []metricPair\n\t\tcurrentRecords []metricPair\n\t)\n\n\tfor _, record := range s.metricBuffer {\n\t\tvar formattedLine string\n\t\tvar err error\n\n\t\tswitch s.config.MetricFormat {\n\t\tcase PrometheusFormat:\n\t\t\tformattedLine = s.prometheusFormatter.metric2String(record)\n\t\tcase Carbon2Format:\n\t\t\tformattedLine = carbon2Metric2String(record)\n\t\tcase GraphiteFormat:\n\t\t\tformattedLine = s.graphiteFormatter.metric2String(record)\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"unexpected metric format: %s\", s.config.MetricFormat)\n\t\t}\n\n\t\tif err != nil {\n\t\t\tdroppedRecords = append(droppedRecords, record)\n\t\t\terrs = multierr.Append(errs, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tar, err := s.appendAndSend(ctx, formattedLine, MetricsPipeline, &body, flds)\n\t\tif err != nil {\n\t\t\terrs = multierr.Append(errs, err)\n\t\t\tif ar.sent {\n\t\t\t\tdroppedRecords = append(droppedRecords, currentRecords...)\n\t\t\t}\n\n\t\t\tif !ar.appended {\n\t\t\t\tdroppedRecords = append(droppedRecords, record)\n\t\t\t}\n\t\t}\n\n\t\t// If data was sent, cleanup the currentTimeSeries counter\n\t\tif ar.sent {\n\t\t\tcurrentRecords = currentRecords[:0]\n\t\t}\n\n\t\t// If log has been appended to body, increment the currentTimeSeries\n\t\tif ar.appended {\n\t\t\tcurrentRecords = append(currentRecords, record)\n\t\t}\n\t}\n\n\tif body.Len() > 0 {\n\t\tif err := s.send(ctx, MetricsPipeline, strings.NewReader(body.String()), flds); err != nil {\n\t\t\terrs = multierr.Append(errs, 
err)\n\t\t\tdroppedRecords = append(droppedRecords, currentRecords...)\n\t\t}\n\t}\n\n\treturn droppedRecords, errs\n}", "func receive(q Queryer, rows *sql.Rows) ([]Queryer, error) {\n\tvar queryers []Queryer\n\tfor rows.Next() {\n\t\tqNew := q\n\t\tvalue := reflect.ValueOf(q)\n\t\tbaseType := reflect.TypeOf(value.Interface())\n\t\ttmp := reflect.New(baseType)\n\t\tptrValue := tmp.Elem()\n\t\tres := []interface{}{}\n\t\tfor i := 0; i < value.NumField(); i++ {\n\t\t\ttmp := value.Field(i).Interface()\n\t\t\tres = append(res, &tmp)\n\t\t}\n\t\tif err := rows.Scan(res...); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor i := 0; i < value.NumField(); i++ {\n\t\t\tunderlyingValue := reflect.ValueOf(res[i]).Elem()\n\t\t\tswitch v := underlyingValue.Interface().(type) {\n\t\t\tcase string:\n\t\t\t\tptrValue.Field(i).SetString(string(v))\n\t\t\t\tbreak\n\t\t\tcase []byte:\n\t\t\t\tptrValue.Field(i).SetString(string(v))\n\t\t\t\tbreak\n\t\t\tcase int64:\n\t\t\t\tptrValue.Field(i).SetInt(int64(v))\n\t\t\t\tbreak\n\t\t\tcase nil:\n\t\t\t\tbreak\n\t\t\tdefault:\n\t\t\t\treturn nil, fmt.Errorf(\"Failed to fetch value from database: %v\", underlyingValue.Interface())\n\t\t\t}\n\t\t}\n\t\tqNew = ptrValue.Interface().(Queryer)\n\t\tqueryers = append(queryers, qNew)\n\t}\n\treturn queryers, nil\n}", "func PushReport(s *discordgo.Session, db database.Database, publicAddr,\n\tguildID, executorID, victimID, reason, attachmentID string, typ report.Type) (*report.Report, error) {\n\n\trepID := snowflakenodes.NodesReport[typ].Generate()\n\n\trep := &report.Report{\n\t\tID: repID,\n\t\tType: typ,\n\t\tGuildID: guildID,\n\t\tExecutorID: executorID,\n\t\tVictimID: victimID,\n\t\tMsg: reason,\n\t\tAttachmehtURL: attachmentID,\n\t}\n\n\terr := db.AddReport(rep)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif modlogChan, err := db.GetGuildModLog(guildID); err == nil {\n\t\ts.ChannelMessageSendEmbed(modlogChan, rep.AsEmbed(publicAddr))\n\t}\n\n\tdmChan, err := s.UserChannelCreate(victimID)\n\tif err == nil {\n\t\ts.ChannelMessageSendEmbed(dmChan.ID, rep.AsEmbed(publicAddr))\n\t}\n\n\treturn rep, nil\n}", "func (c *cluster) PushTransaction(\n\tctx context.Context, pushee *enginepb.TxnMeta, h roachpb.Header, pushType roachpb.PushTxnType,\n) (*roachpb.Transaction, *roachpb.Error) {\n\tpusheeRecord, err := c.getTxnRecord(pushee.ID)\n\tif err != nil {\n\t\treturn nil, roachpb.NewError(err)\n\t}\n\tvar pusherRecord *txnRecord\n\tif h.Txn != nil {\n\t\tpusherID := h.Txn.ID\n\t\tpusherRecord, err = c.getTxnRecord(pusherID)\n\t\tif err != nil {\n\t\t\treturn nil, roachpb.NewError(err)\n\t\t}\n\n\t\tpush, err := c.registerPush(ctx, pusherID, pushee.ID)\n\t\tif err != nil {\n\t\t\treturn nil, roachpb.NewError(err)\n\t\t}\n\t\tdefer c.unregisterPush(push)\n\t}\n\tfor {\n\t\t// Is the pushee pushed?\n\t\tpusheeTxn, pusheeRecordSig := pusheeRecord.asTxn()\n\t\tvar pushed bool\n\t\tswitch pushType {\n\t\tcase roachpb.PUSH_TIMESTAMP:\n\t\t\tpushed = h.Timestamp.Less(pusheeTxn.WriteTimestamp) || pusheeTxn.Status.IsFinalized()\n\t\tcase roachpb.PUSH_ABORT, roachpb.PUSH_TOUCH:\n\t\t\tpushed = pusheeTxn.Status.IsFinalized()\n\t\tdefault:\n\t\t\treturn nil, roachpb.NewErrorf(\"unexpected push type: %s\", pushType)\n\t\t}\n\t\tif pushed {\n\t\t\treturn pusheeTxn, nil\n\t\t}\n\t\t// If PUSH_TOUCH, return error instead of waiting.\n\t\tif pushType == roachpb.PUSH_TOUCH {\n\t\t\tlog.Eventf(ctx, \"pushee not abandoned\")\n\t\t\terr := roachpb.NewTransactionPushError(*pusheeTxn)\n\t\t\treturn nil, roachpb.NewError(err)\n\t\t}\n\t\t// Or 
the pusher aborted?\n\t\tvar pusherRecordSig chan struct{}\n\t\tif pusherRecord != nil {\n\t\t\tvar pusherTxn *roachpb.Transaction\n\t\t\tpusherTxn, pusherRecordSig = pusherRecord.asTxn()\n\t\t\tif pusherTxn.Status == roachpb.ABORTED {\n\t\t\t\tlog.Eventf(ctx, \"detected pusher aborted\")\n\t\t\t\terr := roachpb.NewTransactionAbortedError(roachpb.ABORT_REASON_PUSHER_ABORTED)\n\t\t\t\treturn nil, roachpb.NewError(err)\n\t\t\t}\n\t\t}\n\t\t// Wait until either record is updated.\n\t\tselect {\n\t\tcase <-pusheeRecordSig:\n\t\tcase <-pusherRecordSig:\n\t\tcase <-ctx.Done():\n\t\t\treturn nil, roachpb.NewError(ctx.Err())\n\t\t}\n\t}\n}", "func cmdConsumer(recordChan chan recWrap) {\n\tdefer wg.Done()\n\n\tfor i := 0; i < 5; i++ {\n\t\tgo func() {\n\t\t\tfor record := range recordChan {\n\t\t\t\tcmd := strings.TrimSpace(record.input[0])\n\t\t\t\tswitch (strings.ToUpper(cmd)){\n\t\t\t\tcase \"CHECKSUM\":\n\t\t\t\t\tcs, dur := checkSum(strings.TrimSpace(record.input[1]))\n\t\t\t\t\tif cs == -1000{\n\t\t\t\t\t\tfmt.Println(\"Invalid line:\", record.input)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tfmt.Printf(\"%s,%s, %d, %v\\n\", record.input[0], record.input[1], cs, dur)\n\t\t\t\tcase \"WORDCOUNT\":\n\t\t\t\t\ttotWc, dur := wordCount(strings.TrimSpace(record.input[1]))\n\n\t\t\t\t\tif totWc == -1000{\n\t\t\t\t\t\tfmt.Println(\"Invalid line:\", record.input)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tfmt.Printf(\"%s,%s, %d, %v\\n\", record.input[0], record.input[1], totWc, dur)\n\t\t\t\tcase \"WORDFREQ\":\n\t\t\t\t\twc, dur := wordFreq(strings.TrimSpace(record.input[1]), \n\t\t\t\t\t\t\t\t\t\tstrings.TrimSpace(record.input[2]))\n\t\t\t\t\tif wc == -1000{\n\t\t\t\t\t\tfmt.Println(\"Invalid line:\", record.input)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tfmt.Printf(\"%s,%s,%s, %d, %v\\n\", record.input[0], record.input[1], record.input[2], wc, dur)\n\t\t\t\tdefault:\n\t\t\t\t\tfmt.Println(\"Invalid line: \", record.input)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n}", "func (r *routine) push(versions int64, url, username, password string) {\n\tentropy := random.New(r.id + 1)\n\n\tfor r.nCharts > 0 {\n\t\tname, err := r.generateName(entropy)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t_versions := r.versionsToCreate(versions)\n\t\tr.nCharts -= _versions\n\n\t\tfor i := _versions; i > 0; i-- {\n\t\t\tversion, err := r.generateVersion(entropy, _versions)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\treader, err := r.generateChart(name, version)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif err = r.pushChart(reader, username, password, url, false); err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n}", "func processPersonData(person1, person2 string) error {\r\n\r\n\tpn1, err := fetchData(person1)\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\r\n\tpn2, err := fetchData(person2)\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\r\n\tif len(pn1.Movies) > len(pn2.Movies) {\r\n\t\tbuff.source, buff.destination = person2, person1\r\n\t\tbuff.person1, buff.person2 = pn2, pn1\r\n\t} else {\r\n\t\tbuff.source, buff.destination = person1, person2\r\n\t\tbuff.person1, buff.person2 = pn1, pn2\r\n\t}\r\n\r\n\tfor _, movie := range buff.person2.Movies {\r\n\t\tbuff.p2Movies[movie.Url] = movie\r\n\t}\r\n\r\n\tbuff.visit = append(buff.visit, buff.source)\r\n\tbuff.visited[buff.source] = true\r\n\r\n\treturn nil\r\n}", "func (tt *Index) Push(recs ...*types.Log) error {\n\tfor _, rec := range recs {\n\t\tif len(rec.Topics) > MaxCount {\n\t\t\treturn 
ErrTooManyTopics\n\t\t}\n\t\tcount := posToBytes(uint8(1 + len(rec.Topics)))\n\n\t\tid := NewID(rec.BlockNumber, rec.TxHash, rec.Index)\n\n\t\tvar pos int\n\t\tpush := func(topic common.Hash) error {\n\t\t\tkey := topicKey(topic, uint8(pos), id)\n\t\t\terr := tt.table.Topic.Put(key, count)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tkey = otherKey(id, uint8(pos))\n\t\t\terr = tt.table.Other.Put(key, topic.Bytes())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tpos++\n\t\t\treturn nil\n\t\t}\n\n\t\tif err := push(rec.Address.Hash()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, topic := range rec.Topics {\n\t\t\tif err := push(topic); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tbuf := make([]byte, 0, common.HashLength+len(rec.Data))\n\t\tbuf = append(buf, rec.BlockHash.Bytes()...)\n\t\tbuf = append(buf, rec.Data...)\n\n\t\terr := tt.table.Logrec.Put(id.Bytes(), buf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func main() {\n\tfmt.Println(\"Start Test....!\")\n\tinputDB := setupDB(\"mysql\", \"root:root123@tcp(127.0.0.1:13306)/srcDB\")\n\textractDP := processors.NewSQLReader(inputDB, mypkg.Query(5))\n\n\toutputDB := setupDB(\"mysql\", \"root:root123@tcp(127.0.0.1:13306)/dstDB\")\n\toutputTable := \"users2\"\n\tloadDP := processors.NewSQLWriter(outputDB, outputTable)\n\n\tpipeline := ratchet.NewPipeline(extractDP, loadDP)\n\tpipeline.Name = \"My Pipeline\"\n\n\t// To see debugging output, uncomment the following lines:\n\t// logger.LogLevel = logger.LevelDebug\n\t// pipeline.PrintData = true\n\n\terr := <-pipeline.Run()\n\tif err != nil {\n\t\tlogger.ErrorWithoutTrace(pipeline.Name, \":\", err)\n\t\tlogger.ErrorWithoutTrace(pipeline.Stats())\n\t} else {\n\t\tlogger.Info(pipeline.Name, \": Completed successfully.\")\n\t}\n}" ]
[ "0.5021056", "0.48767054", "0.48742503", "0.48395205", "0.46045676", "0.4591664", "0.45517206", "0.45375064", "0.4508009", "0.44851536", "0.4476698", "0.44636384", "0.44487244", "0.44070694", "0.4406888", "0.43819577", "0.43797287", "0.43710393", "0.436893", "0.43673733", "0.4354775", "0.43413013", "0.43387398", "0.43306053", "0.43191868", "0.42927685", "0.42828256", "0.42771384", "0.4234618", "0.42202473", "0.41914436", "0.41799974", "0.41693255", "0.41691563", "0.41589856", "0.4155569", "0.41514736", "0.4147688", "0.41436806", "0.4139656", "0.41392577", "0.41343433", "0.4134165", "0.41213545", "0.41179284", "0.41137043", "0.41030788", "0.4100155", "0.40989387", "0.40922895", "0.40896985", "0.40896338", "0.40885147", "0.4083328", "0.40777206", "0.40691236", "0.40683353", "0.40532416", "0.40512675", "0.4047694", "0.40457273", "0.4045671", "0.40430194", "0.40420458", "0.40392393", "0.40388614", "0.40318626", "0.40300947", "0.4029641", "0.40294018", "0.4023721", "0.4022661", "0.40166545", "0.4016447", "0.40111625", "0.40110868", "0.40104985", "0.40043268", "0.39981595", "0.39974603", "0.399705", "0.39959365", "0.3995874", "0.3991275", "0.3987966", "0.39845064", "0.39781812", "0.3977795", "0.39751065", "0.39744338", "0.3973615", "0.3971831", "0.39661202", "0.3959719", "0.3958283", "0.3951849", "0.39513433", "0.3948812", "0.39434606", "0.39433366" ]
0.6187916
0
StartService Starts selenium server
func StartService() {
	opts := []selenium.ServiceOption{
		selenium.ChromeDriver(helpers.AutomationPath + driverPath), // Specify the path to ChromeDriver in order to use Chrome.
		selenium.Output(os.Stderr),                                 // Output debug information to STDERR.
	}
	selenium.SetDebug(false)
	selenium, err := selenium.NewSeleniumService(helpers.AutomationPath+seleniumPath, port, opts...)
	if err != nil {
		panic(err)
	}
	seleniumService = selenium
}
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "func (s *Service) StartService() {\n\tutils.Logger().Info().Msg(\"Starting explorer service.\")\n\ts.Init(true)\n\ts.server = s.Run()\n}", "func (s *Service) StartService() {\n\tutils.Logger().Info().Msg(\"Starting explorer service.\")\n\ts.Init(true)\n\ts.server = s.Run()\n}", "func (env Env) StartService(c *cli.Context) error {\n\tservice := env.GetServiceDef(c)\n\tproject, err := env.GetProject()\n\tutil.CheckErrFatal(err)\n\tstartCommand := service.RunCommand(project.Network)\n\tfmt.Println(startCommand.ToString())\n\tjsonCMD, err := json.Marshal(startCommand)\n\tutil.CheckErrFatal(err)\n\tstatus := env.SendToMaster(\"start\", jsonCMD)\n\tfmt.Println(status)\n\treturn nil\n}", "func (s JSONHTTPServer) StartService(status chan bool) {\n\tgo s.startInternal(status)\n}", "func (s JSONHTTPServer) StartService(status chan bool) {\n\tgo s.startInternal(status)\n}", "func StartService(service string) error {\n\treturn doService(service, \"start\")\n}", "func StartSelenium() {\n\tvar err error\n\tcheckWebDriver()\n\tdriver, err = core.Selenium()\n\tcheckFailure(err)\n\tcheckFailure(driver.Start())\n}", "func (rs *RatchetServer) StartService() {\n\tif rs.isStarted {\n\t\treturn\n\t}\n\trs.GenerateKeys()\n\trs.fountain.StartService()\n\trs.ticker = timesource.Clock.NewTicker(time.Minute * 5)\n\tgo func() {\n\t\tfor range rs.ticker.Chan() {\n\t\t\t// Pregenerate.\n\t\t\trs.GenerateKeys()\n\t\t\t// Call persistence.\n\t\t\tif err := rs.persist(); err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}\n\t}()\n}", "func startSeleniumServerOrCrash() *exec.Cmd {\n\tmyLog.Info(\"Starting Selenium server...\")\n\tcmd := exec.Command(\"/opt/bin/entry_point.sh\")\n\n\tstderr, err := cmd.StderrPipe()\n\tif err != nil {\n\t\tmyLog.Fatal(err)\n\t}\n\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\tmyLog.Fatal(err)\n\t}\n\n\tgo func() {\n\t\tscanner := bufio.NewScanner(stderr)\n\t\tfor scanner.Scan() {\n\t\t\tmyLog.Error(scanner.Text())\n\t\t}\n\t\tif err := scanner.Err(); err != nil {\n\t\t\tmyLog.Fatal(err)\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tscanner := bufio.NewScanner(stdout)\n\t\tfor scanner.Scan() {\n\t\t\tmyLog.Info(scanner.Text())\n\t\t}\n\t\tif err := scanner.Err(); err != nil {\n\t\t\tmyLog.Fatal(err)\n\t\t}\n\t}()\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\tmyLog.Fatal(\"Cannot start Selenium process:\", err)\n\t}\n\n\treturn cmd\n}", "func (m *mockService) Start() {\n\tif err := m.server.Bootstrap(); err != nil {\n\t\tpanic(err)\n\t}\n\tm.started = true\n}", "func StartTelemetryService() error {\n\tplatform.KillProcessByName(telemetryServiceProcessName)\n\n\ttelemetryLogger.Printf(\"[Telemetry] Starting telemetry service process\")\n\tpath := fmt.Sprintf(\"%v/%v\", cniInstallDir, telemetryServiceProcessName)\n\tif err := common.StartProcess(path); err != nil {\n\t\ttelemetryLogger.Printf(\"[Telemetry] Failed to start telemetry service process :%v\", err)\n\t\treturn err\n\t}\n\n\ttelemetryLogger.Printf(\"[Telemetry] Telemetry service started\")\n\n\tfor attempt := 0; attempt < 5; attempt++ {\n\t\tif checkIfSockExists() {\n\t\t\tbreak\n\t\t}\n\n\t\ttime.Sleep(200 * time.Millisecond)\n\t}\n\n\treturn nil\n}", "func (s *JSONHTTPServer) StartService() {\n\tgo s.startInternal()\n}", "func StartService() {\n\tlog.Printf(\"Starting weather web service\\n\")\n\tmux := http.NewServeMux()\n\n\tmux.HandleFunc(\"/weather\", weatherHandler)\n\tmux.HandleFunc(\"/health-check\", healthCheck)\n\tlog.Fatal(http.ListenAndServe(fmt.Sprintf(\":%d\", shared.WeatherPort), mux))\n}", "func (n *Node) 
StartService() {\n\tif n.stopServer != nil {\n\t\treturn\n\t}\n\n\tif n.service.listen == nil {\n\t\tn.service.listen = network.TCPListener\n\t}\n\n\tvar genesis hash.Hash\n\tif n.consensus != nil {\n\t\tgenesis = n.consensus.GetGenesisHash()\n\t}\n\n\tbind := n.NetAddrOf(n.host)\n\t_, n.Addr, n.stopServer = api.StartService(bind, n.key, genesis, n, n.Infof, n.service.listen)\n}", "func Start() {\n\tdriver.Main(func(app oswin.App) {\n\t\tatomic.AddInt32(&started, 1)\n\t\t<-quit\n\t})\n}", "func (s *AngularService) Start() (err error) {\n\tif s.options.Cd != \"\" {\n\t\tvar currDir string\n\t\tif currDir, err = os.Getwd(); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif err = os.Chdir(s.options.Cd); err != nil {\n\t\t\treturn\n\t\t}\n\t\tdefer os.Chdir(currDir)\n\t}\n\n\tvar ctx context.Context\n\n\tctx, s.ctxCancel = context.WithCancel(context.Background())\n\ts.done = ctx.Done()\n\n\tcmdArgs := []string{\"serve\"}\n\tif s.options.Port > 0 {\n\t\tcmdArgs = append(cmdArgs, \"--port\", strconv.Itoa(s.options.Port))\n\t}\n\n\tif s.options.Args != nil {\n\t\tcmdArgs = append(cmdArgs, s.options.Args...)\n\t}\n\n\tcmd := exec.CommandContext(ctx, \"ng\", cmdArgs...)\n\n\tif s.options.Stdout != nil {\n\t\tcmd.Stdout = s.options.Stdout\n\t}\n\tif s.options.Stderr != nil {\n\t\tcmd.Stderr = s.options.Stderr\n\t}\n\n\treturn cmd.Start()\n}", "func (s *JSONHTTPServer) StartService(\n\tctx context.Context,\n\tservices ...ServiceAPI,\n) <-chan struct{} {\n\tstarted := make(chan struct{})\n\n\t// This will block, so run it in a goroutine\n\tgo s.startInternal(\n\t\tctx,\n\t\tstarted,\n\t\tservices...)\n\n\treturn started\n}", "func main() {\n\tservice.StartWebServer(\"8081\")\n}", "func (s *MockServer) Start(ctx context.Context, cancelFunc context.CancelFunc) error {\n\ts.LogInfo(nil, \"starting http mock server on: %s\", s.cfg.Address)\n\tlistener, err := net.Listen(\"tcp\", s.cfg.Address)\n\tif err != nil {\n\t\treturn err\n\t}\n\tutil.StartServiceAsync(ctx, cancelFunc, s.Logger, func() error {\n\t\treturn http.Serve(listener, s)\n\t}, func() error {\n\t\treturn listener.Close()\n\t})\n\treturn nil\n}", "func (sv *Unit) Start() (err error) {\n\te := log.WithField(\"ExecStart\", sv.Definition.Service.ExecStart)\n\n\te.Debug(\"sv.Start\")\n\n\tswitch sv.Definition.Service.Type {\n\tcase \"simple\":\n\t\tif err = sv.Cmd.Start(); err == nil {\n\t\t\tgo sv.Cmd.Wait()\n\t\t}\n\tcase \"oneshot\":\n\t\terr = sv.Cmd.Run()\n\tdefault:\n\t\tpanic(\"Unknown service type\")\n\t}\n\n\te.WithField(\"err\", err).Debug(\"started\")\n\treturn\n}", "func StopService() {\n\tseleniumService.Stop()\n}", "func WDInit() selenium.WebDriver {\n\tvar err error\n\n\tops := []selenium.ServiceOption{\n\t\tselenium.ChromeDriver(seleniumPath),\n\t}\n\n\t//service, err := selenium.NewSeleniumService(seleniumPath, port, ops...)\n\tservice, err := selenium.NewChromeDriverService(chromeDriverPath, port, ops...)\n\tif err != nil {\n\t\tlog.Printf(\"Error starting the ChromeDriver server: %v\", err)\n\t}\n\t//Delay service shutdown\n\tdefer service.Stop()\n\n\t//log.Println(\"Service => \", service)\n\n\tcaps := selenium.Capabilities(map[string]interface{}{\"browserName\": \"chrome\"})\n\t//log.Println(\"Capabilities => \", caps)\n\n\tdriver, err := selenium.NewRemote(caps, \"\")\n\n\tif err != nil {\n\t\tlog.Println(\"support/base | Error al instanciar el driver de Selenium : \", err.Error())\n\t}\n\t//driver.ResizeWindow(\"note\", 1920, 1080)\n\treturn driver\n}", "func (m *MockCSIDriver) Start() error {\n\t// Listen on a port assigned by the 
net package\n\treturn m.StartOnAddress(\"tcp\", \"127.0.0.1:0\")\n}", "func StartTaskService(brain *brain.Manager, errChan chan error) {\n\tlis, err := net.Listen(\"tcp\", taskServicePort)\n\tif err != nil {\n\t\terrChan <- err\n\t\treturn\n\t}\n\n\tgrpcServer := grpc.NewServer()\n\n\tRegisterTaskServiceServer(grpcServer, TaskService{Manager: brain})\n\n\tlog.LogInfo(\"starting taask-server task service on :3688\")\n\tif err := grpcServer.Serve(lis); err != nil {\n\t\terrChan <- err\n\t}\n}", "func StartTelemetryService(path string, args []string) error {\n\tplatform.KillProcessByName(TelemetryServiceProcessName)\n\n\tlog.Logf(\"[Telemetry] Starting telemetry service process :%v args:%v\", path, args)\n\n\tif err := common.StartProcess(path, args); err != nil {\n\t\tlog.Logf(\"[Telemetry] Failed to start telemetry service process :%v\", err)\n\t\treturn err\n\t}\n\n\tlog.Logf(\"[Telemetry] Telemetry service started\")\n\n\treturn nil\n}", "func StartTelemetryService(path string, args []string) error {\n\tplatform.KillProcessByName(TelemetryServiceProcessName)\n\n\tlog.Logf(\"[Telemetry] Starting telemetry service process :%v args:%v\", path, args)\n\n\tif err := common.StartProcess(path, args); err != nil {\n\t\tlog.Logf(\"[Telemetry] Failed to start telemetry service process :%v\", err)\n\t\treturn err\n\t}\n\n\tlog.Logf(\"[Telemetry] Telemetry service started\")\n\n\treturn nil\n}", "func (s *Envoy) Start() error {\n\terr := s.cmd.Start()\n\tif err == nil {\n\t\turl := fmt.Sprintf(\"http://localhost:%v/server_info\", s.ports.AdminPort)\n\t\tWaitForHTTPServer(url)\n\t\tWaitForPort(s.ports.ClientProxyPort)\n\t\tWaitForPort(s.ports.ServerProxyPort)\n\t\tWaitForPort(s.ports.TCPProxyPort)\n\t}\n\treturn err\n}", "func StartService() *service.Service {\n\t//TODO StartService could return an error in case it fails to start\n\n\taccessManager := CreateAccessManager()\n\tmessageStore := CreateMessageStore()\n\tkvStore := CreateKVStore()\n\n\tvar cl *cluster.Cluster\n\tvar err error\n\n\tif *Config.Cluster.NodeID > 0 {\n\t\texitIfInvalidClusterParams(*Config.Cluster.NodeID, *Config.Cluster.NodePort, *Config.Cluster.Remotes)\n\t\tlogger.Info(\"Starting in cluster-mode\")\n\t\tcl, err = cluster.New(&cluster.Config{\n\t\t\tID: *Config.Cluster.NodeID,\n\t\t\tPort: *Config.Cluster.NodePort,\n\t\t\tRemotes: *Config.Cluster.Remotes,\n\t\t})\n\t\tif err != nil {\n\t\t\tlogger.WithField(\"err\", err).Fatal(\"Module could not be started (cluster)\")\n\t\t}\n\t} else {\n\t\tlogger.Info(\"Starting in standalone-mode\")\n\t}\n\n\tr := router.New(accessManager, messageStore, kvStore, cl)\n\twebsrv := webserver.New(*Config.HttpListen)\n\n\tsrv := service.New(r, websrv).\n\t\tHealthEndpoint(*Config.HealthEndpoint).\n\t\tMetricsEndpoint(*Config.MetricsEndpoint)\n\n\tsrv.RegisterModules(0, 6, kvStore, messageStore)\n\tsrv.RegisterModules(4, 3, CreateModules(r)...)\n\n\tif err = srv.Start(); err != nil {\n\t\tlogger.WithField(\"error\", err.Error()).Error(\"errors occurred while starting service\")\n\t\tif err = srv.Stop(); err != nil {\n\t\t\tlogger.WithField(\"error\", err.Error()).Error(\"errors occurred when stopping service after it failed to start\")\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn srv\n}", "func (s *GrpcServer) startGrpcService() {\n\t// Start listening for requests\n\treflection.Register(s.server)\n\tlogrus.Infof(\"%s gRPC Server ready on %s\", s.name, s.Address())\n\twaitForServer := make(chan bool)\n\ts.goServe(waitForServer)\n\t<-waitForServer\n\ts.running = true\n}", "func (p 
*PodmanTestIntegration) StartRemoteService() {\n}", "func StartDownloadService() {\n\tdownloadYoutubeDLIfRequiredWithConsoleOutput(GetConfVal(\"youtubeDownloader\"))\n\tdownloadLoop()\n}", "func Start(ctx context.Context, config *Config) error {\n\tlog.Debug(\"start application\")\n\n\tserviceRepo := service.NewRepository()\n\n\tserviceConfig := service.Config{\n\t\tNoTrackMode: config.NoTrackMode,\n\t\tConnDefaults: config.Defaults,\n\t\tConnsSettings: config.ServicesConnsSettings,\n\t\tDatabasesRE: config.DatabasesRE,\n\t\tDisabledCollectors: config.DisableCollectors,\n\t\tCollectorsSettings: config.CollectorsSettings,\n\t}\n\n\tvar wg sync.WaitGroup\n\tctx, cancel := context.WithCancel(ctx)\n\n\tif config.ServicesConnsSettings == nil || len(config.ServicesConnsSettings) == 0 {\n\t\t// run background discovery, the service repo will be fulfilled at first iteration\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tserviceRepo.StartBackgroundDiscovery(ctx, serviceConfig)\n\t\t\twg.Done()\n\t\t}()\n\t} else {\n\t\t// fulfill service repo using passed services\n\t\tserviceRepo.AddServicesFromConfig(serviceConfig)\n\n\t\t// setup exporters for all services\n\t\terr := serviceRepo.SetupServices(serviceConfig)\n\t\tif err != nil {\n\t\t\tcancel()\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Start auto-update loop if it is enabled.\n\tif config.AutoUpdate != \"off\" {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tac := &autoupdate.Config{\n\t\t\t\tBinaryPath: config.BinaryPath,\n\t\t\t\tBinaryVersion: config.BinaryVersion,\n\t\t\t\tUpdatePolicy: config.AutoUpdate,\n\t\t\t}\n\t\t\tautoupdate.StartAutoupdateLoop(ctx, ac)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\terrCh := make(chan error)\n\tdefer close(errCh)\n\n\t// Start HTTP metrics listener.\n\twg.Add(1)\n\tgo func() {\n\t\tif err := runMetricsListener(ctx, config); err != nil {\n\t\t\terrCh <- err\n\t\t}\n\t\twg.Done()\n\t}()\n\n\t// Start metrics sender if necessary.\n\tif config.SendMetricsURL != \"\" {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tif err := runSendMetricsLoop(ctx, config, serviceRepo); err != nil {\n\t\t\t\terrCh <- err\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\t// Waiting for errors or context cancelling.\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tlog.Info(\"exit signaled, stop application\")\n\t\t\tcancel()\n\t\t\twg.Wait()\n\t\t\treturn nil\n\t\tcase err := <-errCh:\n\t\t\tcancel()\n\t\t\twg.Wait()\n\t\t\treturn err\n\t\t}\n\t}\n}", "func StartSomeService(serviceName string) {\n\tsrv := service.NewDefaultService(serviceName)\n\t_ = srv.Start()\n}", "func runService(ctx context.Context, targetDir string, service string) {\n\t// Save and restore lsater current working dir\n\twd, err := os.Getwd()\n\tcheck(err)\n\tdefer os.Chdir(wd)\n\n\t// Build the server if needed\n\t_, err = os.Stat(\"./\" + service)\n\tif os.IsNotExist(err) {\n\t\tout, err := exec.Command(\"go\", \"build\", \".\").CombinedOutput()\n\t\tlog.Println(out)\n\t\tcheck(err)\n\t}\n\n\tcmd := exec.CommandContext(ctx, \"./\"+service)\n\terr = cmd.Start()\n\tcheck(err)\n}", "func Start(r chi.Router, lg *logrus.Logger, cfg *Config) {\n\ts := &http.Server{\n\t\tAddr: cfg.Port,\n\t\tReadTimeout: time.Duration(cfg.ReadTimeoutSeconds) * time.Second,\n\t\tWriteTimeout: time.Duration(cfg.WriteTimeoutSeconds) * time.Second,\n\t\tHandler: r,\n\t}\n\n\tgo func() {\n\t\tlg.Infof(\"service started from port: %v\", cfg.Port)\n\t\tif err := s.ListenAndServe(); err != nil {\n\t\t\tlg.Info(\"Shutting down the server\")\n\t\t}\n\t}()\n\n\t// Wait for interrupt signal to gracefully shutdown the server 
with\n\t// a timeout of 10 seconds.\n\tquit := make(chan os.Signal)\n\tsignal.Notify(quit, os.Interrupt)\n\t<-quit\n\tlg.Info(\"server stoped\")\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\tif err := s.Shutdown(ctx); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n}", "func (env *Environment) StartService(ctx context.Context, conf *config.Config) ([]net.Listener, error) {\n\tif err := env.InitGenesisChunks(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tenv.Listeners = []string{\n\t\tfmt.Sprintf(\"Listener(@%v)\", conf.P2P.ExternalAddress),\n\t}\n\n\tlistenAddrs := strings.SplitAndTrimEmpty(conf.RPC.ListenAddress, \",\", \" \")\n\troutes := NewRoutesMap(env, &RouteOptions{\n\t\tUnsafe: conf.RPC.Unsafe,\n\t})\n\n\tcfg := rpcserver.DefaultConfig()\n\tcfg.MaxBodyBytes = conf.RPC.MaxBodyBytes\n\tcfg.MaxHeaderBytes = conf.RPC.MaxHeaderBytes\n\tcfg.MaxOpenConnections = conf.RPC.MaxOpenConnections\n\t// If necessary adjust global WriteTimeout to ensure it's greater than\n\t// TimeoutBroadcastTxCommit.\n\t// See https://github.com/tendermint/tendermint/issues/3435\n\tif cfg.WriteTimeout <= conf.RPC.TimeoutBroadcastTxCommit {\n\t\tcfg.WriteTimeout = conf.RPC.TimeoutBroadcastTxCommit + 1*time.Second\n\t}\n\n\t// If the event log is enabled, subscribe to all events published to the\n\t// event bus, and forward them to the event log.\n\tif lg := env.EventLog; lg != nil {\n\t\t// TODO(creachadair): This is kind of a hack, ideally we'd share the\n\t\t// observer with the indexer, but it's tricky to plumb them together.\n\t\t// For now, use a \"normal\" subscription with a big buffer allowance.\n\t\t// The event log should always be able to keep up.\n\t\tconst subscriberID = \"event-log-subscriber\"\n\t\tsub, err := env.EventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{\n\t\t\tClientID: subscriberID,\n\t\t\tQuery: query.All,\n\t\t\tLimit: 1 << 16, // essentially \"no limit\"\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"event log subscribe: %w\", err)\n\t\t}\n\t\tgo func() {\n\t\t\t// N.B. Use background for unsubscribe, ctx is already terminated.\n\t\t\tdefer env.EventBus.UnsubscribeAll(context.Background(), subscriberID) // nolint:errcheck\n\t\t\tfor {\n\t\t\t\tmsg, err := sub.Next(ctx)\n\t\t\t\tif err != nil {\n\t\t\t\t\tenv.Logger.Error(\"Subscription terminated\", \"err\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tetype, ok := eventlog.FindType(msg.Events())\n\t\t\t\tif ok {\n\t\t\t\t\t_ = lg.Add(etype, msg.Data())\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\n\t\tenv.Logger.Info(\"Event log subscription enabled\")\n\t}\n\n\t// We may expose the RPC over both TCP and a Unix-domain socket.\n\tlisteners := make([]net.Listener, len(listenAddrs))\n\tfor i, listenAddr := range listenAddrs {\n\t\tmux := http.NewServeMux()\n\t\trpcLogger := env.Logger.With(\"module\", \"rpc-server\")\n\t\trpcserver.RegisterRPCFuncs(mux, routes, rpcLogger)\n\n\t\tif conf.RPC.ExperimentalDisableWebsocket {\n\t\t\trpcLogger.Info(\"Disabling websocket endpoints (experimental-disable-websocket=true)\")\n\t\t} else {\n\t\t\trpcLogger.Info(\"WARNING: Websocket RPC access is deprecated and will be removed \" +\n\t\t\t\t\"in Tendermint v0.37. 
See https://tinyurl.com/adr075 for more information.\")\n\t\t\twmLogger := rpcLogger.With(\"protocol\", \"websocket\")\n\t\t\twm := rpcserver.NewWebsocketManager(wmLogger, routes,\n\t\t\t\trpcserver.OnDisconnect(func(remoteAddr string) {\n\t\t\t\t\terr := env.EventBus.UnsubscribeAll(context.Background(), remoteAddr)\n\t\t\t\t\tif err != nil && err != tmpubsub.ErrSubscriptionNotFound {\n\t\t\t\t\t\twmLogger.Error(\"Failed to unsubscribe addr from events\", \"addr\", remoteAddr, \"err\", err)\n\t\t\t\t\t}\n\t\t\t\t}),\n\t\t\t\trpcserver.ReadLimit(cfg.MaxBodyBytes),\n\t\t\t)\n\t\t\tmux.HandleFunc(\"/websocket\", wm.WebsocketHandler)\n\t\t}\n\n\t\tlistener, err := rpcserver.Listen(\n\t\t\tlistenAddr,\n\t\t\tcfg.MaxOpenConnections,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvar rootHandler http.Handler = mux\n\t\tif conf.RPC.IsCorsEnabled() {\n\t\t\tcorsMiddleware := cors.New(cors.Options{\n\t\t\t\tAllowedOrigins: conf.RPC.CORSAllowedOrigins,\n\t\t\t\tAllowedMethods: conf.RPC.CORSAllowedMethods,\n\t\t\t\tAllowedHeaders: conf.RPC.CORSAllowedHeaders,\n\t\t\t})\n\t\t\trootHandler = corsMiddleware.Handler(mux)\n\t\t}\n\t\tif conf.RPC.IsTLSEnabled() {\n\t\t\tgo func() {\n\t\t\t\tif err := rpcserver.ServeTLS(\n\t\t\t\t\tctx,\n\t\t\t\t\tlistener,\n\t\t\t\t\trootHandler,\n\t\t\t\t\tconf.RPC.CertFile(),\n\t\t\t\t\tconf.RPC.KeyFile(),\n\t\t\t\t\trpcLogger,\n\t\t\t\t\tcfg,\n\t\t\t\t); err != nil {\n\t\t\t\t\tenv.Logger.Error(\"error serving server with TLS\", \"err\", err)\n\t\t\t\t}\n\t\t\t}()\n\t\t} else {\n\t\t\tgo func() {\n\t\t\t\tif err := rpcserver.Serve(\n\t\t\t\t\tctx,\n\t\t\t\t\tlistener,\n\t\t\t\t\trootHandler,\n\t\t\t\t\trpcLogger,\n\t\t\t\t\tcfg,\n\t\t\t\t); err != nil {\n\t\t\t\t\tenv.Logger.Error(\"error serving server\", \"err\", err)\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\n\t\tlisteners[i] = listener\n\t}\n\n\treturn listeners, nil\n\n}", "func (vs *VolMgrServer) StartService() {\n\n\tlis, err := net.Listen(\"tcp\", vs.Addr.Grpc)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed to listen on:%v\", vs.Addr.Grpc))\n\t}\n\ts := grpc.NewServer()\n\tvp.RegisterVolMgrServer(s, vs)\n\treflection.Register(s)\n\tif err := s.Serve(lis); err != nil {\n\t\tpanic(\"Failed to serve\")\n\t}\n}", "func Start(ctx context.Context, config *Config) error {\n\tlog.Debug(\"start application\")\n\n\tserviceRepo := service.NewRepository()\n\n\tserviceConfig := service.Config{\n\t\tNoTrackMode: config.NoTrackMode,\n\t\tConnDefaults: config.Defaults,\n\t\tConnsSettings: config.ServicesConnsSettings,\n\t\tDatabasesRE: config.DatabasesRE,\n\t\tDisabledCollectors: config.DisableCollectors,\n\t\tCollectorsSettings: config.CollectorsSettings,\n\t}\n\n\tvar wg sync.WaitGroup\n\tctx, cancel := context.WithCancel(ctx)\n\n\tif config.ServicesConnsSettings == nil {\n\t\t// run background discovery, the service repo will be fulfilled at first iteration\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tserviceRepo.StartBackgroundDiscovery(ctx, serviceConfig)\n\t\t\twg.Done()\n\t\t}()\n\t} else {\n\t\t// fulfill service repo using passed services\n\t\tserviceRepo.AddServicesFromConfig(serviceConfig)\n\n\t\t// setup exporters for all services\n\t\terr := serviceRepo.SetupServices(serviceConfig)\n\t\tif err != nil {\n\t\t\tcancel()\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Start auto-update loop if it is enabled.\n\tif config.AutoUpdate != \"off\" {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tac := &autoupdate.Config{\n\t\t\t\tBinaryPath: config.BinaryPath,\n\t\t\t\tBinaryVersion: config.BinaryVersion,\n\t\t\t\tUpdatePolicy: 
config.AutoUpdate,\n\t\t\t}\n\t\t\tautoupdate.StartAutoupdateLoop(ctx, ac)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\terrCh := make(chan error)\n\tdefer close(errCh)\n\n\t// Start HTTP metrics listener.\n\twg.Add(1)\n\tgo func() {\n\t\tif err := runMetricsListener(ctx, config); err != nil {\n\t\t\terrCh <- err\n\t\t}\n\t\twg.Done()\n\t}()\n\n\t// Start metrics sender if necessary.\n\tif config.SendMetricsURL != \"\" {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tif err := runSendMetricsLoop(ctx, config, serviceRepo); err != nil {\n\t\t\t\terrCh <- err\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\t// Waiting for errors or context cancelling.\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tlog.Info(\"exit signaled, stop application\")\n\t\t\tcancel()\n\t\t\twg.Wait()\n\t\t\treturn nil\n\t\tcase err := <-errCh:\n\t\t\tcancel()\n\t\t\twg.Wait()\n\t\t\treturn err\n\t\t}\n\t}\n}", "func main() {\n\tservice := service.Service{}\n\tservice.Start(\"\")\n}", "func start() {\n\n\tif LOGERR != nil {\n\t\tpanic(LOGERR)\n\t}\n\n\tservicePath := &CONFIG.ServiceConfig.Path\n\tremoveSlashes(servicePath)\n\n\tdb := new(DB)\n\tdb.getClient()\n\n\tLOGGER.Warning(fmt.Sprintf(\"Scanning %s\\n\", *servicePath))\n\n\tif _, err := os.Stat(\"/\" + *servicePath); err != nil {\n\t\tif crErr := os.Mkdir(\"/\"+*servicePath, 0755); crErr != nil {\n\t\t\tLOGGER.Crit(fmt.Sprintf(\"Scanning %s failed - directory does not exist and is not creatable\\n\", *servicePath))\n\t\t\tfmt.Printf(\"Scanning %s failed - directory does not exist and is not creatable\\n\", *servicePath)\n\t\t\tusage(1)\n\t\t}\n\t}\n\n\trunningServices := make(map[string]*Service)\n\n\t// Loop knownServices and services in directory\n\t// If differ, decide which to remove or add\n\tfor {\n\t\tservicesInDir := readServiceDir(servicePath)\n\t\tdb.createNewServicesIfNeeded(&servicesInDir, servicePath)\n\t\tknownServices := db.getServices()\n\n\t\tfor serviceName, service := range knownServices {\n\t\t\tserviceName := serviceName\n\t\t\tservice := service\n\n\t\t\tsrvDone := make(chan error, 1)\n\n\t\t\t_, ok := runningServices[serviceName]\n\t\t\tif ok != true {\n\t\t\t\t// service is not yet running\n\t\t\t\t// so start it and a logger\n\t\t\t\tgo func() {\n\t\t\t\t\terr1 := updateServicePaths(&knownServices, servicePath)\n\t\t\t\t\terr2 := removeServiceBefore(&servicesInDir, serviceName)\n\t\t\t\t\tif err1 == nil && err2 == nil {\n\t\t\t\t\t\tLOGGER.Debug(fmt.Sprintf(\"%s not yet running\\n\", serviceName))\n\t\t\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t\t\t\tsv := new(ServiceHandler)\n\t\t\t\t\t\tsv.mutex = &sync.Mutex{}\n\t\t\t\t\t\tsv.service = service\n\t\t\t\t\t\tsv.startService(srvDone, runningServices, serviceName)\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t} else {\n\t\t\t\t// the service is running\n\t\t\t\t// but might have been removed manually (rm)\n\t\t\t\terr := removeServiceAfter(&servicesInDir, serviceName, runningServices[serviceName], srvDone)\n\t\t\t\tif err == nil {\n\t\t\t\t\tLOGGER.Debug(fmt.Sprintf(\"%s already running\\n\", serviceName))\n\t\t\t\t} else {\n\t\t\t\t\tdelete(runningServices, serviceName)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\ttime.Sleep(5 * time.Second)\n\t}\n\n\tLOGGER.Warning(\"exiting\")\n}", "func Start(args ...string) {\n runInstances(\"Start\", func(i int, id string) error {\n return runDaemon(\"run\", settingsToParams(i, true)...)\n })\n\n if cfg.UseNginx {\n UpdateNginxConf()\n }\n}", "func (s *Service) Start() error {\r\n\ts.logger.Debug(\"Starting health monitor service\")\r\n\r\n\t//todo - create web servier\r\n\t//\t - expose 
prometheus endpoint from metric manager\r\n\r\n\tsAdminPort := fmt.Sprintf(\":%v\", s.adminPort)\r\n\ts.logger.Debug(fmt.Sprintf(\"running /metrics endpoint on %s\", sAdminPort))\r\n\r\n\thttp.Handle(\"/metrics\", s.metrics.MetricEndpoint())\r\n\tgo http.ListenAndServe(sAdminPort, nil)\r\n\r\n\ts.logger.Debug(\"Running metrics service\")\r\n\tgo s.metrics.Run()\r\n\r\n\t// if err := s.Admin.Serve(); err != nil {\r\n\t// \treturn fmt.Errorf(\"cannot start the admin server: %w\", err)\r\n\t// }\r\n\r\n\tfor _, val := range s.monitors {\r\n\t\ts.logger.Debug(fmt.Sprintf(\"Starting monitor %s [%T] \", val.Identifier(), val))\r\n\t\tval.Start()\r\n\t}\r\n\r\n\treturn nil\r\n}", "func (s *Service) BeforeStart(ctx context.Context) error {\n\tlog.Info(ctx, \"ui> Starting service %s %s...\", s.Cfg.Name, sdk.VERSION)\n\ts.StartupTime = time.Now()\n\n\tfromTmpl, err := s.prepareIndexHTML()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := s.checkStaticFiles(ctx); err != nil {\n\t\treturn err\n\t}\n\n\tif fromTmpl {\n\t\t// if we have a index.tmpl, it's from a ui.tar.gz\n\t\t// we can check the checksum or files based on FILES_UI\n\t\tif err := s.checkChecksumFiles(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := s.indexHTMLReplaceVar(); err != nil {\n\t\treturn err\n\t}\n\n\t//Init the http server\n\ts.initRouter(ctx)\n\ts.Server = &http.Server{\n\t\tAddr: fmt.Sprintf(\"%s:%d\", s.Cfg.HTTP.Addr, s.Cfg.HTTP.Port),\n\t\tHandler: s.Router.Mux,\n\t\tReadTimeout: 10 * time.Minute,\n\t\tWriteTimeout: 10 * time.Minute,\n\t\tMaxHeaderBytes: 1 << 20,\n\t}\n\n\t// Start the http server\n\ts.GoRoutines.Run(ctx, \"ui-http-serve\", func(ctx context.Context) {\n\t\tlog.Info(ctx, \"ui> Starting HTTP Server on port %d\", s.Cfg.HTTP.Port)\n\t\tif err := s.Server.ListenAndServe(); err != nil {\n\t\t\tlog.Error(ctx, \"ui> Listen and serve failed: %s\", err)\n\t\t}\n\t})\n\n\treturn nil\n}", "func Start() {\n\tglobalLock.Lock()\n\tif started {\n\t\tglobalLock.Unlock()\n\t\treturn\n\t}\n\tstarted = true\n\tglobalLock.Unlock()\n\n\tconst svcName = \"gopherbot\"\n\tvar err error\n\tisIntSess, err = svc.IsAnInteractiveSession()\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to determine if we are running in an interactive session: %v\", err)\n\t}\n\n\tvar installdir, localdir string\n\n\t// Process command-line flags\n\tvar configDir string\n\tcusage := \"path to the local configuration directory\"\n\tflag.StringVar(&configDir, \"config\", \"\", cusage)\n\tflag.StringVar(&configDir, \"c\", \"\", cusage+\" (shorthand)\")\n\tvar logFile string\n\tlusage := \"path to robot's log file\"\n\tflag.StringVar(&logFile, \"log\", \"\", lusage)\n\tflag.StringVar(&logFile, \"l\", \"\", lusage+\" (shorthand)\")\n\tvar winCommand string\n\tif isIntSess {\n\t\twusage := \"manage Windows service, one of: install, remove, start, stop\"\n\t\tflag.StringVar(&winCommand, \"winsvc\", \"\", wusage)\n\t\tflag.StringVar(&winCommand, \"w\", \"\", wusage+\" (shorthand)\")\n\t}\n\tflag.Parse()\n\n\tif winCommand != \"\" {\n\t\tswitch winCommand {\n\t\tcase \"install\":\n\t\t\tvar args []string\n\t\t\tif configDir != \"\" {\n\t\t\t\targs = append(args, \"-c\", configDir)\n\t\t\t}\n\t\t\terr = installService(svcName, \"Gopherbot ChatOps chat bot\", args)\n\t\tcase \"remove\":\n\t\t\terr = removeService(svcName)\n\t\tcase \"start\":\n\t\t\terr = startService(svcName)\n\t\tcase \"stop\":\n\t\t\terr = controlService(svcName, svc.Stop, svc.Stopped)\n\t\tcase \"pause\":\n\t\t\terr = controlService(svcName, svc.Pause, svc.Paused)\n\t\tcase 
\"continue\":\n\t\t\terr = controlService(svcName, svc.Continue, svc.Running)\n\t\tdefault:\n\t\t\tlog.Fatalf(\"invalid command %s\", winCommand)\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to %s %s: %v\", winCommand, svcName, err)\n\t\t}\n\t\treturn\n\t}\n\n\tif isIntSess {\n\t\tbotLogger = log.New(os.Stdout, \"\", log.LstdFlags)\n\t} else {\n\t\tif logFile == \"\" {\n\t\t\tlogFile = \"C:/Windows/Temp/gopherbot-startup.log\"\n\t\t}\n\t\tvar f *os.File\n\t\tf, err = os.Create(logFile)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"Unable to open log file\")\n\t\t}\n\t\tbotLogger = log.New(f, \"\", log.LstdFlags)\n\t}\n\tbotLogger.Println(\"Starting up ...\")\n\n\t// Installdir is where the default config and stock external\n\t// plugins are. Search some likely locations in case installDir\n\t// wasn't passed on the command line, or Gopherbot isn't installed\n\t// in one of the usual system locations.\n\tex, err := os.Executable()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tinstalldir, err = filepath.Abs(filepath.Dir(ex))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Localdir is where all user-supplied configuration and\n\t// external plugins are.\n\tconfSearchPath := []string{\n\t\tconfigDir,\n\t\t`C:/ProgramData/Gopherbot`,\n\t}\n\thome := os.Getenv(\"USERPROFILE\")\n\tif len(home) > 0 {\n\t\tconfSearchPath = append(confSearchPath, home+\"/.gopherbot\")\n\t}\n\tfor _, spath := range confSearchPath {\n\t\tif len(spath) > 0 && dirExists(spath) {\n\t\t\tlocaldir = spath\n\t\t\tbreak\n\t\t}\n\t}\n\tif len(localdir) == 0 {\n\t\tbotLogger.Println(\"Couldn't locate local configuration directory, exiting\")\n\t\tos.Exit(0)\n\t}\n\n\t// Create the 'bot and load configuration, supplying configdir and installdir.\n\t// When loading configuration, gopherbot first loads default configuration\n\t// from internal config, then loads from localdir/conf/..., which\n\t// overrides defaults.\n\tos.Setenv(\"GOPHER_INSTALLDIR\", installdir)\n\tos.Setenv(\"GOPHER_CONFIGDIR\", localdir)\n\tbotLogger.Printf(\"Starting up with local config dir: %s, and install dir: %s\\n\", localdir, installdir)\n\terr = newBot(localdir, installdir, botLogger)\n\tif err != nil {\n\t\tbotLogger.Fatal(fmt.Errorf(\"Error loading initial configuration: %v\", err))\n\t}\n\n\tconnectionStarter, ok := connectors[robot.protocol]\n\tif !ok {\n\t\tbotLogger.Fatal(\"No connector registered with name:\", robot.protocol)\n\t}\n\n\t// handler{} is just a placeholder struct for implementing the Handler interface\n\th := handler{}\n\tconn = connectionStarter(h, log.New(ioutil.Discard, \"\", 0))\n\n\t// Initialize the robot with a valid connector\n\tbotInit(conn)\n\n\t// Start the brain loop\n\tgo runBrain()\n\tif isIntSess {\n\t\t// Start the connector's main loop for interactive sessions\n\t\tconn.Run(finish)\n\t} else {\n\t\t// Stop logging to startup log when running as a service\n\t\trobot.logger.SetOutput(ioutil.Discard)\n\t\t// Started as a Windows Service\n\t\trunService(svcName)\n\t}\n}", "func (serv *webService) Start(ctx context.Context) error {\n\tvar err error\n\tserv.RPC, err = rpchttp.New(fmt.Sprintf(\"http://127.0.0.1:%d\", serv.RPCPort), \"/websocket\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tserv.Server = &http.Server{Addr: \":21478\", Handler: serv.Mux}\n\treturn serv.Server.ListenAndServe()\n}", "func (ms *MockOpenIDDiscoveryServer) Start() error {\n\trouter := mux.NewRouter()\n\trouter.HandleFunc(\"/.well-known/openid-configuration\", ms.openIDCfg).Methods(\"GET\")\n\n\tserver := &http.Server{\n\t\tAddr: \":\" + 
strconv.Itoa(ms.port),\n\t\tHandler: router,\n\t}\n\n\t// Starts the HTTP and waits for it to begin receiving requests.\n\t// Returns an error if the server doesn't serve traffic within about 2 seconds.\n\tgo func() {\n\t\tserver.ListenAndServe()\n\t}()\n\n\twait := 300 * time.Millisecond\n\tfor try := 0; try < 3; try++ {\n\t\ttime.Sleep(wait)\n\t\t// Try to call the server\n\t\tif _, err := http.Get(fmt.Sprintf(\"%s/.well-known/openid-configuration\", ms.url)); err != nil {\n\t\t\tlog.Infof(\"Server not yet serving: %v\", err)\n\t\t\t// Retry after some sleep.\n\t\t\twait *= 2\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Infof(\"Successfully serving on %s\", ms.url)\n\t\tms.HitNum = 0\n\t\tms.server = server\n\t\treturn nil\n\t}\n\n\tms.Stop()\n\treturn errors.New(\"server failed to start\")\n}", "func startWsServer(listen_addr string) {\n\t//hub = newHub()\n\tgo hub.Run()\n\n\t//http.HandleFunc(\"/\", cmdHandler)\n\thttp.HandleFunc(\"/upgrade\", func(w http.ResponseWriter, r *http.Request) {\n\t\tserveWs(hub, w, r)\n\t})\n\terr := http.ListenAndServe(listen_addr, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not listen to %s: %s\", listen_addr, err)\n\t}\n}", "func (s *Server) Start(ctx context.Context) {\n\trunCtx, cancel := context.WithCancel(ctx)\n\n\trouter := mux.NewRouter()\n\ts.registerAPI(router)\n\n\tsrv := http.Server{\n\t\tAddr: s.option.Address,\n\t\tHandler: router,\n\t}\n\n\tenableTLS := s.option.CertFile != \"\" && s.option.KeyFile != \"\"\n\n\t// enable TLS\n\tif enableTLS {\n\t\tsrv.TLSConfig = &tls.Config{\n\t\t\tServerName: s.option.Name,\n\t\t\tMinVersion: tls.VersionTLS12,\n\t\t}\n\t}\n\n\trunner.\n\t\tRun(runCtx, func(ctx context.Context) error {\n\t\t\ts.started = true\n\n\t\t\tif enableTLS {\n\t\t\t\tlog.Println(\"Starting HTTPS service\")\n\t\t\t\tlog.Printf(\"HTTPS service is started on %s\\n\", s.option.Address)\n\n\t\t\t\treturn srv.ListenAndServeTLS(s.option.CertFile, s.option.KeyFile)\n\t\t\t}\n\n\t\t\tlog.Println(\"Starting HTTP service\")\n\t\t\tlog.Printf(\"HTTP service is started on %s\\n\", s.option.Address)\n\n\t\t\treturn srv.ListenAndServe()\n\t\t}).\n\t\tHandle(func(sig os.Signal) {\n\t\t\tif sig == syscall.SIGHUP {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tlog.Println(\"Shutting down...\")\n\t\t\tcancel()\n\t\t})\n\n}", "func RunService() {\n\tforever := make(chan bool)\n\tworkers := 10\n\tlog.Printf(\"Create %d Workers \\n\", workers)\n\tcreateWorker(workers)\n\t<-forever\n}", "func startServer() {\r\n\t//G = NewGroup(ConnectionMax)\r\n\tserver := httptest.NewServer(new(myServer))\r\n\tserverAddr = server.Listener.Addr().String()\r\n\tlog.Print(\"Test WebSocket server listening on \", serverAddr)\r\n\r\n}", "func (server *testHTTPServerImpl) Start() {\n\tbinding := fmt.Sprintf(\":%d\", server.GetPort())\n\tsrv := &http.Server{\n\t\tAddr: binding,\n\t\tHandler: server.router,\n\t\tReadHeaderTimeout: 5 * time.Second,\n\t}\n\tgo func() {\n\t\trootFolder, err := GetRootFolder()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Failed to get root folder of project: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tcertFile := fmt.Sprintf(\"%s/testutils/test-server.pem\", rootFolder)\n\t\tkeyFile := fmt.Sprintf(\"%s/testutils/test-server.key\", rootFolder)\n\t\tif err = srv.ListenAndServeTLS(certFile, keyFile); !errors.Is(err, http.ErrServerClosed) {\n\t\t\tlog.Fatalf(\"Failed to start http server using binding %s: %s\", binding, err)\n\t\t}\n\n\t}()\n\tserver.httpServer = srv\n\n\tserver.waitForServerAlive()\n}", "func (ms *MockServer) Start() error {\n\trouter := 
mux.NewRouter()\n\trouter.HandleFunc(\"/v1/identitybindingtoken\", ms.getFederatedToken).Methods(\"POST\")\n\n\tserver := &http.Server{\n\t\tAddr: \":\",\n\t\tHandler: router,\n\t}\n\tln, err := net.Listen(\"tcp\", \":0\")\n\tif err != nil {\n\t\tlog.Errorf(\"Server failed to listen %v\", err)\n\t\treturn err\n\t}\n\n\tport := ln.Addr().(*net.TCPAddr).Port\n\tms.Port = port\n\tms.URL = fmt.Sprintf(\"http://localhost:%d\", port)\n\tserver.Addr = \":\" + strconv.Itoa(port)\n\n\tgo func() {\n\t\tif err := server.Serve(ln); err != nil {\n\t\t\tlog.Errorf(\"Server failed to serve in %q: %v\", ms.URL, err)\n\t\t}\n\t}()\n\n\t// sleep a while for mock server to start.\n\ttime.Sleep(time.Second)\n\n\treturn nil\n}", "func (sw *Switcher) Start() {\n\tsw.Server.Start()\n}", "func (s *TestServer) run() error {\n\n\tret := s.cmd.Start()\n\n\tch := make(chan error)\n\n\t// we wait for LaunchWaitTimeout and see if the server quit due to an error\n\tgo func() {\n\t\terr := s.cmd.Wait()\n\t\tselect {\n\t\tcase ch <- err:\n\t\tdefault:\n\t\t}\n\t}()\n\n\tselect {\n\tcase e := <-ch:\n\t\tlog.Println(\"Error waiting for process:\", e)\n\t\treturn e\n\tcase <-time.After(launchWaitTimeout):\n\t\tbreak\n\n\t}\n\n\treturn ret\n}", "func (s *Server) Start() error {\n\tidleConnsClosed := make(chan struct{})\n\n\tgo func() {\n\t\tsigint := make(chan os.Signal, 1)\n\t\tsignal.Notify(sigint, syscall.SIGINT, syscall.SIGTERM)\n\t\t<-sigint\n\n\t\ts.logger.Info(\"Shutting down HTTP server\")\n\n\t\terr := s.httpServer.Shutdown()\n\t\tfailpoint.Inject(\"shutdownErr\", func() {\n\t\t\terr = errors.New(\"mock shutdown error\")\n\t\t})\n\t\tif err != nil {\n\t\t\ts.logger.Error(\"srv.Shutdown: %v\", zap.Error(err))\n\t\t}\n\t\ts.logger.Info(\"HTTP server is stopped\")\n\n\t\tclose(idleConnsClosed)\n\t}()\n\n\ts.logger.Info(\"Starting HTTP server\", zap.String(\"address\", s.addr))\n\terr := s.httpServer.ListenAndServe(s.addr)\n\tfailpoint.Inject(\"listenAndServeErr\", func() {\n\t\terr = errors.New(\"mock listen and serve error\")\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"ListenAndServe error: %w\", err)\n\t}\n\n\t<-idleConnsClosed\n\n\tif err := s.afterShutdown(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func startServer(t *testing.T, server *http.Server) {\n\tgo func() {\n\t\terr := server.ListenAndServe()\n\t\tif !errors.Is(err, http.ErrServerClosed) {\n\t\t\trequire.NoError(t, err)\n\t\t}\n\t}()\n}", "func Start() {\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\tlog.Printf(\"http server started on %s\\n\", server.Server.Addr)\n\t\tif err := server.StartServer(server.Server); err != nil && err != http.ErrServerClosed {\n\t\t\tlog.Fatalf(\"error starting http server: %v\", err)\n\t\t}\n\t\tlog.Println(\"http server stopped\")\n\t}()\n}", "func (srv *Server) Start() {\n\tvar config *rest.Config\n\tvar err error\n\n\tif strings.ToUpper(srv.RunMode) == \"KUBE\" {\n\t\t// Create the Kubernetes in-cluster config\n\t\tconfig, err = rest.InClusterConfig()\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t} else {\n\t\t// use the current context in kubeconfig\n\t\tconfig, err = clientcmd.BuildConfigFromFlags(\"\", filepath.Join(util.HomeDir(), \".kube\", \"config\"))\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t}\n\n\t// Create the clientset\n\tclientset, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\n\t// Create a watcher\n\twatcher, err := clientset.CoreV1().Services(\"\").Watch(metav1.ListOptions{})\n\tif err != nil 
{\n\t\tpanic(err.Error())\n\t}\n\n\t// Create a channel for the events to come in from the watcher\n\teventChannel := watcher.ResultChan()\n\n\t// Start an indefinite loop\n\tfor {\n\t\tevt := <-eventChannel\n\t\tsrv.handleEvent(evt)\n\t}\n}", "func (ms *MicroService) Start() {\n \tms.StartOnPort(8080)\n}", "func startServer(wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\n\tlistener, err := net.Listen(\"tcp\", fmt.Sprintf(\"%s:%d\", MyHandle.Host, MyHandle.Port))\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to startServer: %v\", err)\n\t}\n\n\tgrpcServer := grpc.NewServer()\n\tapi.RegisterGoChatServer(grpcServer, &chatServer{})\n\n\terr = grpcServer.Serve(listener)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to serve: %v\", err)\n\t}\n}", "func start_cs() *gsi.MockServer {\n\treturn gsi.Start_cs(true)\n}", "func (a *API) StartService(serviceID string) error {\n\treturn newServiceStarter(a).Start(serviceID)\n}", "func RunService(isDebug bool) {\n\tvar err error\n\tif isDebug {\n\t\telog = debug.New(config.ServiceName)\n\t} else {\n\t\telog, err = eventlog.Open(config.ServiceName)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tdefer elog.Close()\n\n\telog.Info(0x40000007, config.ServiceName)\n\trun := svc.Run\n\n\terr = run(config.ServiceName, &agentWindowsService{})\n\tif err != nil {\n\t\telog.Error(0xc0000008, err.Error())\n\t\treturn\n\t}\n\telog.Info(0x40000004, config.ServiceName)\n}", "func StartService() *Service {\n\treturn &Service{\n\t\tstreamch: hwinfo.StreamSharedMem(),\n\t}\n}", "func (s *server) Start(stop <-chan struct{}) error {\n\tlistener, err := newListener(s.bindAddress)\n\tif err != nil {\n\t\treturn err\n\t}\n\tserver := http.Server{\n\t\tHandler: s.mux,\n\t}\n\t// Run the server\n\tgo func() {\n\t\tlog.Info(\"starting http server\")\n\t\tif err := server.Serve(listener); err != nil && err != http.ErrServerClosed {\n\t\t\tlog.Error(err, \"http server error\")\n\t\t}\n\t}()\n\n\t// Shutdown the server when stop is close\n\t<-stop\n\treturn server.Shutdown(context.Background())\n}", "func Service() {\n\ts, err := NewServerFromOptions()\n\tif err != nil {\n\t\tlog.Fatal(err, \"Error starting server\")\n\t}\n\tgo s.Serve()\n}", "func Start() {\n\twebServer.Engine.Run(\":\" + strconv.Itoa(cfg.Read().App.WebServerPort))\n}", "func ServiceStart(service string, cmd string) (bool, error) {\n\tlog.Infof(\"ServiceStart called for '%s'\\n\", service)\n\tsvcStatus, pid, err := GetServiceStatus(service)\n\tif err != nil {\n\t\treturn false, errors.New(\"Could not get status for '\" + service + \"' : \" + err.Error())\n\t} else if svcStatus == SvcRunning && cmd == \"start\" {\n\t\tlog.Infof(\"service '%s' is already running, pid: %d\\n\", service, pid)\n\t} else {\n\t\t_, rc, err := ExecCommand(\"/usr/sbin/service\", service, cmd)\n\t\tif err != nil {\n\t\t\treturn false, errors.New(\"Could not \" + cmd + \" the '\" + service + \"' service: \" + err.Error())\n\t\t} else if rc == 0 {\n\t\t\t// service was sucessfully started\n\t\t\treturn true, nil\n\t\t}\n\t}\n\t// not started, service is already running\n\treturn false, nil\n}", "func (service *APIService) Start() error {\n\tlis, err := net.Listen(\"tcp\", fmt.Sprintf(\":%d\", service.port))\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to listen to TCP port %d for RPC\", service.port)\n\t}\n\n\tlog.Info(\"Server started\", \"address\", service.Address)\n\tif err := service.GrpcServer.Serve(lis); err != http.ErrServerClosed {\n\t\treturn errors.Wrapf(err, \"failed to start HTTP server\")\n\t}\n\treturn nil\n}", "func (s 
*serviceStarter) Start(serviceID string) error {\n\tsr, err := s.api.db.Get(serviceID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsr, err = service.FromService(sr, service.ContainerOption(s.api.container))\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = sr.Start()\n\treturn err\n}", "func (s *Service) Start() {\n\tr := NewRouter()\n\thttp.Handle(\"/\", r)\n\n\tport := fmt.Sprintf(\"%d\", s.Port)\n\tlog.Println(\"Starting HTTP service at \" + port)\n\n\terr := http.ListenAndServe(\"0.0.0.0:\"+port, nil)\n\tif err != nil {\n\t\tlog.Println(\"An error occured starting HTTP listener at port \" + port)\n\t\tlog.Println(\"Error: \" + err.Error())\n\t}\n}", "func (mng *Manager) Start() {\n\tgo func() {\n\t\tif err := mng.srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {\n\t\t\tmng.lgr.Fatal(\"Error starting HTTP service: \" + err.Error())\n\t\t}\n\t}()\n}", "func (w *Webserver) Start() error {\n\n\t// listenAndServe the server\n\tgo func() {\n\t\tw.logger.Infof(\"Http server listening at %d!\", w.config.Port)\n\t\terr := w.listenAndServe()\n\t\tif err != nil && err != http.ErrServerClosed {\n\t\t\tw.logger.Errorw(fmt.Sprintf(\"webserver listening at port [%v] stopped\", w.config.Port), \"error\", err.Error())\n\t\t}\n\t}()\n\n\treturn nil\n}", "func TestServiceStart(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, nil)\n}", "func startServer(dataSlice []string) {\n\te := echo.New()\n\n\te.GET(\"/\", func(f echo.Context) error {\n\t\treturn f.JSON(http.StatusOK, dataSlice)\n\t})\n\n\tfmt.Println(\"Server running: http://localhost:8000\")\n\te.Logger.Fatal(e.Start(\":8000\"))\n}", "func StartChrome() {\n\tvar err error\n\tcheckWebDriver()\n\tdriver, err = core.Chrome()\n\tcheckFailure(err)\n\tcheckFailure(driver.Start())\n}", "func main() {\n\tdrv := serverless.NewDriver(os.Args[1]) // the 1st cmd-line argument: driver hostname and ip addr\n\tserviceName := os.Args[2] // the 2nd cmd-line argument: plugin Service name\n\n\tgo drv.Run(serviceName)\n\n\tdrv.Wait()\n}", "func (srv Web) Start() error {\n\tfmt.Printf(\"Starting service on port %s\\n\", srv.Settings.Port)\n\treturn http.ListenAndServe(srv.Settings.Port, srv.Router())\n}", "func Start(logger xlog.Logger) error {\n\tconst op string = \"chrome.Start\"\n\tlogger.DebugOp(op, \"starting new Google Chrome headless process on port 9222...\")\n\tresolver := func() error {\n\t\tcmd, err := cmd(logger)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// we try to start the process.\n\t\txexec.LogBeforeExecute(logger, cmd)\n\t\tif err := cmd.Start(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// if the process failed to start correctly,\n\t\t// we have to restart it.\n\t\tif !isViable(logger) {\n\t\t\treturn restart(logger, cmd.Process)\n\t\t}\n\t\treturn nil\n\t}\n\tif err := resolver(); err != nil {\n\t\treturn xerror.New(op, err)\n\t}\n\treturn nil\n}", "func (sn *SimNode) Start(snapshots map[string][]byte) error {\n\tnewService := func(name string) func(ctx *node.ServiceContext) (node.Service, error) {\n\t\treturn func(nodeCtx *node.ServiceContext) (node.Service, error) {\n\t\t\tctx := &ServiceContext{\n\t\t\t\tRPCDialer: sn.adapter,\n\t\t\t\tNodeContext: nodeCtx,\n\t\t\t\tConfig: sn.config,\n\t\t\t}\n\t\t\tif snapshots != nil {\n\t\t\t\tctx.Snapshot = snapshots[name]\n\t\t\t}\n\t\t\tserviceFunc := sn.adapter.services[name]\n\t\t\tservice, err := serviceFunc(ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, 
err\n\t\t\t}\n\t\t\tsn.running[name] = service\n\t\t\treturn service, nil\n\t\t}\n\t}\n\n\t// ensure we only register the services once in the case of the node\n\t// being stopped and then started again\n\tvar regErr error\n\tsn.registerOnce.Do(func() {\n\t\tfor _, name := range sn.config.Services {\n\t\t\tif err := sn.node.Register(newService(name)); err != nil {\n\t\t\t\tregErr = err\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t})\n\tif regErr != nil {\n\t\treturn regErr\n\t}\n\n\tif err := sn.node.Start(); err != nil {\n\t\treturn err\n\t}\n\n\t// create an in-process RPC client\n\thandler, err := sn.node.RPCHandler()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsn.lock.Lock()\n\tsn.client = rpc.DialInProc(handler)\n\tsn.lock.Unlock()\n\n\treturn nil\n}", "func StartTestService(t *testing.T) {\n\tdb, err := sqorc.Open(\"sqlite3\", \":memory:\")\n\trequire.NoError(t, err)\n\tstartService(t, db)\n}", "func (s *service) Start() error {\n\tservice := &http.Server{Addr: s.host, Handler: s.mux}\n\n\treturn service.ListenAndServe()\n}", "func StartReplicatorService() {\n\tserviceName := common.ReplicatorServiceName\n\tcfg := common.SetupServerConfig(configure.NewCommonConfigure())\n\tif e := os.Setenv(\"port\", fmt.Sprintf(\"%d\", cfg.GetServiceConfig(serviceName).GetPort())); e != nil {\n\t\tlog.Panic(e)\n\t}\n\n\tmeta, err := metadata.NewCassandraMetadataService(cfg.GetMetadataConfig())\n\tif err != nil {\n\t\t// no metadata service - just fail early\n\t\tlog.WithField(common.TagErr, err).Fatal(`frontendhost: unable to instantiate metadata service`)\n\t}\n\thwInfoReader := common.NewHostHardwareInfoReader(meta)\n\treporter := common.NewMetricReporterWithHostname(cfg.GetServiceConfig(serviceName))\n\tdClient := dconfigclient.NewDconfigClient(cfg.GetServiceConfig(serviceName), serviceName)\n\tsCommon := common.NewService(serviceName, uuid.New(), cfg.GetServiceConfig(serviceName), common.NewUUIDResolver(meta), hwInfoReader, reporter, dClient, common.NewBypassAuthManager())\n\n\th, tc := replicator.NewReplicator(serviceName, sCommon, meta, replicator.NewReplicatorClientFactory(cfg, common.GetDefaultLogger()), cfg)\n\th.Start(tc)\n\n\t// start websocket server\n\tcommon.WSStart(cfg.GetServiceConfig(serviceName).GetListenAddress().String(),\n\t\tcfg.GetServiceConfig(serviceName).GetWebsocketPort(), h)\n\n\t// start diagnosis local http server\n\tcommon.ServiceLoop(cfg.GetServiceConfig(serviceName).GetPort()+diagnosticPortOffset, cfg, sCommon)\n}", "func (s *ServiceManager) StartService(name string) error {\n\t// Return error if service has already been started\n\tfor id, service := range s.services {\n\t\tif strings.IndexAny(id, name) >= 0 && service != nil {\n\t\t\treturn fmt.Errorf(\"The service '%s' has already been started\", name)\n\t\t}\n\t}\n\tch := make(chan ServiceCommand)\n\tservice, err := CreateService(name, s.config, ch)\n\tif err != nil {\n\t\tglog.Errorf(\"%s\", err)\n\t} else {\n\t\tglog.Infof(\"Create service '%s' success\", name)\n\t\ts.services[name] = service\n\t\ts.chs[name] = ch\n\t}\n\treturn nil\n}", "func (ser *Server) Start() error {\n\tlog.Printf(\"System webapp start at %s\", ser.addr)\n\treturn manners.ListenAndServe(ser.addr, ser.m)\n}", "func RunService(serviceinfo *ServiceInfo, handler ServiceHandler) error {\n\taddress, err := GetRegistryAddress()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconnection, err := net.DialTCP(TCP_PROTOCOL, nil, address)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer connection.Close()\n\n\tlistener, err := net.ListenTCP(TCP_PROTOCOL, 
TCP_ANY_ADDR)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer listener.Close()\n\n\taddress, err = net.ResolveTCPAddr(TCP_PROTOCOL, listener.Addr().String())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbytes, err := json.Marshal(ServiceInfoAddress{strconv.Itoa(address.Port), *serviceinfo})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = connection.Write(bytes)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor {\n\t\tconnection, err := listener.AcceptTCP()\n\t\tif err == nil {\n\t\t\tgo handleServiceConnection(connection, handler)\n\t\t}\n\t}\n\n\treturn nil\n}", "func startHTTPServer(ch chan<- bool) {\n\tserver := http.Server{\n\t\tAddr: \":80\",\n\t}\n\tlog.Println(\"HTTP server started (listening on port 80).\")\n\tlog.Println(\"HTTP server stopped with error:\", server.ListenAndServe())\n\tch <- true\n}", "func (s * Service)Start(port string) {\n\tr := chi.NewRouter()\n\n\tr.Use(middleware.Logger)\n\tr.Use(middleware.Timeout(5 * time.Second))\n\n\trpc := rpc.RPC{\n\t\tApp: &app.App{},\n\t}\n\tif len(port) == 0 {\n\t\t// default port 3000\n\t\tport = \"3000\"\n\t}\n\tr.Post(\"/generate_pricing\", rpc.GeneratePricing)\n\tr.Get(\"/generate_pricing\", rpc.GeneratePricingConfig)\n\ts.ListenAndServe(\":\"+port, r)\n}", "func (b *Bot) Start(stop <-chan struct{}) error {\n\tlog.Info(\"start the bot ...\")\n\n\t// Now start all of the components.\n\tif err := b.server.Start(stop); err != nil {\n\t\treturn err\n\t}\n\n\tb.waitForShutdown(stop)\n\n\tlog.Info(\"bot started\")\n\treturn nil\n}", "func (d *Driver) Start() error {\n\tclient, err := d.getClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn client.StartVirtualMachine(d.vmName())\n}", "func StartServer() {\n\tif server == nil {\n\t\tGetInstance()\n\t}\n\n\tlog.Println(\"starting server on http://localhost\" + defaultPort)\n\tserver.Run(defaultPort)\n}", "func StartService(ctx context.Context, opener entroq.BackendOpener) (*grpc.Server, Dialer, error) {\n\tlis := bufconn.Listen(bufSize)\n\tsvc, err := qsvc.New(ctx, opener)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"start service: %w\", err)\n\t}\n\ts := grpc.NewServer()\n\thpb.RegisterHealthServer(s, health.NewServer())\n\tpb.RegisterEntroQServer(s, svc)\n\tgo s.Serve(lis)\n\n\treturn s, lis.Dial, nil\n}", "func StartService() {\n\tlogger, err := zap.NewProduction()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdefer func() {\n\t\t_ = logger.Sync()\n\t}()\n\n\terr = setupDependencies(logger)\n\tif err != nil {\n\t\tlogger.Fatal(\"Failed to setup dependecies\", zap.Error(err))\n\t}\n\n\thttpsTransportService, err := https.NewTransportService(\n\t\tlogger,\n\t\tconfigurationService,\n\t\tendpointCreatorService,\n\t\tmiddlewareProviderService)\n\tif err != nil {\n\t\tlogger.Fatal(\"Failed to create GraphQL transport service\", zap.Error(err))\n\t}\n\n\tsignalChan := make(chan os.Signal, 1)\n\tcleanupDone := make(chan struct{})\n\tsignal.Notify(signalChan, os.Interrupt)\n\n\tgo func() {\n\t\tif serviceErr := httpsTransportService.Start(); serviceErr != nil {\n\t\t\tlogger.Fatal(\"Failed to start HTTPS transport service\", zap.Error(serviceErr))\n\t\t}\n\t}()\n\n\tgo func() {\n\t\t<-signalChan\n\t\tlogger.Info(\"Received an interrupt, stopping services...\")\n\n\t\tif err := httpsTransportService.Stop(); err != nil {\n\t\t\tlogger.Error(\"Failed to stop HTTPS transport service\", zap.Error(err))\n\t\t}\n\n\t\tclose(cleanupDone)\n\t}()\n\t<-cleanupDone\n}", "func (server *TempestServer) Start() {\n\tif server.running {\n\t\treturn\n\t}\n\n\tserver.running = 
true\n\n\tserver.serverWaitGroup.Add(1)\n\tgo server.startServer()\n\tserver.serverWaitGroup.Wait()\n}", "func start() error {\n\tcfg, err := config.NewConfig()\n\tif err != nil {\n\t\treturn xerrors.Errorf(\"get config: %w\", err)\n\t}\n\n\trestclientCfg, apiShutdown, err := k8sapiserver.StartAPIServer(cfg.EtcdURL)\n\tif err != nil {\n\t\treturn xerrors.Errorf(\"start API server: %w\", err)\n\t}\n\tdefer apiShutdown()\n\n\tclient := clientset.NewForConfigOrDie(restclientCfg)\n\n\tpvshutdown, err := pvcontroller.StartPersistentVolumeController(client)\n\tif err != nil {\n\t\treturn xerrors.Errorf(\"start pv controller: %w\", err)\n\t}\n\tdefer pvshutdown()\n\n\tsched := scheduler.NewSchedulerService(client, restclientCfg)\n\n\tsc, err := defaultconfig.DefaultSchedulerConfig()\n\tif err != nil {\n\t\treturn xerrors.Errorf(\"create scheduler config\")\n\t}\n\n\tif err := sched.StartScheduler(sc); err != nil {\n\t\treturn xerrors.Errorf(\"start scheduler: %w\", err)\n\t}\n\tdefer sched.ShutdownScheduler()\n\n\terr = scenario(client)\n\tif err != nil {\n\t\treturn xerrors.Errorf(\"start scenario: %w\", err)\n\t}\n\n\treturn nil\n}", "func (s *LocalService) Start() error {\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, route := range s.routes {\n\t\tpath := filepath.Join(s.chroot, route.Username)\n\t\tif err := watcher.Add(path); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ts.watcher = watcher\n\n\tgo s.watch()\n\n\treturn nil\n}", "func StartControllerService() {\n\tserviceName := common.ControllerServiceName\n\tcfg := common.SetupServerConfig(configure.NewCommonConfigure())\n\tif e := os.Setenv(\"port\", fmt.Sprintf(\"%d\", cfg.GetServiceConfig(serviceName).GetPort())); e != nil {\n\t\tlog.Panic(e)\n\t}\n\n\tmeta, err := metadata.NewCassandraMetadataService(cfg.GetMetadataConfig())\n\tif err != nil {\n\t\t// no metadata service - just fail early\n\t\tlog.WithField(common.TagErr, err).Fatal(`unable to instantiate metadata service (did you run ./scripts/setup_cassandra_schema.sh?)`)\n\t}\n\thwInfoReader := common.NewHostHardwareInfoReader(meta)\n\treporter := common.NewMetricReporterWithHostname(cfg.GetServiceConfig(serviceName))\n\tdClient := dconfigclient.NewDconfigClient(cfg.GetServiceConfig(serviceName), serviceName)\n\tsVice := common.NewService(serviceName, uuid.New(), cfg.GetServiceConfig(serviceName), common.NewUUIDResolver(meta), hwInfoReader, reporter, dClient, common.NewBypassAuthManager())\n\tmcp, tc := controllerhost.NewController(cfg, sVice, meta, common.NewDummyZoneFailoverManager())\n\tmcp.Start(tc)\n\tcommon.ServiceLoop(cfg.GetServiceConfig(serviceName).GetPort()+diagnosticPortOffset, cfg, mcp.Service)\n}", "func StartHTTPServer() chan struct{} {\n\thttpDone := make(chan struct{}, 1)\n\tgo func() {\n\t\tcfg := struct {\n\t\t\tHTTPListenPort string `mapstructure:\"httpListenPort\"`\n\t\t}{}\n\t\tapplyConfig(\"\", &cfg)\n\n\t\tloginfof(\"Starting http listen server on :%s\", cfg.HTTPListenPort)\n\t\tif err := http.ListenAndServe(\":\"+cfg.HTTPListenPort, nil); err != nil {\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\thttpDone <- struct{}{}\n\t}()\n\treturn httpDone\n}", "func (d *Driver) Start() error {\n\tcs := d.client()\n\t_, err := cs.AsyncRequest(&egoscale.StartVirtualMachine{\n\t\tID: d.ID,\n\t}, d.async)\n\n\treturn err\n}", "func Start() error {\n\tvar service Service\n\n\tservice.router = InitRouter()\n\tservice.RegisterRoutes()\n\n\treturn service.router.Run(\":8080\")\n}" ]
[ "0.65938157", "0.65938157", "0.65253013", "0.63578004", "0.63578004", "0.63268375", "0.6300758", "0.6254333", "0.6249277", "0.62008077", "0.6186681", "0.61138135", "0.6103919", "0.60101557", "0.6006752", "0.5996253", "0.59815216", "0.596186", "0.5913349", "0.5891949", "0.58751696", "0.5874221", "0.5873712", "0.5832628", "0.5798888", "0.5798888", "0.57734156", "0.577121", "0.57684267", "0.5760323", "0.57325953", "0.5721237", "0.5712176", "0.57081974", "0.57066774", "0.57019943", "0.56869763", "0.5671978", "0.56538475", "0.5651428", "0.5650993", "0.56385946", "0.56006277", "0.559885", "0.55773294", "0.55735284", "0.5573453", "0.55598885", "0.5558409", "0.55465", "0.55432373", "0.5541942", "0.5532239", "0.55245763", "0.55112725", "0.55082595", "0.5500279", "0.549475", "0.54833513", "0.5482305", "0.5479315", "0.54766953", "0.5475253", "0.54746467", "0.5467663", "0.54675364", "0.5457162", "0.545079", "0.54470515", "0.54467976", "0.5444495", "0.54356915", "0.54342395", "0.5432754", "0.5430014", "0.54297423", "0.542744", "0.54261315", "0.54211855", "0.5417305", "0.54162264", "0.540503", "0.5399878", "0.5399812", "0.5395128", "0.5394582", "0.5391496", "0.5387302", "0.5376532", "0.53752327", "0.5371384", "0.53670377", "0.5364234", "0.5359883", "0.5352933", "0.5345683", "0.5345256", "0.5339273", "0.53381175", "0.5334798" ]
0.79222965
0
StopService stops the selenium server
func StopService() { seleniumService.Stop() }
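The document above calls Stop on a package-level seleniumService handle without showing where that handle comes from. A minimal sketch, assuming the github.com/tebeka/selenium bindings that also appear in the surrounding snippets; the jar path, port, and package wiring are illustrative assumptions rather than values taken from the record:

package seleniumutil

import (
	"log"

	"github.com/tebeka/selenium"
)

// seleniumService holds the running Selenium server handle so it can be stopped later.
var seleniumService *selenium.Service

// StartService launches a standalone Selenium server. The jar path and port are
// placeholders for illustration only.
func StartService() error {
	const (
		seleniumPath = "vendor/selenium-server.jar" // assumed location of the server jar
		port         = 4444                         // assumed listening port
	)
	svc, err := selenium.NewSeleniumService(seleniumPath, port)
	if err != nil {
		return err
	}
	seleniumService = svc
	return nil
}

// StopService stops the selenium server, as in the document snippet above, but also
// logs a failed shutdown and clears the handle.
func StopService() {
	if seleniumService == nil {
		return
	}
	if err := seleniumService.Stop(); err != nil {
		log.Printf("failed to stop selenium service: %v", err)
	}
	seleniumService = nil
}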
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func StopService() {\n\tserver.Stop()\n}", "func (n *Node) StopService() {\n\tif n.stopServer == nil {\n\t\treturn\n\t}\n\tn.stopServer()\n\tn.stopServer = nil\n}", "func (s *Service) StopService() {\n\tutils.Logger().Info().Msg(\"Shutting down explorer service.\")\n\tif err := s.server.Shutdown(context.Background()); err != nil {\n\t\tutils.Logger().Error().Err(err).Msg(\"Error when shutting down explorer server\")\n\t} else {\n\t\tutils.Logger().Info().Msg(\"Shutting down explorer server successufully\")\n\t}\n}", "func (s *Service) StopService() {\n\tutils.Logger().Info().Msg(\"Shutting down explorer service.\")\n\tif err := s.server.Shutdown(context.Background()); err != nil {\n\t\tutils.Logger().Error().Err(err).Msg(\"Error when shutting down explorer server\")\n\t} else {\n\t\tutils.Logger().Info().Msg(\"Shutting down explorer server successufully\")\n\t}\n}", "func Stop() {\n\tdriver.Stop()\n}", "func (m *mockService) Stop() {\n\t// m.ctrl.Finish() calls runtime.Goexit() on errors\n\t// put it in defer so cleanup is always done\n\tdefer func() {\n\t\tm.server.Shutdown()\n\t\tm.started = false\n\t}()\n\tm.ctrl.Finish()\n}", "func (htmlServer *HTMLServer) Stop() error {\n\n\tconst timeout = 5 * time.Second\n\tctx, cancel := context.WithTimeout(context.Background(), timeout)\n\tdefer cancel()\n\n\tlog.Println(Detail(\"SERVER : Service stopping.\"))\n\n\tif e := htmlServer.server.Shutdown(ctx); e != nil {\n\n\t\tif e := htmlServer.server.Close(); e != nil {\n\t\t\tlog.Printf(Warn(\"SERVER : Service stopping : Error=%s\"), e)\n\t\t\treturn e\n\t\t}\n\t}\n\n\thtmlServer.wg.Wait()\n\tlog.Println(Detail(\"SERVER : Stopped\"))\n\treturn nil\n}", "func (i *ServiceInitializer) StopService(s turbo.Servable) {\n}", "func (s JSONHTTPServer) StopService() {\n\tlog.Debug(\"Stopping json-http service...\")\n\ts.stop <- true\n}", "func (s JSONHTTPServer) StopService() {\n\tlog.Debug(\"Stopping json-http service...\")\n\ts.stop <- true\n}", "func (srv *Server) Stop() {\n Warn(fmt.Sprintf(\"stopping server %s\", srv.addrURL.String()))\n srv.mu.Lock()\n if srv.httpServer == nil {\n srv.mu.Unlock()\n return\n }\n graceTimeOut := time.Duration(50)\n ctx, cancel := context.WithTimeout(context.Background(), graceTimeOut)\n defer cancel()\n if err := srv.httpServer.Shutdown(ctx); err != nil {\n Debug(\"Wait is over due to error\")\n if err := srv.httpServer.Close(); err != nil {\n Debug(err.Error())\n }\n Debug(err.Error())\n }\n close(srv.stopc)\n <-srv.donec\n srv.mu.Unlock()\n Warn(fmt.Sprintf(\"stopped server %s\", srv.addrURL.String()))\n}", "func StopTestServer(server *http.Server) error {\n\terror := server.Shutdown(nil)\n\tif error != nil {\n\t\tfmt.Printf(\"Could not stop test websocket server in test_utils: %s\", error.Error())\n\t\treturn error\n\t}\n\treturn nil\n}", "func stopServer(w http.ResponseWriter, r *http.Request) {\n\tgo localServer.Shutdown(context.Background())\n}", "func (s * Service)Stop() {\n\tlog.Println(\"Stopping Server!\")\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\tif s.Server != nil {\n\t\tlog.Println(\" Initiating Server Shutdown!\")\n\t\tif err := s.Server.Shutdown(ctx); err != nil {\n\t\t\t// handle err\n\t\t\tlog.Println(\"Error while stopping Server!\", err)\n\t\t}\n\t}\n}", "func (serv *webService) Stop(ctx context.Context) error {\n\tvar err error\n\tif serv.Server != nil {\n\t\terr = serv.Server.Shutdown(ctx)\n\t\tserv.Server = nil\n\t}\n\tif serv.RPC != nil {\n\t\terr2 := serv.RPC.Stop()\n\t\tif err2 != nil 
{\n\t\t\terr = err2\n\t\t}\n\t}\n\treturn err\n}", "func (h *Harness) Stop() {\n\tfin := make(chan struct{})\n\tgo func() {\n\t\tdefer close(fin)\n\t\tif err := h.Client.Close(); err != nil {\n\t\t\th.t.Fatal(err)\n\t\t}\n\n\t\tif err := h.cmd.Process.Kill(); err != nil {\n\t\t\th.t.Fatal(err)\n\t\t}\n\n\t\tif err := os.Remove(h.configPath); err != nil {\n\t\t\th.t.Fatal(err)\n\t\t}\n\t}()\n\tselect {\n\tcase <-fin:\n\tcase <-time.After(h.StopTimeout):\n\t}\n}", "func (s *Server) Stop() {\n\tclose(s.quit)\n\ts.listener.Close()\n\ts.eventLogger.Info(uint32(windows.NO_ERROR), fmt.Sprintf(\"remove all %+v\", s.proxy.portMappings))\n\tif err := s.proxy.removeAll(); err != nil {\n\t\ts.eventLogger.Warning(uint32(windows.ERROR_EXCEPTION_IN_SERVICE), err.Error())\n\t}\n\ts.stopped = true\n}", "func (m *DevicePluginStub) Stop() error {\n\tglog.V(2).Infof(\"Stopping server %s\", m.SocketName())\n\n\tm.server.Stop()\n\tclose(m.stop)\n\n\tif err := m.waitTimeout(); err != nil {\n\t\treturn err\n\t}\n\n\treturn m.cleanup()\n}", "func (s Server) stop(ctx context.Context) {\n\ts.grpcServer.Stop()\n\terr := s.httpServer.Shutdown(ctx)\n\tif err != nil {\n\t\tlog.Err(err).Msg(\"error shutting down the http server\")\n\t}\n}", "func (srv *Server) Stop() error {\n\tif err := srv.app.Shutdown(); err != nil {\n\t\treturn err\n\t}\n\tif err := srv.config.StorageDriver.Disconnect(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func stopServer(grpcServer *grpc.Server, v *Vibranium) error {\n\ttime.Sleep(1 * time.Second) // to aviod \"read: connection reset by peer\"\n\tdefer time.Sleep(1 * time.Second) // to aviod \"bind error\"\n\tgrpcServer.GracefulStop()\n\tlog.Info(\"gRPC server stopped gracefully.\")\n\n\tlog.Info(\"Now check if cluster still have running tasks...\")\n\twait := make(chan interface{})\n\tgo func() {\n\t\tv.Wait()\n\t\twait <- \"\"\n\t}()\n\ttimer := time.NewTimer(time.Second * 30)\n\tselect {\n\tcase <-timer.C:\n\t\t// force quit(terminate all running tasks/goroutines)\n\t\tfor {\n\t\t\tif v.TaskNum == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tv.taskDone(\"\", false)\n\t\t}\n\t\tlog.Info(\"Cluster stopped FORCEFULLY\")\n\tcase <-wait:\n\t\tlog.Info(\"Cluster stopped gracefully\")\n\t}\n\treturn nil\n}", "func StopService(service string) error {\n\treturn doService(service, \"stop\")\n}", "func (sv *Unit) Stop() (err error) {\n\tif cmd := strings.Fields(sv.Definition.Service.ExecStop); len(cmd) > 0 {\n\t\treturn exec.Command(cmd[0], cmd[1:]...).Run()\n\t}\n\tif sv.Cmd.Process != nil {\n\t\treturn sv.Cmd.Process.Kill()\n\t}\n\treturn nil\n}", "func (d *Driver) Stop() error {\n\tclient, err := d.getClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn client.ShutdownVirtualMachine(d.vmName(), true)\n}", "func Stop() {\n\t// /bin/dbus-send --system --dest=org.ganesha.nfsd --type=method_call /org/ganesha/nfsd/admin org.ganesha.nfsd.admin.shutdown\n}", "func (service *APIService) Stop() {\n\tservice.GrpcServer.GracefulStop()\n\tservice.HttpServer.Close()\n}", "func (ms *MarvinServer) Stop() {\n\n}", "func (s *ServiceManager) StopService(id string) error {\n\tfor name, service := range s.services {\n\t\tif name == id && service != nil {\n\t\t\tservice.Stop()\n\t\t\ts.services[name] = nil\n\t\t\tclose(s.chs[name])\n\t\t}\n\t}\n\treturn nil\n}", "func StopWebdriver() {\n\tif driver == nil {\n\t\tginkgo.Fail(\"WebDriver not started\", 1)\n\t}\n\tdriver.Stop()\n\tdriver = nil\n}", "func (s *LocalService) Stop() error {\n\t// This is really nasty but it works\n\t// Wait for the channels to be 
drained\n\tfor {\n\t\twriteNotifications := reflect.ValueOf(s.writeNotifications)\n\n\t\tif writeNotifications.Len() == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\ttime.Sleep(1 * time.Second)\n\t}\n\n\tclose(s.writeNotifications)\n\n\tif err := s.watcher.Close(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (s *GenericGrpcServer) Stop() {\n\tif s.Listener != nil {\n\t\tlog.Infof(\"Stopping service at %s\", s.Listener.Addr())\n\t}\n\tif s.Server != nil {\n\t\ts.Server.GracefulStop()\n\t}\n}", "func (server *TempestServer) Stop() {\n\tserver.running = false\n\tserver.serverWaitGroup.Wait()\n}", "func (server *Server) Stop() {\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tif cancel != nil {\n\t\tserver.srv.Shutdown(ctx)\n\t\tserver.srv = nil\n\t}\n\tif server.hub != nil {\n\t\tserver.hub.stop()\n\t}\n}", "func (p *Pebble) Stop(t *testing.T) {\n\tstopCMD(t, p.pebbleCMD)\n\tstopCMD(t, p.challtestsrvCMD)\n}", "func Stop(server *TestDex) {\n\tserver.webServer.CloseClientConnections()\n\tserver.webServer.Close()\n}", "func Stop(r *registry.Registry) error {\n\treturn r.Server.Shutdown(context.Background())\n}", "func Test_WAIS_stopSystemTest(t *testing.T) {\n\terr := deleteScenario(\"wais-system-test\")\n\tif err != nil {\n\t\tlog.Error(\"cannot delete scenario :\", err)\n\t}\n}", "func (d *Driver) Stop() error {\n\tif err := d.verifyRootPermissions(); err != nil {\n\t\treturn err\n\t}\n\n\ts, err := d.GetState()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif s != state.Stopped {\n\t\terr := d.sendSignal(syscall.SIGTERM)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"hyperkit sigterm failed\")\n\t\t}\n\t\t// wait 120s for graceful shutdown\n\t\tfor i := 0; i < 60; i++ {\n\t\t\ttime.Sleep(2 * time.Second)\n\t\t\ts, _ := d.GetState()\n\t\t\tlog.Debugf(\"VM state: %s\", s)\n\t\t\tif s == state.Stopped {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\treturn errors.New(\"VM Failed to gracefully shutdown, try the kill command\")\n\t}\n\treturn nil\n}", "func (s *RegistryServer) Stop() error {\n\treturn s.listener.Close()\n}", "func (tm *ServiceTracerouteManager) Stop() {\n\ttm.StopChan <- true\n}", "func (s *Server) Stop() {\n\ts.cmd.Process.Kill()\n\tos.RemoveAll(s.DataDir)\n}", "func (s *DeviceService) Stop(force bool) {\n\tif s.initialized {\n\t\t_ = s.driver.Stop(false)\n\t}\n\tautoevent.GetManager().StopAutoEvents()\n}", "func (s *Server) Stop() error {\n\n\tizap.Logger.Info(\"Stopping http server\", zap.String(\"address\", s.srv.Addr))\n\treturn s.srv.Shutdown(context.Background())\n}", "func (a *Agent) Stop() {\n\tif a.ha != nil {\n\t\tif err := a.ha.Stop(); err != nil {\n\t\t\tlevel.Error(a.logger).Log(\"msg\", \"failed to stop scraping service server\", \"err\", err)\n\t\t}\n\t}\n\ta.cm.Stop()\n}", "func (proxy *proxyService) Stop() error {\n\tproxy.logger.Infof(\"Stopping service\")\n\tproxy.done = true\n\treturn proxy.listener.Close()\n}", "func (m *WebsocketRoutineManager) Stop() error {\n\tif m == nil {\n\t\treturn fmt.Errorf(\"websocket routine manager %w\", ErrNilSubsystem)\n\t}\n\n\tm.mu.Lock()\n\tif atomic.LoadInt32(&m.state) == stoppedState {\n\t\tm.mu.Unlock()\n\t\treturn fmt.Errorf(\"websocket routine manager %w\", ErrSubSystemNotStarted)\n\t}\n\tatomic.StoreInt32(&m.state, stoppedState)\n\tm.mu.Unlock()\n\n\tclose(m.shutdown)\n\tm.wg.Wait()\n\n\treturn nil\n}", "func (srv *RegistryServer) Stop() {\n\tif srv.server != nil {\n\t\tfmt.Printf(\"Stopping registration server..\")\n\t\tsrv.server.Stop()\n\t}\n}", "func (w *Webserver) Stop() error 
{\n\tw.logger.Infof(\"gracefully shutting down http server at %d...\", w.config.Port)\n\n\terr := w.Server.Shutdown(context.Background())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclose(w.jobs)\n\treturn nil\n}", "func cmdStop() {\n\tswitch state := status(B2D.VM); state {\n\tcase vmUnregistered:\n\t\tlog.Fatalf(\"%s is not registered.\", B2D.VM)\n\tcase vmRunning:\n\t\tlog.Printf(\"Shutting down %s...\", B2D.VM)\n\t\tif err := vbm(\"controlvm\", B2D.VM, \"acpipowerbutton\"); err != nil {\n\t\t\tlog.Fatalf(\"failed to shutdown vm: %s\", err)\n\t\t}\n\t\tfor status(B2D.VM) == vmRunning {\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t}\n\tdefault:\n\t\tlog.Printf(\"%s is not running.\", B2D.VM)\n\t}\n}", "func (d *Driver) Stop() error {\n\tcs := d.client()\n\t_, err := cs.AsyncRequest(&egoscale.StopVirtualMachine{\n\t\tID: d.ID,\n\t}, d.async)\n\n\treturn err\n}", "func (hsp HistoryServicePrecacher) Stop() { hsp.pc.Stop() }", "func (s *MockMetricsServer) Stop() {\n\t_ = s.e.Close()\n}", "func (srv *Server) Stop() {\n\terr := srv.httpServer.Shutdown(context.Background())\n\tif err != nil {\n\t\tsrv.log.Errorf(\"Unexpected error while shutting down HTTP server - %s\", err)\n\t}\n\tdefer srv.runCancel()\n}", "func (s *server) Stop() error {\n\t// Make sure this only happens once.\n\tif atomic.AddInt32(&s.shutdown, 1) != 1 {\n\t\tlogging.CPrint(logging.INFO, \"server is already in the process of shutting down\", logging.LogFormat{})\n\t\treturn nil\n\t}\n\n\ts.syncManager.Stop()\n\n\t// Signal the remaining goroutines to quit.\n\tclose(s.quit)\n\n\ts.wg.Done()\n\n\treturn nil\n}", "func runStop(cmd *types.Command, args []string) {\n\tif stopHelp {\n\t\tcmd.PrintUsage()\n\t}\n\tif len(args) < 1 {\n\t\tcmd.PrintShortUsage()\n\t}\n\n\thasError := false\n\tfor _, needle := range args {\n\t\tserverID := cmd.API.GetServerID(needle)\n\t\taction := \"poweroff\"\n\t\tif stopT {\n\t\t\taction = \"terminate\"\n\t\t}\n\t\terr := cmd.API.PostServerAction(serverID, action)\n\t\tif err != nil {\n\t\t\tif err.Error() != \"server should be running\" && err.Error() != \"server is being stopped or rebooted\" {\n\t\t\t\tlog.Warningf(\"failed to stop server %s: %s\", serverID, err)\n\t\t\t\thasError = true\n\t\t\t}\n\t\t} else {\n\t\t\tif stopW {\n\t\t\t\t// We wait for 10 seconds which is the minimal amount of time needed for a server to stop\n\t\t\t\ttime.Sleep(10 * time.Second)\n\t\t\t\t_, err = api.WaitForServerStopped(cmd.API, serverID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Errorf(\"failed to wait for server %s: %v\", serverID, err)\n\t\t\t\t\thasError = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Println(needle)\n\t\t}\n\t}\n\n\tif hasError {\n\t\tos.Exit(1)\n\t}\n}", "func (s *Server) Stop() error {\n\t// Stop operations server\n\terr := s.Operations.Stop()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif s.listener == nil {\n\t\treturn nil\n\t}\n\n\t_, port, err := net.SplitHostPort(s.listener.Addr().String())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = s.closeListener()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif s.wait == nil {\n\t\treturn nil\n\t}\n\n\tfor i := 0; i < 10; i++ {\n\t\tselect {\n\t\tcase <-s.wait:\n\t\t\tlog.Debugf(\"Stop: successful stop on port %s\", port)\n\t\t\tclose(s.wait)\n\t\t\ts.wait = nil\n\t\t\treturn nil\n\t\tdefault:\n\t\t\tlog.Debugf(\"Stop: waiting for listener on port %s to stop\", port)\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t}\n\tlog.Debugf(\"Stop: timed out waiting for stop notification for port %s\", port)\n\t// make sure DB is closed\n\terr = s.closeDB()\n\tif err != nil 
{\n\t\tlog.Errorf(\"Close DB failed: %s\", err)\n\t}\n\n\treturn nil\n}", "func (srv *Server) Stop() {\n\tsrv.s.Close()\n\tsrv.wg.Wait()\n}", "func (s *Service) Stop() {\n\ts.s.Shutdown()\n}", "func (s *server) Stop() {\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\n\ts.httpServer.SetKeepAlivesEnabled(false)\n\terr := s.httpServer.Shutdown(ctx)\n\tif err != nil {\n\t\ts.logger.Fatalf(\"could not gracefully shutdown the server: %v\\n\", err)\n\t}\n\n\t<-s.done\n}", "func (f *feeService) stop() {\n\tif err := f.srv.Shutdown(context.Background()); err != nil {\n\t\tfmt.Printf(\"error: cannot stop fee api: %v\", err)\n\t}\n\n\tf.wg.Wait()\n}", "func (t *TcpServer) Stop() {\n\tt.isRunning = false\n}", "func (wsServer *WsServer) Stop() {\n\tif atomic.AddInt32(&wsServer.shutdown, 1) != 1 {\n\t\tLogger.log.Info(\"RPC server is already in the process of shutting down\")\n\t}\n\tLogger.log.Info(\"RPC server shutting down\")\n\tif wsServer.started != 0 {\n\t\twsServer.server.Close()\n\t}\n\tfor _, listen := range wsServer.config.HttpListenters {\n\t\tlisten.Close()\n\t}\n\tLogger.log.Warn(\"RPC server shutdown complete\")\n\twsServer.started = 0\n\twsServer.shutdown = 1\n}", "func (s *Service) Stop() {\n\tclose(s.stopChan)\n}", "func (w *Web) Stop() error {\n\tw.L(\"Stopping web server on %s:%s\", w.Address, w.Port)\n\tctx, cancel := context.WithTimeout(context.Background(), nonZeroDuration(w.Timeouts.Shutdown, time.Second*30))\n\tdefer cancel()\n\terr := w.Shutdown(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tw.running = false\n\treturn nil\n}", "func (service *Service) Stop() {\n\tlog.Info(\"close TaskService size:\")\n\tif err := service.kafka.Stop(); err != nil {\n\t\tpanic(err)\n\t}\n\t<-service.stopped\n\t_ = service.clickhouse.Close()\n\tlog.Info(\"closed TaskService size:\")\n}", "func (r *server) Stop() {\n\t// TODO: pass context in as a parameter.\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\n\tif err := r.stopHTTPServers(ctx); err != nil {\n\t\tlog.WithError(err).Error(\"Some HTTP servers failed to shutdown.\")\n\t}\n\n\tr.Server.Stop()\n}", "func shutDown(ctx context.Context, logger *log.Logger, srv *http.Server) {\n\tquit := make(chan os.Signal, 1)\n\tsignal.Notify(quit, os.Interrupt)\n\t<-quit\n\n\tlogger.Info(\"msg\", \"Shutting down HTTP/REST gateway server...\")\n\n\tctx, cancel := context.WithTimeout(ctx, 5*time.Second)\n\tdefer cancel()\n\n\tif err := srv.Shutdown(ctx); err != nil {\n\t\tlogger.Error(\"err\", fmt.Sprintf(\"Shutdown HTTP/REST gateway server: %s\", err.Error()))\n\t}\n\n\tlogger.Info(\"msg\", \"Shutdown done HTTP/REST gateway server\")\n}", "func (s *daemonServer) Stop() {\n\ts.grpcServer.Stop()\n}", "func (app *App) Stop() error {\n\t// Close already uploaded file tracker\n\tapp.Logger.Debug(\"Shutting down File Tracker service...\")\n\tif err := app.FileTracker.Close(); err != nil {\n\t\treturn err\n\t}\n\n\t// Close upload session tracker\n\tapp.Logger.Debug(\"Shutting down Upload Tracker service...\")\n\tif err := app.UploadTracker.Close(); err != nil {\n\t\treturn err\n\t}\n\n\t// Close token manager\n\tapp.Logger.Debug(\"Shutting down Token Manager service...\")\n\tif err := app.TokenManager.Close(); err != nil {\n\t\treturn err\n\t}\n\n\tapp.Logger.Debug(\"All services has been shut down successfully\")\n\treturn nil\n}", "func (ts *Server) Stop() error {\n\tif ts.Server == nil {\n\t\treturn nil\n\t}\n\tif err := ts.Server.Shutdown(context.Background()); 
err != nil {\n\t\treturn err\n\t}\n\tts.Server = nil\n\treturn nil\n}", "func Stop(args ...string) {\n switch {\n case cfg.Kill:\n Kill(args...)\n default:\n runInstances(\"Stopping\", func(i int, id string) error {\n defer os.Remove(pidFileName(i))\n return run(\"stop\", id)\n })\n }\n}", "func (c *Controller) Stop() {\n\tglog.Info(\"shutdown http service\")\n}", "func (s *RepService) Stop() {\n\ts.running = false\n}", "func TearDownSuite(suiteCtx *types.SuiteContext) {\n\tBy(\"tearing down the test environment\")\n\n\tselenium.RemoveSeleniumIfNeeded(suiteCtx)\n\n\terr := suiteCtx.TestEnv.Stop()\n\tExpect(err).ToNot(HaveOccurred())\n}", "func (service *HTTPRestService) Stop() {\n\tservice.Uninitialize()\n\tlog.Printf(\"[Azure CNS] Service stopped.\")\n}", "func (s *Service) Stop() error {\n\treturn s.client.Close()\n}", "func (k *k8sService) Stop() {\n\t// prevent other Start/Stop operations until we are done\n\tk.startStopMutex.Lock()\n\tdefer k.startStopMutex.Unlock()\n\n\t// Protect state access from other go-routines\n\tk.Lock()\n\tif !k.running {\n\t\tk.Unlock()\n\t\treturn\n\t}\n\tlog.Infof(\"Stopping k8s service\")\n\n\tk.running = false\n\tk.isLeader = false\n\tif k.modCh != nil {\n\t\tclose(k.modCh)\n\t}\n\tif k.cancel != nil {\n\t\tk.cancel()\n\t}\n\n\t// release lock so that goroutines can make progress and terminate cleanly\n\tk.Unlock()\n\n\t// Wait for goroutines to terminate\n\tk.Wait()\n\n\tk.client = nil\n\tk.strClient = nil\n\tk.cancel = nil\n\tk.modCh = nil\n}", "func (server *Server) stop() {\n\tfor address, connection := range server.connections {\n\t\tif server.breakConnection(connection) {\n\t\t\tserver.Logger.Info(\"Close connection at\", address)\n\t\t} else {\n\t\t\tserver.Logger.Warning(\"Impossible to close connection at\", address)\n\t\t}\n\t}\n\tif server.tcp_socket != nil {\n//\t\tfor conn_type, socket := range server.tcp_socket {\n//\t\t\terr := socket.Close()\n//\t\t\tif err != nil {\n//\t\t\t\tserver.Logger.Error(\"Error occured during closing \" + conn_type + \" socket:\", err)\n//\t\t\t}\n//\t\t}\n\t\terr := server.tcp_socket.Close()\n\t\tif err != nil {\n\t\t\tserver.Logger.Error(\"Error occured during closing \" + \"tcp\" + \" socket:\", err)\n\t\t}\n\t\tserver.tcp_socket = nil\n\t} else {\n\t\tserver.Logger.Error(\"Server can't be stoped, because socket is undefined.\")\n\t}\n\tserver.Logger.Info(\"Waiting for ending process of goroutines...\")\n\tserver.Wait()\n\tserver.storage.FlushAll()\n}", "func (s *SystemService) Stop() error {\n\terr := s.control(svc.Stop, svc.Stopped)\n\tif err != nil {\n\t\te := err.Error()\n\t\tif strings.Contains(e, \"service does not exist\") {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\n\tattempt := 0\n\tmaxAttempts := 10\n\twait := 3 * time.Second\n\tfor {\n\t\tattempt++\n\n\t\tlogger.Log(\"waiting for service to stop\")\n\n\t\t// // Wait a few seconds before retrying.\n\t\ttime.Sleep(wait)\n\n\t\t// // Attempt to start the service again.\n\t\tstat, err := s.Status()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// // Check the status to see if it is running yet.\n\t\t// stat, err := system.Service.Status()\n\t\t// if err != nil {\n\t\t// \texit(err, stop)\n\t\t// }\n\n\t\t// // If it is now running, exit the retry loop.\n\t\tif !stat.Running {\n\t\t\tbreak\n\t\t}\n\n\t\tif attempt == maxAttempts {\n\t\t\treturn errors.New(\"could not stop system service after multiple attempts\")\n\t\t}\n\t}\n\n\treturn nil\n\t// _, err := runScCommand(\"stop\", fmt.Sprintf(\"\\\"%s\\\"\", s.Command.Name))\n\n\t// if err != 
nil {\n\t// \tlogger.Log(\"stop service error: \", err)\n\n\t// \tif strings.Contains(err.Error(), \"exit status 1062\") {\n\t// \t\tlogger.Log(\"service already stopped\")\n\t// \t} else {\n\t// \t\treturn err\n\t// \t}\n\t// }\n\n\t// return nil\n}", "func (s *Service) Stop() {\n\tclose(s.ch)\n\ts.waitGroup.Wait()\n}", "func (b *Bot) Stop() {\n\tb.serversProtect.RLock()\n\tfor _, srv := range b.servers {\n\t\tb.stopServer(srv)\n\t}\n\tb.serversProtect.RUnlock()\n}", "func (s *Server) Stop() {\n\tclose(s.stopChan)\n\tfor _, l := range s.listeners {\n\t\tl.Stop()\n\t}\n\tif s.Statistics != nil {\n\t\ts.Statistics.Stop()\n\t}\n\ts.health.Deregister() //nolint:errcheck\n\ts.Started = false\n}", "func (hSvr *HTTPServer) Stop(ctx context.Context) error {\n\treturn hSvr.svr.Shutdown(ctx)\n}", "func (ms *MockOpenIDDiscoveryServer) Stop() error {\n\tms.HitNum = 0\n\treturn ms.server.Close()\n}", "func (o *Object) StopHttpServer() {\n\t_ = o.server.Shutdown(context.Background())\n}", "func (ms *MockServer) Stop() error {\n\tif ms.server == nil {\n\t\treturn nil\n\t}\n\n\treturn ms.server.Close()\n}", "func (m *ntpManager) Stop() error {\n\tif m == nil {\n\t\treturn fmt.Errorf(\"ntp manager %w\", ErrNilSubsystem)\n\t}\n\tif atomic.LoadInt32(&m.started) == 0 {\n\t\treturn fmt.Errorf(\"NTP manager %w\", ErrSubSystemNotStarted)\n\t}\n\tdefer func() {\n\t\tlog.Debugf(log.TimeMgr, \"NTP manager %s\", MsgSubSystemShutdown)\n\t\tatomic.CompareAndSwapInt32(&m.started, 1, 0)\n\t}()\n\tlog.Debugf(log.TimeMgr, \"NTP manager %s\", MsgSubSystemShuttingDown)\n\tclose(m.shutdown)\n\treturn nil\n}", "func (a API) Stop(ctx context.Context) error {\n\treturn a.srv.Shutdown(ctx)\n}", "func (s SystemdInitSystem) DisableAndStopService() error {\n\tenabled, err := s.isEnabled()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error checking if etcd service is enabled: %w\", err)\n\t}\n\tif enabled {\n\t\tif err := s.disable(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tactive, err := s.IsActive()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error checking if etcd service is active: %w\", err)\n\t}\n\n\tif active {\n\t\tif err := s.stop(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (s *Server) Stop(ctx context.Context) {\n\ts.shutdownFuncsM.Lock()\n\tdefer s.shutdownFuncsM.Unlock()\n\ts.shutdownOnce.Do(func() {\n\t\tclose(s.shuttingDown)\n\t\t// Shut down the HTTP server in parallel to calling any custom shutdown functions\n\t\twg := sync.WaitGroup{}\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tif err := s.srv.Shutdown(ctx); err != nil {\n\t\t\t\tslog.Debug(ctx, \"Graceful shutdown failed; forcibly closing connections 👢\")\n\t\t\t\tif err := s.srv.Close(); err != nil {\n\t\t\t\t\tslog.Critical(ctx, \"Forceful shutdown failed, exiting 😱: %v\", err)\n\t\t\t\t\tpanic(err) // Something is super hosed here\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\tfor _, f := range s.shutdownFuncs {\n\t\t\tf := f // capture range variable\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tf(ctx)\n\t\t\t}()\n\t\t}\n\t\twg.Wait()\n\t})\n}", "func (r *AgentServer) Stop() {\n\tr.listener.Close()\n\tr.wg.Wait()\n}", "func Test_RNIS_stopSystemTest(t *testing.T) {\n\terr := deleteScenario(\"rnis-system-test\")\n\tif err != nil {\n\t\tlog.Error(\"cannot delete scenario :\", err)\n\t}\n}", "func (s *Server) OnStop() {}", "func (v *vtStopCrawler) stop() {\n\tfor _, worker := range v.workers {\n\t\tworker.stop()\n\t}\n\tclose(v.done)\n}", "func (p *PrivNegAPI) Stop() {\n\tif err := 
p.server.Shutdown(nil); err != nil {\n\t\tpanic(err)\n\t}\n}", "func (this *SimulateLocationService) Stop() error {\n\tif _, err := this.service.GetConnection().Write([]byte{0x00, 0x00, 0x00, 0x01}); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (svc *Service) Stop(s service.Service) (err error) {\n\tif svc.daemon.exited.Get() {\n\t\treturn nil\n\t}\n\tlog.Info(\"service is stopping. notifying agent process.\")\n\n\tsvc.daemon.Lock()\n\tdefer svc.daemon.Unlock()\n\n\tsvc.daemon.stopRequested.Set(true)\n\n\t// notify the agent to gracefully stop\n\twindows.PostNotificationMessage(windows.GetPipeName(svcName), ipc.Stop)\n\n\treturn svc.terminate(err)\n}", "func RemoteStop(host string, port string) error {\n\tclient := &http.Client{}\n\treq, err := http.NewRequest(\"GET\", \"http://\"+host+\":\"+port+\"/stop\", nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Close = true\n\treq.Header.Set(\"Content-Type\", \"application/text\")\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tbs := string(body)\n\tfmt.Printf(\"checkme: %s\\n\", bs)\n\n\treturn nil\n}", "func StopDefaultServer() {\n\tDefaultServer.StopService()\n}", "func handleStop(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {\n\tselect {\n\tcase s.requestProcessShutdown <- struct{}{}:\n\tdefault:\n\t}\n\treturn \"kaspad stopping.\", nil\n}" ]
[ "0.70196897", "0.6680899", "0.66318434", "0.66318434", "0.66209775", "0.64933527", "0.6451682", "0.6430011", "0.6388342", "0.6388342", "0.6270665", "0.6192106", "0.61838084", "0.6182933", "0.61214626", "0.6117665", "0.60970193", "0.6061633", "0.60576785", "0.60330755", "0.60320735", "0.60318935", "0.60080284", "0.59994376", "0.5989082", "0.59443283", "0.59346735", "0.59337455", "0.5919091", "0.5915639", "0.59020025", "0.5901632", "0.5891067", "0.5864932", "0.5864047", "0.58559364", "0.5847781", "0.5847426", "0.5834038", "0.583301", "0.5809361", "0.5807444", "0.58051765", "0.5801658", "0.5800131", "0.57983994", "0.5796868", "0.5796411", "0.57937545", "0.5782394", "0.5781536", "0.5779614", "0.5771035", "0.5762901", "0.5762505", "0.5753524", "0.57528144", "0.57433695", "0.5737611", "0.57283306", "0.57255155", "0.5719444", "0.57142526", "0.57104015", "0.5708173", "0.57067513", "0.5704591", "0.5686042", "0.5683895", "0.5681114", "0.5680379", "0.56735843", "0.56645006", "0.56579846", "0.5657303", "0.56512374", "0.56415206", "0.5635641", "0.5631651", "0.56252104", "0.5623119", "0.5621463", "0.5619028", "0.5611796", "0.5611285", "0.56099755", "0.5609232", "0.56064343", "0.56018764", "0.5599246", "0.55977917", "0.55977297", "0.5595132", "0.5593844", "0.559082", "0.55848753", "0.5583347", "0.55762106", "0.5574213", "0.5570039" ]
0.8379308
0
NewDriver create new browser driver
func NewDriver(browser string) selenium.WebDriver {
	StartService()
	caps := selenium.Capabilities{"browserName": browser}
	switch browser {
	case "chrome":
		chrCaps := chrome.Capabilities{
			Args: []string{
				"--no-sandbox",
			},
			W3C: true,
		}
		if headless {
			chrCaps.Args = append(chrCaps.Args, "--headless")
		}
		caps.AddChrome(chrCaps)
	case "htmlunit":
		caps["javascriptEnabled"] = true
	}
	wd, err := selenium.NewRemote(caps, fmt.Sprintf("http://localhost:%d/wd/hub", port))
	if err != nil {
	}
	driver = wd
	return wd
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewDriver() godfish.Driver { return &driver{} }", "func newDriver() *driver {\n\treturn &driver{\n\t\tnetworks: map[string]*bridgeNetwork{},\n\t\tportAllocator: portallocator.Get(),\n\t}\n}", "func RegisterNewDriver(driver string, defaultscopes []string, callback func(client *http.Client, u *models.User), endpoint oauth2.Endpoint, apimap, usermap map[string]string) {\n\tapiMap[driver] = apimap\n\tuserMap[driver] = usermap\n\tendpointMap[driver] = endpoint\n\tcallbackMap[driver] = callback\n\tdefaultScopesMap[driver] = defaultscopes\n}", "func NewDriver(root string) *Driver {\n\treturn &Driver{\n\t\troot: root,\n\t}\n}", "func NewDriver(cfg *config.Config) *Driver {\n\tdriver := &Driver{\n\t\tcfg: cfg,\n\t}\n\n\treturn driver\n}", "func NewDriver(baseURL string, token string) (*Driver, error) {\n\traw, err := hype.New(baseURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\td := &Driver{\n\t\traw,\n\t\ttoken,\n\t\thype.NewHeader(\"Accept\", \"application/json\"),\n\t\thype.NewHeader(\"Content-Type\", \"application/json\"),\n\t\thype.NewHeader(\"User-Agent\", \"fbz/0.1.0 (https://github.com/ess/fbz)\"),\n\t}\n\n\treturn d, nil\n}", "func NewDriver(name string) (*App, error) {\n\treturn newApp(\"driver.\" + name)\n}", "func NewDriver() *Driver {\n\treturn &Driver{\n\t\tServer: http.Server{\n\t\t\tReadTimeout: 30 * time.Second,\n\t\t\tWriteTimeout: 30 * time.Second,\n\t\t\tIdleTimeout: 120 * time.Second,\n\t\t},\n\t}\n}", "func NewDriver(root string, client *pilosa.Client) *Driver {\n\treturn &Driver{\n\t\troot: root,\n\t\tclient: client,\n\t}\n}", "func NewDriver(machineID string, secretData map[string][]byte, classKind string, machineClass interface{}, machineName string) Driver {\n\n\tswitch classKind {\n\tcase \"OpenStackMachineClass\":\n\t\treturn &OpenStackDriver{\n\t\t\tOpenStackMachineClass: machineClass.(*v1alpha1.OpenStackMachineClass),\n\t\t\tCredentialsData: secretData,\n\t\t\tUserData: string(secretData[\"userData\"]),\n\t\t\tMachineID: machineID,\n\t\t\tMachineName: machineName,\n\t\t}\n\n\tcase \"AWSMachineClass\":\n\t\treturn &AWSDriver{\n\t\t\tAWSMachineClass: machineClass.(*v1alpha1.AWSMachineClass),\n\t\t\tCredentialsData: secretData,\n\t\t\tUserData: string(secretData[\"userData\"]),\n\t\t\tMachineID: machineID,\n\t\t\tMachineName: machineName,\n\t\t}\n\n\tcase \"AzureMachineClass\":\n\t\treturn &AzureDriver{\n\t\t\tAzureMachineClass: machineClass.(*v1alpha1.AzureMachineClass),\n\t\t\tCredentialsData: secretData,\n\t\t\tUserData: string(secretData[\"userData\"]),\n\t\t\tMachineID: machineID,\n\t\t\tMachineName: machineName,\n\t\t}\n\n\tcase \"GCPMachineClass\":\n\t\treturn &GCPDriver{\n\t\t\tGCPMachineClass: machineClass.(*v1alpha1.GCPMachineClass),\n\t\t\tCredentialsData: secretData,\n\t\t\tUserData: string(secretData[\"userData\"]),\n\t\t\tMachineID: machineID,\n\t\t\tMachineName: machineName,\n\t\t}\n\n\tcase \"AlicloudMachineClass\":\n\t\treturn &AlicloudDriver{\n\t\t\tAlicloudMachineClass: machineClass.(*v1alpha1.AlicloudMachineClass),\n\t\t\tCredentialsData: secretData,\n\t\t\tUserData: string(secretData[\"userData\"]),\n\t\t\tMachineID: machineID,\n\t\t\tMachineName: machineName,\n\t\t}\n\tcase \"PacketMachineClass\":\n\t\treturn &PacketDriver{\n\t\t\tPacketMachineClass: machineClass.(*v1alpha1.PacketMachineClass),\n\t\t\tCredentialsData: secretData,\n\t\t\tUserData: string(secretData[\"userData\"]),\n\t\t\tMachineID: machineID,\n\t\t\tMachineName: machineName,\n\t\t}\n\t}\n\n\treturn NewFakeDriver(\n\t\tfunc() (string, string, error) 
{\n\t\t\tfakeVMs[\"fake\"] = \"fake_ip\"\n\t\t\treturn \"fake\", \"fake_ip\", nil\n\t\t},\n\t\tfunc(machineID string, machineName string) error {\n\t\t\tfakeVMs[machineID] = machineName\n\t\t\treturn nil\n\t\t},\n\t\tfunc(machineID string) error {\n\t\t\t// delete(fakeVMs, \"fake\")\n\t\t\tdelete(fakeVMs, machineID)\n\t\t\treturn nil\n\t\t},\n\t\tfunc() (string, error) {\n\t\t\treturn \"\", nil\n\t\t},\n\t\tfunc() (VMs, error) {\n\t\t\treturn fakeVMs, nil\n\t\t},\n\t\tfunc([]corev1.PersistentVolumeSpec) ([]string, error) {\n\t\t\treturn []string{}, nil\n\t\t},\n\t\tfunc() string {\n\t\t\treturn \"\"\n\t\t},\n\t\tfunc(string) {\n\t\t\treturn\n\t\t},\n\t)\n}", "func NewRemote(capabilities Capabilities, urlPrefix string) (WebDriver, error) {\n\tif urlPrefix == \"\" {\n\t\turlPrefix = DefaultURLPrefix\n\t}\n\n\twd := &remoteWD{\n\t\turlPrefix: urlPrefix,\n\t\tcapabilities: capabilities,\n\t}\n\tif b := capabilities[\"browserName\"]; b != nil {\n\t\twd.browser = b.(string)\n\t}\n\tif _, err := wd.NewSession(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn wd, nil\n}", "func CreateDriver(url string, capabilities map[string]string) *Driver {\n\tnewDriver := &Driver{\n\t\tclient.CreateClient(url),\n\t\tcapabilities,\n\t\t\"\",\n\t}\n\n\treturn newDriver\n}", "func NewDriver(machineName string, storePath string) *Driver {\n\tbmlog.DebugLevel = 5\n\tdefaultDiscs := brain.Discs{\n\t\tbrain.Disc{\n\t\t\tLabel: \"disk-1\",\n\t\t\tStorageGrade: \"sata\",\n\t\t\tSize: 25600,\n\t\t},\n\t}\n\n\treturn &Driver{\n\t\tspec: brain.VirtualMachineSpec{\n\t\t\tVirtualMachine: brain.VirtualMachine{\n\t\t\t\tCores: defaultCores,\n\t\t\t\tDiscs: defaultDiscs,\n\n\t\t\t\tMemory: defaultMemory,\n\t\t\t\tZoneName: defaultZone,\n\t\t\t\tName: defaultName,\n\t\t\t},\n\t\t\tReimage: &brain.ImageInstall{\n\t\t\t\tDistribution: \"stretch\",\n\t\t\t\t// TODO: generate a random root password.\n\t\t\t\tRootPassword: \"Shohshu9mi9aephahnaigi5l\",\n\t\t\t},\n\t\t},\n\t\tBaseDriver: &drivers.BaseDriver{\n\t\t\tSSHUser: defaultUser,\n\t\t\tMachineName: machineName,\n\t\t\tStorePath: storePath,\n\t\t},\n\t}\n}", "func NewDriver(ca CABackend, client *client.Client) (*Driver, error) {\n\treturn &Driver{\n\t\tca: ca,\n\t\tclient: client,\n\t}, nil\n}", "func New() (d *Driver) {\n\treturn &Driver{}\n}", "func NewDriver(ctx context.Context, name string, opts DriverOptions) (Driver, error) {\n\tfactory, ok := factories[name]\n\tif !ok {\n\t\treturn nil, errors.Errorf(\"unsupported driver type: %q\", name)\n\t}\n\treturn factory(ctx, opts)\n}", "func NewDriver(endpoint, driverName, nodeID string) *Driver {\n\tglog.Infof(\"NewDriver for CHDFS, driverName: %v version: %v nodeID: %v\", driverName, version, nodeID)\n\n\tcsiDriver := csicommon.NewCSIDriver(driverName, version, nodeID)\n\tcsiDriver.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{\n\t\tcsi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,\n\t})\n\n\treturn &Driver{\n\t\tcsiDriver: csiDriver,\n\t\tendpoint: endpoint,\n\t}\n}", "func (m *Manager) GetNewDriver(machineName string) (Driver, error) {\n\tif driverFactory, ok := m.drivers[machineName]; ok {\n\t\treturn driverFactory(), nil\n\t}\n\n\treturn nil, errors.New(\"No such driver: \" + machineName)\n}", "func NewDriver(options *DriverOptions) CSIDriver {\n\tif !*useDriverV2 {\n\t\treturn newDriverV1(options)\n\t} else {\n\t\treturn newDriverV2(options)\n\t}\n}", "func NewDriver(cfg *configv1.InfrastructureStatus, clnt client.Client) Driver {\n\n\tctx := context.Background()\n\tvar driver 
Driver\n\n\tif cfg.PlatformStatus.Type == \"AWS\" {\n\t\tdriver = s3.NewDriver(ctx, cfg, clnt)\n\t}\n\n\tif cfg.PlatformStatus.Type == \"GCP\" {\n\t\tdriver = gcs.NewDriver(ctx, cfg, clnt)\n\t}\n\n\treturn driver\n}", "func NewDriver(name string, loader func(interface{}) error) (Driver, error) {\n\tfactorysMu.RLock()\n\tfactoryi, ok := factories[name]\n\tfactorysMu.RUnlock()\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"file: unknown driver %q (forgotten import?)\", name)\n\t}\n\treturn factoryi(loader)\n}", "func NewDriver(hostName, storePath string) drivers.Driver {\n\treturn &Driver{\n\t\tBaseDriver: &drivers.BaseDriver{\n\t\t\tSSHUser: \"docker\",\n\t\t\tMachineName: hostName,\n\t\t\tStorePath: storePath,\n\t\t},\n\t}\n}", "func (r *Relwarc) NewBrowser() *Browser {\n\tctx, cancel := chromedp.NewContext(r.ctx)\n\n\t// make sure a browser and its first tab are created.\n\tif err := chromedp.Run(ctx); err != nil {\n\t\tpanic(err)\n\t}\n\n\t// enable network by default.\n\tif err := chromedp.Run(ctx, network.Enable()); err != nil {\n\t\tpanic(err)\n\t}\n\n\ttgt := chromedp.FromContext(ctx).Target\n\n\ttab := Tab{\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t\ttarget: tgt,\n\t\trequestMap: map[network.RequestID]*Request{},\n\t}\n\n\tchromedp.ListenTarget(ctx, tab.onTargetEvent)\n\n\tbrowser := Browser{\n\t\tctx: ctx,\n\t\tfirst: &tab,\n\t\ttabs: map[target.ID]*Tab{},\n\t}\n\n\treturn &browser\n}", "func NewDriver(machineName, storePath string) drivers.Driver {\n\treturn &Driver{\n\t\tInstanceProfile: defaultInstanceProfile,\n\t\tDiskSize: defaultDiskSize,\n\t\tImage: defaultImage,\n\t\tAvailabilityZone: defaultAvailabilityZone,\n\t\tasync: egoscale.AsyncInfo{\n\t\t\tRetries: 3,\n\t\t\tDelay: 20,\n\t\t},\n\t\tBaseDriver: &drivers.BaseDriver{\n\t\t\tMachineName: machineName,\n\t\t\tStorePath: storePath,\n\t\t},\n\t}\n}", "func CreatePage(browserName ...string) core.Page {\n\tcapabilities := core.Use()\n\tif len(browserName) > 0 {\n\t\tcapabilities.Browser(browserName[0])\n\t}\n\tnewPage, err := driver.Page(capabilities)\n\tcheckFailure(err)\n\treturn newPage\n}", "func NewDriver(version string, ctx *cli.Context) (*Driver, error) {\n\tdocker, err := client.NewEnvClient()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not connect to docker: %s\", err)\n\t}\n\n\td := &Driver{\n\t\tnetworks: networkTable{},\n\t\tdClient: docker,\n\t}\n\treturn d, nil\n}", "func WDInit() selenium.WebDriver {\n\tvar err error\n\n\tops := []selenium.ServiceOption{\n\t\tselenium.ChromeDriver(seleniumPath),\n\t}\n\n\t//service, err := selenium.NewSeleniumService(seleniumPath, port, ops...)\n\tservice, err := selenium.NewChromeDriverService(chromeDriverPath, port, ops...)\n\tif err != nil {\n\t\tlog.Printf(\"Error starting the ChromeDriver server: %v\", err)\n\t}\n\t//Delay service shutdown\n\tdefer service.Stop()\n\n\t//log.Println(\"Service => \", service)\n\n\tcaps := selenium.Capabilities(map[string]interface{}{\"browserName\": \"chrome\"})\n\t//log.Println(\"Capabilities => \", caps)\n\n\tdriver, err := selenium.NewRemote(caps, \"\")\n\n\tif err != nil {\n\t\tlog.Println(\"support/base | Error al instanciar el driver de Selenium : \", err.Error())\n\t}\n\t//driver.ResizeWindow(\"note\", 1920, 1080)\n\treturn driver\n}", "func newHub(scheme string, vendorID uint16, productIDs []uint16, usageID uint16, endpointID int, makeDriver func(bgmlogs.bgmlogsger) driver) (*Hub, error) {\n\tif !hid.Supported() {\n\t\treturn nil, errors.New(\"unsupported platform\")\n\t}\n\thub := &Hub{\n\t\tscheme: scheme,\n\t\tvendorID: 
vendorID,\n\t\tproductIDs: productIDs,\n\t\tusageID: usageID,\n\t\tendpointID: endpointID,\n\t\tmakeDriver: makeDriver,\n\t\tquit: make(chan chan error),\n\t}\n\thubPtr.refreshWallets()\n\treturn hub, nil\n}", "func NewDriver() Driver {\n\treturn &boltDriver{}\n}", "func NewDriver() *Driver {\n\treturn &Driver{\n\t\tVMDriver: &drivers.VMDriver{\n\t\t\tBaseDriver: &drivers.BaseDriver{},\n\t\t\tCPU: DefaultCPUs,\n\t\t\tMemory: DefaultMemory,\n\t\t},\n\t}\n}", "func NewDriver(dialect string, c Conn) *Driver {\n\treturn &Driver{dialect: dialect, Conn: c}\n}", "func newMongoDriver(name string, opts MongoDBOptions) (*mongoDriver, error) {\n\thost, _ := os.Hostname() // nolint\n\n\tif err := opts.Validate(); err != nil {\n\t\treturn nil, errors.Wrap(err, \"invalid mongo driver options\")\n\t}\n\n\treturn &mongoDriver{\n\t\tname: name,\n\t\topts: opts,\n\t\tinstanceID: fmt.Sprintf(\"%s.%s.%s\", name, host, uuid.New()),\n\t}, nil\n}", "func newStubDriver() *stubDriver {\n\treturn &stubDriver{\n\t\tdedupedBlocks: make(map[string][]byte),\n\t\theaders: make(map[string][]byte),\n\t}\n}", "func openNewMongoDriver(ctx context.Context, name string, opts MongoDBOptions, client *mongo.Client) (*mongoDriver, error) {\n\td, err := newMongoDriver(name, opts)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"could not create driver\")\n\t}\n\n\tif err := d.start(ctx, clientStartOptions{client: client}); err != nil {\n\t\treturn nil, errors.Wrap(err, \"problem starting driver\")\n\t}\n\n\treturn d, nil\n}", "func newVideoDriver(scaleFactor float64, unlimitedFPS bool) (*videoDriver, error) {\n\tvar vd videoDriver\n\n\tvd.unlimitedFPS = unlimitedFPS\n\n\terr := sdl.Init(sdl.INIT_EVERYTHING)\n\tif err != nil {\n\t\treturn nil, xerrors.Errorf(\"initializing SDL: %v\", err)\n\t}\n\n\tvd.window, err = sdl.CreateWindow(\n\t\t\"Gopherboy\",\n\t\tsdl.WINDOWPOS_UNDEFINED, sdl.WINDOWPOS_UNDEFINED,\n\t\tint32(gameboy.ScreenWidth*scaleFactor),\n\t\tint32(gameboy.ScreenHeight*scaleFactor),\n\t\tsdl.WINDOW_OPENGL)\n\tif err != nil {\n\t\treturn nil, xerrors.Errorf(\"initializing window: %v\", err)\n\t}\n\n\tvd.renderer, err = sdl.CreateRenderer(vd.window, -1, sdl.RENDERER_ACCELERATED)\n\tif err != nil {\n\t\treturn nil, xerrors.Errorf(\"initializing renderer: %v\", err)\n\t}\n\n\tvd.renderer.SetDrawColor(255, 255, 255, 255)\n\n\tvd.readyForNewFrame = true\n\n\treturn &vd, nil\n}", "func NewDriver(context *base.Context) (drivers.Driver, error) {\n\ttokens := strings.SplitN(context.Region, \":\", 2)\n\tif len(tokens) != 2 {\n\t\treturn nil, util.Errorf(\"invalid region syntax, expected <driver>:<region name>, got: %q\", context.Region)\n\t}\n\n\tvar driver drivers.Driver\n\n\tprovider := tokens[0]\n\tregion := tokens[1]\n\tswitch provider {\n\tcase \"aws\":\n\t\tdriver = amazon.NewDriver(context, region)\n\tcase \"gce\":\n\t\tdriver = google.NewDriver(context, region)\n\tdefault:\n\t\treturn nil, util.Errorf(\"unknown driver: %s\", driver)\n\t}\n\n\terr := driver.Init()\n\treturn driver, err\n}", "func NewDriver(hostName, storePath string) *Driver {\n\treturn &Driver{\n\t\tBaseDriver: &drivers.BaseDriver{\n\t\t\tMachineName: hostName,\n\t\t\tStorePath: storePath,\n\t\t},\n\t\tBoot2DockerIsoVersion: defaultBoot2DockerIsoVersion,\n\t\tBoot2DockerURL: defaultBoot2DockerURL,\n\t\tBootCmd: defaultBootCmd,\n\t\tCPU: defaultCPU,\n\t\tCaCertPath: defaultCaCertPath,\n\t\tDiskSize: defaultDiskSize,\n\t\tMacAddr: defaultMacAddr,\n\t\tMemory: defaultMemory,\n\t\tPrivateKeyPath: defaultPrivateKeyPath,\n\t\tUUID: 
defaultUUID,\n\t\tNFSShare: defaultNFSShare,\n\t}\n}", "func newDriverV2(options *DriverOptions) *DriverV2 {\n\tklog.Warning(\"Using DriverV2\")\n\tdriver := DriverV2{}\n\tdriver.Name = options.DriverName\n\tdriver.Version = driverVersion\n\tdriver.NodeID = options.NodeID\n\tdriver.VolumeAttachLimit = options.VolumeAttachLimit\n\tdriver.volumeLocks = volumehelper.NewVolumeLocks()\n\tdriver.perfOptimizationEnabled = options.EnablePerfOptimization\n\tdriver.cloudConfigSecretName = options.CloudConfigSecretName\n\tdriver.cloudConfigSecretNamespace = options.CloudConfigSecretNamespace\n\tdriver.customUserAgent = options.CustomUserAgent\n\tdriver.userAgentSuffix = options.UserAgentSuffix\n\tdriver.useCSIProxyGAInterface = options.UseCSIProxyGAInterface\n\tdriver.enableOtelTracing = options.EnableOtelTracing\n\tdriver.ioHandler = azureutils.NewOSIOHandler()\n\tdriver.hostUtil = hostutil.NewHostUtil()\n\n\ttopologyKey = fmt.Sprintf(\"topology.%s/zone\", driver.Name)\n\treturn &driver\n}", "func NewDriver(config dbmate.DriverConfig) dbmate.Driver {\n\treturn &Driver{\n\t\tmigrationsTableName: config.MigrationsTableName,\n\t\tdatabaseURL: config.DatabaseURL,\n\t\tlog: config.Log,\n\t}\n}", "func NewDriver(driveClient drive.APIClient) (drive.Driver, error) {\n\treturn newDriver(driveClient)\n}", "func NewChromium(profile, key, name, storage string) (Browser, error) {\n\treturn &Chromium{profilePath: profile, keyPath: key, name: name, storage: storage}, nil\n}", "func NewDriver(storage StorageDriver, mountPath string) Driver {\n\treturn &driverInfo{\n\t\tstorage: storage,\n\t\tmountPath: mountPath,\n\t\tvolumes: make(map[string]*Volume),\n\t}\n}", "func NewDriver(nodeID string, endpoint string, synoOption *options.SynologyOptions) (Driver, error) {\n\tglog.Infof(\"Driver: %v\", DriverName)\n\n\tsession, _, err := Login(synoOption)\n\tif err != nil {\n\t\tglog.V(3).Infof(\"Failed to login: %v\", err)\n\t\treturn nil, err\n\t}\n\n\td := &driver{\n\t\tendpoint: endpoint,\n\t\tsynologyHost: synoOption.Host,\n\t\tsession: *session,\n\t}\n\n\tcsiDriver := csicommon.NewCSIDriver(DriverName, version, nodeID)\n\tcsiDriver.AddControllerServiceCapabilities(\n\t\t[]csi.ControllerServiceCapability_RPC_Type{\n\t\t\tcsi.ControllerServiceCapability_RPC_LIST_VOLUMES,\n\t\t\tcsi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,\n\t\t\tcsi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME,\n\t\t\tcsi.ControllerServiceCapability_RPC_EXPAND_VOLUME,\n\t\t})\n\tcsiDriver.AddVolumeCapabilityAccessModes(\n\t\t[]csi.VolumeCapability_AccessMode_Mode{csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER})\n\n\td.csiDriver = csiDriver\n\n\treturn d, nil\n}", "func ServiceBrowserNew(conn *dbus.Conn, path dbus.ObjectPath) (*ServiceBrowser, error) {\n\tc := new(ServiceBrowser)\n\n\tc.object = conn.Object(\"org.freedesktop.Avahi\", path)\n\tc.AddChannel = make(chan Service)\n\tc.RemoveChannel = make(chan Service)\n\tc.closeCh = make(chan struct{})\n\n\treturn c, nil\n}", "func New(params DriverParameters) (*Driver, error) {\n defer obs.CloseLog()\n //obs.SyncLog() \n\tclient, _ := obs.New(\n\t\tparams.AccessKey,\n\t\tparams.SecretKey,\n\t\tparams.Endpoint,\n\t\tobs.WithRegion(params.Region),\n\t\tobs.WithSslVerify(params.Secure),\n\t\tobs.WithSignature(params.V2Auth),\n\t\tobs.WithPathStyle(params.PathStyle),\n\t\t//obs.WithSecurityToken(params.SecurityToken),\n\t)\n \n\n\td := &driver{\n\t\tClient: client,\n\t\tBucket: params.Bucket,\n\t\tChunkSize: params.ChunkSize,\n\t\tMultipartCopyChunkSize: 
params.MultipartCopyChunkSize,\n\t\tMultipartCopyMaxConcurrency: params.MultipartCopyMaxConcurrency,\n\t\tMultipartCopyThresholdSize: params.MultipartCopyThresholdSize,\n\t\tRootDirectory: params.RootDirectory,\n\t\tStorageClass: params.StorageClass,\n\t\tObjectACL: params.ObjectACL,\n\t\tPathStyle: params.PathStyle,\n\t}\n\treturn &Driver{\n\t\tbaseEmbed: baseEmbed{\n\t\t\tBase: base.Base{\n\t\t\t\tStorageDriver: d,\n\t\t\t},\n\t\t},\n\t}, nil\n}", "func New(strs ...interface{}) (*Browser, error) {\n\tvar err error\n\tb := &Browser{}\n\tfor _, str := range strs {\n\t\tif errr := rpc.Register(str); errr != nil {\n\t\t\treturn nil, errr\n\t\t}\n\t}\n\tport := js.Global.Get(\"window\").Get(\"location\").Get(\"port\").String()\n\tb.s, err = websocket.Dial(\"ws://localhost:\" + port + \"/ws-client\") // Blocks until connection is established\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Println(\"connected to ws-client\")\n\tgo jsonrpc.ServeConn(b.s)\n\n\tb.c, err = websocket.Dial(\"ws://localhost:\" + port + \"/ws-server\") // Blocks until connection is established\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Println(\"connected to ws-server\")\n\tb.Client = jsonrpc.NewClient(b.c)\n\treturn b, nil\n}", "func New(lDs, gDs Placeholder, dfn DriverNotifyFunc, ifn Placeholder, pg plugingetter.PluginGetter) (*DrvRegistry, error) {\n\treturn &DrvRegistry{\n\t\tNetworks: Networks{Notify: dfn},\n\t\tpluginGetter: pg,\n\t}, nil\n}", "func NewDriver(client *redis.Client, prefix string) *Driver {\n\treturn &Driver{\n\t\tclient: client,\n\t\tprefix: prefix,\n\t}\n}", "func New(image, rootfs string) (Driver, error) {\n\tfor _, name := range drivers {\n\t\tv := reflect.New(driverRegistry[name])\n\t\td, ok := v.Interface().(Driver)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"%s driver doesn't seem to implement the fsdriver.Driver interface\")\n\t\t}\n\n\t\tif err := d.Init(image, rootfs); err == nil {\n\t\t\treturn d, nil\n\t\t}\n\t}\n\n\treturn nil, fmt.Errorf(\"none of %v drivers are supported on the host\", drivers)\n}", "func NewChromeDriver(url string, port, threadsCount int, timeout time.Duration) *ChromeDriver {\n\td := &ChromeDriver{}\n\td.url = url\n\td.Port = port\n\td.Threads = threadsCount\n\td.StartTimeout = timeout\n\treturn d\n}", "func NewDriver(p *Periph) *Driver {\n\treturn &Driver{p: p, timeoutRx: -1, timeoutTx: -1}\n}", "func NewDriver(ctx context.Context, signupDisabled bool, cfg ldap.Config) (sdk.AuthDriver, error) {\n\tvar d = AuthDriver{\n\t\tsignupDisabled: signupDisabled,\n\t}\n\n\tldap, err := ldap.NewLdapDriver(ctx, cfg)\n\tif err != nil {\n\t\treturn d, err\n\t}\n\td.driver = ldap\n\n\treturn d, nil\n}", "func NewDriver(options *DriverOptions) *Driver {\n\td := Driver{\n\t\tvolLockMap: util.NewLockMap(),\n\t\tsubnetLockMap: util.NewLockMap(),\n\t\tvolumeLocks: newVolumeLocks(),\n\t\tcloudConfigSecretName: options.CloudConfigSecretName,\n\t\tcloudConfigSecretNamespace: options.CloudConfigSecretNamespace,\n\t\tcustomUserAgent: options.CustomUserAgent,\n\t\tuserAgentSuffix: options.UserAgentSuffix,\n\t\tblobfuseProxyEndpoint: options.BlobfuseProxyEndpoint,\n\t\tenableBlobfuseProxy: options.EnableBlobfuseProxy,\n\t\tblobfuseProxyConnTimout: options.BlobfuseProxyConnTimout,\n\t\tenableBlobMockMount: options.EnableBlobMockMount,\n\t\tallowEmptyCloudConfig: options.AllowEmptyCloudConfig,\n\t\tenableGetVolumeStats: options.EnableGetVolumeStats,\n\t\tmountPermissions: options.MountPermissions,\n\t}\n\td.Name = options.DriverName\n\td.Version = driverVersion\n\td.NodeID = 
options.NodeID\n\n\td.DefaultControllerServer.Driver = &d.CSIDriver\n\td.DefaultIdentityServer.Driver = &d.CSIDriver\n\td.DefaultNodeServer.Driver = &d.CSIDriver\n\n\tvar err error\n\tgetter := func(key string) (interface{}, error) { return nil, nil }\n\tif d.accountSearchCache, err = azcache.NewTimedcache(time.Minute, getter); err != nil {\n\t\tklog.Fatalf(\"%v\", err)\n\t}\n\treturn &d\n}", "func NewDriver(libType string) (Driver, bool) {\n\tvar ecosystem dbTypes.Ecosystem\n\tvar comparer compare.Comparer\n\n\tswitch libType {\n\tcase ftypes.Bundler, ftypes.GemSpec:\n\t\tecosystem = vulnerability.RubyGems\n\t\tcomparer = rubygems.Comparer{}\n\tcase ftypes.RustBinary, ftypes.Cargo:\n\t\tecosystem = vulnerability.Cargo\n\t\tcomparer = compare.GenericComparer{}\n\tcase ftypes.Composer:\n\t\tecosystem = vulnerability.Composer\n\t\tcomparer = compare.GenericComparer{}\n\tcase ftypes.GoBinary, ftypes.GoModule:\n\t\tecosystem = vulnerability.Go\n\t\tcomparer = compare.GenericComparer{}\n\tcase ftypes.Jar, ftypes.Pom, ftypes.Gradle:\n\t\tecosystem = vulnerability.Maven\n\t\tcomparer = maven.Comparer{}\n\tcase ftypes.Npm, ftypes.Yarn, ftypes.Pnpm, ftypes.NodePkg, ftypes.JavaScript:\n\t\tecosystem = vulnerability.Npm\n\t\tcomparer = npm.Comparer{}\n\tcase ftypes.NuGet, ftypes.DotNetCore:\n\t\tecosystem = vulnerability.NuGet\n\t\tcomparer = compare.GenericComparer{}\n\tcase ftypes.Pipenv, ftypes.Poetry, ftypes.Pip, ftypes.PythonPkg:\n\t\tecosystem = vulnerability.Pip\n\t\tcomparer = pep440.Comparer{}\n\tcase ftypes.Pub:\n\t\tecosystem = vulnerability.Pub\n\t\tcomparer = compare.GenericComparer{}\n\tcase ftypes.Hex:\n\t\tecosystem = vulnerability.Erlang\n\t\tcomparer = compare.GenericComparer{}\n\tcase ftypes.Conan:\n\t\tecosystem = vulnerability.Conan\n\t\t// Only semver can be used for version ranges\n\t\t// https://docs.conan.io/en/latest/versioning/version_ranges.html\n\t\tcomparer = compare.GenericComparer{}\n\tcase ftypes.Swift:\n\t\t// Swift uses semver\n\t\t// https://www.swift.org/package-manager/#importing-dependencies\n\t\tecosystem = vulnerability.Swift\n\t\tcomparer = compare.GenericComparer{}\n\tcase ftypes.Cocoapods:\n\t\t// CocoaPods uses RubyGems version specifiers\n\t\t// https://guides.cocoapods.org/making/making-a-cocoapod.html#cocoapods-versioning-specifics\n\t\tecosystem = vulnerability.Cocoapods\n\t\tcomparer = rubygems.Comparer{}\n\tcase ftypes.CondaPkg:\n\t\tlog.Logger.Warn(\"Conda package is supported for SBOM, not for vulnerability scanning\")\n\t\treturn Driver{}, false\n\tdefault:\n\t\tlog.Logger.Warnf(\"The %q library type is not supported for vulnerability scanning\", libType)\n\t\treturn Driver{}, false\n\t}\n\treturn Driver{\n\t\tecosystem: ecosystem,\n\t\tcomparer: comparer,\n\t\tdbc: db.Config{},\n\t}, true\n}", "func getWD() (svc *selenium.Service, wd selenium.WebDriver, err error) {\n\topts := []selenium.ServiceOption{}\n\tsvc, err = selenium.NewChromeDriverService(*seleniumPath, *port, opts...)\n\tif nil != err {\n\t\tfmt.Println(\"start a chromedriver service falid\", err.Error())\n\t\treturn nil, nil, err\n\t}\n\n\t//注意这里,server关闭之后,chrome窗口也会关闭\n\t//defer svc.Stop()\n\n\t//链接本地的浏览器 chrome\n\tcaps := selenium.Capabilities{\n\t\t\"browserName\": \"chrome\",\n\t}\n\n\t//禁止图片加载,加快渲染速度\n\timagCaps := map[string]interface{}{\n\t\t\"profile.managed_default_content_settings.images\": 2,\n\t}\n\tchromeCaps := chrome.Capabilities{\n\t\tPrefs: imagCaps,\n\t\tPath: \"\",\n\t}\n\t//以上是设置浏览器参数\n\tcaps.AddChrome(chromeCaps)\n\n\t// 调起chrome浏览器\n\twd, err = 
selenium.NewRemote(caps, fmt.Sprintf(\"http://localhost:%d/wd/hub\", *port))\n\tif err != nil {\n\t\tfmt.Println(\"connect to the webDriver faild\", err.Error())\n\t\treturn nil, nil, err\n\t}\n\n\treturn\n}", "func NewDriver(ctx context.Context, prefix string) workers.Driver {\n\tdriver, err := redis.NewDriver(\n\t\tctx,\n\t\tredis.WithQueuePrefix(prefix),\n\t\tredis.WithRedisPool(client),\n\t)\n\tassert.Nil(err)\n\treturn driver\n}", "func newcomputer(brand string) *computer {\n\treturn &computer{brand: brand}\n}", "func newTestEnv(ctx context.Context, t *testing.T, pipelineInfo *pps.PipelineInfo, realEnv *realenv.RealEnv) *testEnv {\n\tlogger := logs.New(pctx.Child(ctx, t.Name()))\n\tworkerDir := filepath.Join(realEnv.Directory, \"worker\")\n\tdriver, err := driver.NewDriver(\n\t\tctx,\n\t\trealEnv.ServiceEnv,\n\t\trealEnv.PachClient,\n\t\tpipelineInfo,\n\t\tworkerDir,\n\t)\n\trequire.NoError(t, err)\n\n\tctx, cancel := pctx.WithCancel(realEnv.PachClient.Ctx())\n\tt.Cleanup(cancel)\n\tdriver = driver.WithContext(ctx)\n\n\treturn &testEnv{\n\t\tRealEnv: realEnv,\n\t\tlogger: logger,\n\t\tdriver: &testDriver{driver},\n\t}\n}", "func openNewMongoGroupDriver(ctx context.Context, name string, opts MongoDBOptions, group string, client *mongo.Client) (*mongoDriver, error) {\n\td, err := newMongoGroupDriver(name, opts, group)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"could not create driver\")\n\t}\n\n\topts.UseGroups = true\n\topts.GroupName = group\n\n\tif err := d.start(ctx, clientStartOptions{client: client}); err != nil {\n\t\treturn nil, errors.Wrap(err, \"starting driver\")\n\t}\n\n\treturn d, nil\n}", "func New(fsys fs.FS, path string) (source.Driver, error) {\n\tvar i driver\n\tif err := i.Init(fsys, path); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to init driver with path %s: %w\", path, err)\n\t}\n\treturn &i, nil\n}", "func NewDefaultDriver() *DefaultDriver {\n\treturn &DefaultDriver{\n\t\tServer: http.Server{\n\t\t\tReadTimeout: 30 * time.Second,\n\t\t\tWriteTimeout: 30 * time.Second,\n\t\t\tIdleTimeout: 120 * time.Second,\n\t\t},\n\t}\n}", "func NewDriver(rootPath string) (*Driver, error) {\n\tfileDriver := new(Driver)\n\tfileDriver.secretsDataFilePath = filepath.Join(rootPath, secretsDataFile)\n\t// the lockfile functions require that the rootPath dir is executable\n\tif err := os.MkdirAll(rootPath, 0o700); err != nil {\n\t\treturn nil, err\n\t}\n\n\tlock, err := lockfile.GetLockFile(filepath.Join(rootPath, \"secretsdata.lock\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfileDriver.lockfile = lock\n\n\treturn fileDriver, nil\n}", "func New(apiVersion, maestroVersion string) (*Driver, error) {\n\tdClient, dockerErr := dockerEngine.NewEnvClient()\n\tif dockerErr != nil {\n\t\treturn nil, dockerErr\n\t}\n\treturn &Driver{\n\t\tclient: dClient,\n\t\timage: fmt.Sprintf(\"cpg1111/maestro:%s\", maestroVersion),\n\t}, nil\n}", "func New(addr string, root string) *Driver {\n\tdefer debugTime()()\n\tshell := shell.NewShell(addr)\n\tinfo, err := shell.ID()\n\tif err != nil {\n\t\tlog.Error(\"error constructing node: \", err)\n\t\treturn nil\n\t}\n\tif strings.HasPrefix(root, \"/ipns/local/\") {\n\t\troot = strings.Replace(root, \"local\", info.ID, 1)\n\t}\n\tif !strings.HasPrefix(root, \"/ipns/\") {\n\t\tlog.Error(\"tried to use non-ipns root\")\n\t\treturn nil\n\t}\n\n\tipnsroot, err := shell.Resolve(info.ID)\n\tif err != nil {\n\t\tlog.Error(\"failed to resolve ipns root: \", err)\n\t\treturn nil\n\t}\n\n\tlog.Error(\"ID: \", info.ID)\n\tlog.Error(\"IPNSROOT: 
\", ipnsroot)\n\thash, err := shell.ResolvePath(ipnsroot + \"/docker-registry\")\n\tif err != nil {\n\t\tif !strings.Contains(err.Error(), \"no link named\") {\n\t\t\tlog.Error(\"failed to resolve docker-registry dir: \", err)\n\t\t\treturn nil\n\t\t}\n\n\t\th, err := shell.NewObject(\"unixfs-dir\")\n\t\tif err != nil {\n\t\t\tlog.Error(\"failed to get new empty dir: \", err)\n\t\t\treturn nil\n\t\t}\n\n\t\thash = h\n\t}\n\n\td := &driver{\n\t\tshell: shell,\n\t\troot: root,\n\t\troothash: hash,\n\t}\n\td.publish = d.runPublisher(info.ID)\n\n\treturn &Driver{\n\t\tbaseEmbed: baseEmbed{\n\t\t\tBase: base.Base{\n\t\t\t\tStorageDriver: d,\n\t\t\t},\n\t\t},\n\t}\n}", "func newW3CCapabilities(caps Capabilities) Capabilities {\n\tisValidW3CCapability := map[string]bool{}\n\tfor _, name := range w3cCapabilityNames {\n\t\tisValidW3CCapability[name] = true\n\t}\n\tif b, ok := caps[\"browserName\"]; ok && b == \"chrome\" {\n\t\tfor _, name := range chromeCapabilityNames {\n\t\t\tisValidW3CCapability[name] = true\n\t\t}\n\t}\n\n\talwaysMatch := make(Capabilities)\n\tfor name, value := range caps {\n\t\tif isValidW3CCapability[name] || strings.Contains(name, \":\") {\n\t\t\talwaysMatch[name] = value\n\t\t}\n\t}\n\n\t// Move the Firefox profile setting from the old location to the new\n\t// location.\n\tif prof, ok := caps[\"firefox_profile\"]; ok {\n\t\tif c, ok := alwaysMatch[firefox.CapabilitiesKey]; ok {\n\t\t\tfirefoxCaps := c.(firefox.Capabilities)\n\t\t\tif firefoxCaps.Profile == \"\" {\n\t\t\t\tfirefoxCaps.Profile = prof.(string)\n\t\t\t}\n\t\t} else {\n\t\t\talwaysMatch[firefox.CapabilitiesKey] = firefox.Capabilities{\n\t\t\t\tProfile: prof.(string),\n\t\t\t}\n\t\t}\n\t}\n\n\treturn Capabilities{\n\t\t\"alwaysMatch\": alwaysMatch,\n\t}\n}", "func (d DriverFactory) NewDriver() (ftp.Driver, error) {\n\tlogrus.Debugf(\"Trying to create an aws session with: Region: %q, PathStyle: %v, Endpoint: %q\", d.s3Region, d.s3PathStyle, d.s3Endpoint)\n\ts3Session, err := session.NewSession(&aws.Config{\n\t\tRegion: aws.String(d.s3Region),\n\t\tS3ForcePathStyle: aws.Bool(d.s3PathStyle),\n\t\tEndpoint: aws.String(d.s3Endpoint),\n\t\tCredentials: d.awsCredentials,\n\t})\n\tif err != nil {\n\t\treturn nil, goErrors.Wrapf(err, \"Failed to instantiate driver\")\n\t}\n\ts3Client := s3.New(s3Session)\n\n\tvar metricsSender MetricsSender\n\tif d.DisableCloudWatch {\n\t\tmetricsSender = NopSender{}\n\t} else {\n\t\tcloudwatchSession, err := session.NewSession(&aws.Config{\n\t\t\tRegion: aws.String(d.s3Region),\n\t\t\tCredentials: d.awsCredentials,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, goErrors.Wrapf(err, \"Failed to create cloudwatch session\")\n\t\t}\n\n\t\tmetricsSender, err = NewCloudwatchSender(cloudwatchSession)\n\t\tif err != nil {\n\t\t\treturn nil, goErrors.Wrapf(err, \"Failed to instantiate cloudwatch sender\")\n\t\t}\n\t}\n\treturn S3Driver{\n\t\tfeatureFlags: d.featureFlags,\n\t\tnoOverwrite: d.noOverwrite,\n\t\ts3: s3Client,\n\t\tuploader: s3manager.NewUploaderWithClient(s3Client),\n\t\tmetrics: metricsSender,\n\t\tbucketName: d.bucketName,\n\t\tbucketURL: d.bucketURL,\n\t}, nil\n}", "func New(capabilities ...string) *Capability {\n\treturn &Capability{\n\t\tCapabilities: capabilities,\n\t}\n}", "func createChrome(ctx context.Context, chromeOpts ...chrome.Option) (*chrome.Chrome, *chrome.TestConn, error) {\n\t// Create a fresh login.\n\tcr, err := chrome.New(ctx, chromeOpts...)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"failed to create Chrome\")\n\t}\n\ttconn, err := 
cr.TestAPIConn(ctx)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"failed to connect to Test API\")\n\t}\n\n\treturn cr, tconn, nil\n}", "func newVideoTrackFromDriver(d driver.Driver, recorder driver.VideoRecorder, constraints MediaTrackConstraints, selector *CodecSelector) (Track, error) {\n\treader, err := recorder.VideoRecord(constraints.selectedMedia)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn newVideoTrackFromReader(d, reader, selector), nil\n}", "func NewBrowserSiteList()(*BrowserSiteList) {\n m := &BrowserSiteList{\n Entity: *NewEntity(),\n }\n return m\n}", "func NewCapabilities() *Capabilities {\n\treturn &Capabilities{\n\t\tm: make(map[string]*Capability, 0),\n\t}\n}", "func NewVDENetworkDriver(socketRoot string) *VDENetworkDriver {\n\treturn &VDENetworkDriver{\n\t\tsocketRoot: socketRoot,\n\t\tnetworks: make(map[string]*VDENetworkDesc),\n\t\tipam: make(map[string]*IPAMNetworkPool),\n\t}\n}", "func (a *api) newScenario(i interface{}) {\n\ta.c = nil\n\ta.resp = nil\n\ta.err = nil\n\ta.c = client.New(goaclient.HTTPClientDoer(http.DefaultClient))\n\ta.c.Host = \"localhost:8080\"\n}", "func NewCapabilities(features ...string) Capabilities {\n\tc := Capabilities{}\n\tfor _, feature := range features {\n\t\tc.With(feature)\n\t}\n\treturn c\n}", "func NewStackDriver(projectID string) (*StackDriver, error) {\n\tctx := context.Background()\n\tmetricsClient, err := stackmoni.NewMetricClient(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tloggingClient, err := logging.NewClient(ctx, projectID)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to create client: %v\", err)\n\t}\n\treturn &StackDriver{metricsClient: metricsClient, loggingClient: loggingClient, projectID: projectID, ctx: ctx}, nil\n}", "func RegisterDriver(factory DriverFactory, driver string) {\n\tif _, ok := factories[driver]; ok {\n\t\tlogger.Panicf(\"power driver %q already registered!\", driver)\n\t}\n\tfactories[driver] = factory\n}", "func (m *Manager) AddDriver(machineName string, driverFactory func() Driver) {\n\tm.drivers[machineName] = driverFactory\n}", "func New(\n\tflags ChromiumFlags,\n\tbinary string,\n\tworkdir string,\n\tstdout string,\n\tstderr string,\n) *Chrome {\n\treturn &Chrome{\n\t\tflags: flags,\n\t\tbinary: binary,\n\t\tstderr: stderr,\n\t\tstdout: stdout,\n\t\tworkdir: workdir,\n\t}\n}", "func newCommand(tb DirCleaner, opts ...server.CommandOption) *Command {\n\tpath := tb.TempDir()\n\n\t// Set aggressive close timeout by default to avoid hanging tests. This was\n\t// a problem with PDK tests which used pilosa/client as well. 
We put it at the\n\t// beginning of the option slice so that it can be overridden by user-passed\n\t// options.\n\topts = append([]server.CommandOption{\n\t\tserver.OptCommandCloseTimeout(time.Millisecond * 2),\n\t}, opts...)\n\n\tm := &Command{commandOptions: opts}\n\toutput := io.Discard\n\tif testing.Verbose() {\n\t\toutput = os.Stderr\n\t}\n\tm.Command = server.NewCommand(output, opts...)\n\t// pick etcd ports using a socket rather than a real port\n\terr := GetPortsGenConfigs(tb, []*Command{m})\n\tif err != nil {\n\t\ttb.Fatalf(\"generating config: %v\", err)\n\t}\n\tm.Config.DataDir = path\n\tdefaultConf := server.NewConfig()\n\n\tif m.Config.Bind == defaultConf.Bind {\n\t\tm.Config.Bind = \"http://localhost:0\"\n\t}\n\n\tif m.Config.BindGRPC == defaultConf.BindGRPC {\n\t\tm.Config.BindGRPC = \"http://localhost:0\"\n\t}\n\n\tm.Config.Translation.MapSize = 140000\n\tm.Config.WorkerPoolSize = 2\n\n\treturn m\n}", "func NewPage(c PageConfig) Page {\n\treturn driver.NewPage(c)\n}", "func newAudioTrackFromDriver(d driver.Driver, recorder driver.AudioRecorder, constraints MediaTrackConstraints, selector *CodecSelector) (Track, error) {\n\treader, err := recorder.AudioRecord(constraints.selectedMedia)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn newAudioTrackFromReader(d, reader, selector), nil\n}", "func NewDBDriver(log *logrus.Logger) Driver {\n\treturn &DBDriver{log: log}\n}", "func handleNewCommand() {\n\tneoCliRoot := os.Getenv(\"GOPATH\") + \"/src/github.com/ivpusic/neo/cmd/neo\"\n\n\tif len(*templateName) == 0 {\n\t\tlogger.Info(\"Creating Neo project\")\n\t\trunCmd(neoCliRoot+\"/scripts/neo-template\", []string{*projectName})\n\n\t} else {\n\t\tswitch *templateName {\n\t\tcase \"angular\":\n\t\t\tlogger.Info(\"Creating Neo Angular project\")\n\t\t\trunCmd(neoCliRoot+\"/scripts/angular-template\", []string{*projectName})\n\t\tcase \"html\":\n\t\t\tlogger.Info(\"Creating Neo HTML project\")\n\t\t\trunCmd(neoCliRoot+\"/scripts/neo-html-template\", []string{*projectName})\n\t\tdefault:\n\t\t\tlogger.Errorf(\"Unkonown template %s!\", *projectName)\n\t\t}\n\t}\n}", "func New(logger log.Logger, db store.DriversStore) http.Handler {\n\tvar (\n\t\tsvc = service.NewDriversService(db)\n\t\toptions = []httptransport.ServerOption{\n\t\t\thttptransport.ServerErrorEncoder(encodeError),\n\t\t}\n\t)\n\n\trouter := mux.NewRouter().PathPrefix(\"/api/\").Subrouter()\n\trouter.Methods(\"POST\").Path(\"/import\").Handler(httptransport.NewServer(\n\t\tlogRecoverMiddleware(logger)(service.MakeDriversImportEndpoint(svc)),\n\t\tservice.DecodeDriversImportRequest,\n\t\tencodeResponse,\n\t\toptions...,\n\t))\n\trouter.Methods(\"GET\").Path(\"/driver/{id}\").Handler(httptransport.NewServer(\n\t\tlogRecoverMiddleware(logger)(service.MakeDriversGetByIDEndpoint(svc)),\n\t\tservice.DecodeDriversGetByIDRequest,\n\t\tencodeResponse,\n\t\toptions...,\n\t))\n\trouter.NotFoundHandler = notFoundHandler{}\n\trouter.MethodNotAllowedHandler = methodNotAllowedHandler{}\n\n\thandler := http.Handler(router)\n\thandler = &requestIDMiddleware{router}\n\n\treturn handler\n}", "func NewDb(db *sql.DB, driverName string) *DB {\n return &DB{DB: db, driverName: driverName, Mapper: mapper()}\n}", "func newScenario(name string) *Instruction {\n\treturn &Instruction{\n\t\tType: ScenarioInst,\n\t\tName: name,\n\t\tVersion: &Version{},\n\t}\n}", "func newControllerCapabilities() []*csi.ControllerServiceCapability {\n\tfromType := func(cap csi.ControllerServiceCapability_RPC_Type) *csi.ControllerServiceCapability {\n\t\treturn 
&csi.ControllerServiceCapability{\n\t\t\tType: &csi.ControllerServiceCapability_Rpc{\n\t\t\t\tRpc: &csi.ControllerServiceCapability_RPC{\n\t\t\t\t\tType: cap,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\n\tvar capabilities []*csi.ControllerServiceCapability\n\tfor _, cap := range []csi.ControllerServiceCapability_RPC_Type{\n\t\tcsi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,\n\t\tcsi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT,\n\t\tcsi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS,\n\t\tcsi.ControllerServiceCapability_RPC_LIST_VOLUMES,\n\t} {\n\t\tcapabilities = append(capabilities, fromType(cap))\n\t}\n\treturn capabilities\n}", "func NewSimulationAutomationRun()(*SimulationAutomationRun) {\n m := &SimulationAutomationRun{\n Entity: *NewEntity(),\n }\n return m\n}", "func newDatabase(info extraInfo, db *sql.DB) *database {\n\treturn &database{\n\t\tname: info.dbName,\n\t\tdriverName: info.driverName,\n\t\tdb: db,\n\t}\n}", "func NewQemuDriver(ctx *DriverContext) Driver {\n\treturn &QemuDriver{DriverContext: *ctx}\n}", "func (factory *sharedInMemoryDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {\n\tn, ok := parameters[\"name\"]\n\tif !ok {\n\t\treturn nil, errors.New(\"sharedInMemoryDriverFactory: parameter 'name' must be specified\")\n\t}\n\n\tname, ok := n.(string)\n\tif !ok {\n\t\treturn nil, errors.New(\"sharedInMemoryDriverFactory: parameter 'name' must be a string\")\n\t}\n\n\tif _, ok := factory.drivers[name]; !ok {\n\t\tfactory.drivers[name] = inmemory.New()\n\t}\n\n\treturn factory.drivers[name], nil\n}", "func LocateBrowser(preferredBrowser string) BrowserProtocol {\n\tp := BrowserProtocol{}\n\n\tswitch preferredBrowser {\n\tcase \"chrome\":\n\t\tp = LocateChrome()\n\tcase \"edge\":\n\t\tp = LocateEdge()\n\tcase \"brave\":\n\t\tp = LocateBrave()\n\t// case \"firefox\":\n\t// \tp = LocateFirefox()\n\tdefault:\n\t\tswitch runtime.GOOS {\n\t\tcase \"darwin\":\n\t\tcase \"windows\":\n\t\t\tp = LocateEdge()\n\t\tdefault:\n\t\t\t// p = LocateFirefox()\n\t\t\t// ! 
In Firefox, remote debugging port needs to be enabled explicitly.\n\t\t}\n\t}\n\n\tif p.Path == \"\" {\n\t\tp = LocateChrome()\n\t}\n\n\tif p.Path == \"\" {\n\t\tp = LocateBrave()\n\t}\n\n\tif p.Path == \"\" {\n\t\tp = LocateEdge()\n\t}\n\n\t// if p.Path == \"\" {\n\t// \tp = LocateFirefox()\n\t// }\n\n\treturn p\n}", "func newMongoGroupDriver(name string, opts MongoDBOptions, group string) (*mongoDriver, error) {\n\thost, _ := os.Hostname() // nolint\n\n\tif err := opts.Validate(); err != nil {\n\t\treturn nil, errors.Wrap(err, \"invalid mongo driver options\")\n\t}\n\n\topts.UseGroups = true\n\topts.GroupName = group\n\n\treturn &mongoDriver{\n\t\tname: name,\n\t\topts: opts,\n\t\tinstanceID: fmt.Sprintf(\"%s.%s.%s.%s\", name, group, host, uuid.New()),\n\t}, nil\n}", "func New(executor executor.Executor, name, arguments string) executor.Launcher {\n\treturn stressng{\n\t\texecutor: executor,\n\t\targuments: arguments,\n\t\tname: name,\n\t}\n}", "func newCollection(d driver.Collection) *Collection {\n\treturn &Collection{driver: d}\n}", "func loadDrivers(wd string) error {\n\tskipDirs := []string{\"completer\", \"metadata\"}\n\terr := fs.WalkDir(os.DirFS(wd), \".\", func(n string, d fs.DirEntry, err error) error {\n\t\tswitch {\n\t\tcase err != nil:\n\t\t\treturn err\n\t\tcase d.IsDir():\n\t\t\treturn nil\n\t\t}\n\t\tm := dirRE.FindAllStringSubmatch(n, -1)\n\t\tif m == nil || m[0][1] != m[0][2] || slices.Contains(skipDirs, m[0][1]) {\n\t\t\treturn nil\n\t\t}\n\t\ttag, dest := m[0][1], mostDrivers\n\t\tdriver, err := parseDriverInfo(tag, filepath.Join(wd, n))\n\t\tswitch {\n\t\tcase err != nil:\n\t\t\treturn err\n\t\tcase driver.Group == \"base\":\n\t\t\tdest = baseDrivers\n\t\tcase driver.Group == \"most\":\n\t\tcase driver.Group == \"all\":\n\t\t\tdest = allDrivers\n\t\tcase driver.Group == \"bad\":\n\t\t\tdest = badDrivers\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"driver %s has invalid group %q\", tag, driver.Group)\n\t\t}\n\t\tdest[tag] = driver\n\t\tif dest[tag].Aliases != nil {\n\t\t\tfor _, alias := range dest[tag].Aliases {\n\t\t\t\twireDrivers[alias[0]] = DriverInfo{\n\t\t\t\t\tTag: tag,\n\t\t\t\t\tDriver: alias[0],\n\t\t\t\t\tPkg: dest[tag].Pkg,\n\t\t\t\t\tDesc: alias[1],\n\t\t\t\t\tWire: true,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func newServer() *negroni.Negroni {\n\tn := negroni.Classic()\n\tn.UseHandler(router())\n\treturn n\n}", "func (c *DriversController) AddDriver() {\n\tvar driver models.Driver\n\tjson.Unmarshal(c.Ctx.Input.RequestBody, &driver)\n\tresult := models.AddDriver(driver)\n\tbuildResponse(result, c.Ctx, driver)\n}", "func NewNodeDriver(ctx *pulumi.Context,\n\tname string, args *NodeDriverArgs, opts ...pulumi.ResourceOption) (*NodeDriver, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.Active == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Active'\")\n\t}\n\tif args.Builtin == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Builtin'\")\n\t}\n\tif args.Url == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Url'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource NodeDriver\n\terr := ctx.RegisterResource(\"rancher2:index/nodeDriver:NodeDriver\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (plugin *flexProvisioner) NewDriverCall(execPath, command string) 
*DriverCall {\n\treturn plugin.NewDriverCallWithTimeout(execPath, command, 0)\n}" ]
[ "0.678176", "0.6750688", "0.6364941", "0.62132597", "0.6142062", "0.61376053", "0.6127721", "0.6018265", "0.60131764", "0.5968937", "0.59261644", "0.592092", "0.5875122", "0.5862739", "0.58357203", "0.5813662", "0.580961", "0.58073485", "0.5793834", "0.57759947", "0.5756461", "0.57492083", "0.5714924", "0.5674313", "0.56598437", "0.56355065", "0.5608847", "0.55671406", "0.5563688", "0.5563599", "0.5527457", "0.5523985", "0.55228174", "0.55218256", "0.5516527", "0.55098456", "0.5501041", "0.54970914", "0.5462325", "0.54037076", "0.5397357", "0.53958464", "0.53742284", "0.5353503", "0.535344", "0.5347493", "0.52348584", "0.52144253", "0.5185292", "0.51770866", "0.5147021", "0.5141222", "0.5099769", "0.5083045", "0.5080358", "0.5068259", "0.50675434", "0.5061458", "0.50095993", "0.4971977", "0.49617794", "0.4925792", "0.49165148", "0.4913952", "0.48609996", "0.48555496", "0.48532495", "0.48244643", "0.4819102", "0.48174244", "0.47953904", "0.47777206", "0.4744596", "0.47431934", "0.47404787", "0.47369114", "0.47095513", "0.46943572", "0.46643394", "0.46451288", "0.46369913", "0.46296248", "0.46273094", "0.46232298", "0.4619219", "0.45874453", "0.456762", "0.45657536", "0.4546927", "0.45332554", "0.4530385", "0.45298138", "0.45223567", "0.45134348", "0.45002198", "0.45000112", "0.44893765", "0.44842598", "0.44812888", "0.44772357" ]
0.73467785
0
Address will create a new Address service that can interact with the Swyftx addresses endpoints. The asset code is required for the Deposit, Withdraw and CheckDeposit endpoints
func (c *Client) Address(assetCode ...string) *AddressService {
	if len(assetCode) == 0 {
		assetCode[0] = ""
	}

	return &AddressService{service{c}, assetCode[0]}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (as *AddressService) Create(name string) (*Address, error) {\n\tif isEmptyStr(as.assetCode) {\n\t\treturn nil, errAssetCode\n\t}\n\n\tvar (\n\t\taddresses []*Address\n\t\tbody struct {\n\t\t\tAddress struct {\n\t\t\t\tName string `json:\"name\"`\n\t\t\t} `json:\"address\"`\n\t\t}\n\t)\n\tbody.Address.Name = name\n\n\tif err := as.client.Post(buildString(\"address/deposit/\", as.assetCode), &body, &addresses); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn addresses[0], nil\n}", "func (as *ApiService) CreateDepositAddress(currency string) (*ApiResponse, error) {\n\treq := NewRequest(http.MethodPost, \"/api/v1/deposit-addresses\", map[string]string{\"currency\": currency})\n\treturn as.Call(req)\n}", "func (as *AddressbookService) CreateAddress(addr models.Address) (models.Address, error) {\n\treturn as.repo.AddAddress(addr)\n}", "func CreateAddress() *addresspb.Address {\n\ta := addresspb.Address{\n\t\tCorrespondanceAddr: &addresspb.Location{\n\t\t\tLocation: \"loc 1\",\n\t\t\tCity: &addresspb.City{\n\t\t\t\tName: \"Mumbai\",\n\t\t\t\tZipCode: \"400005\",\n\t\t\t\tRegion: addresspb.Division_WEST,\n\t\t\t},\n\t\t},\n\n\t\tAdditionalAddr: []*addresspb.Location{\n\t\t\t{\n\t\t\t\tLocation: \"loc 2\",\n\t\t\t\tCity: &addresspb.City{\n\t\t\t\t\tName: \"Srinagar\",\n\t\t\t\t\tZipCode: \"190001\",\n\t\t\t\t\tRegion: addresspb.Division_NORTH,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tLocation: \"loc 3\",\n\t\t\t\tCity: &addresspb.City{\n\t\t\t\t\tName: \"Imphal\",\n\t\t\t\t\tZipCode: \"795001\",\n\t\t\t\t\tRegion: addresspb.Division_EAST,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tLocation: \"loc 4\",\n\t\t\t\tCity: &addresspb.City{\n\t\t\t\t\tName: \"Mysore\",\n\t\t\t\t\tZipCode: \"570001\",\n\t\t\t\t\tRegion: addresspb.Division_SOUTH,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\treturn &a\n}", "func CreateAddress(address models.RequestAddress) (bool, models.ResponseAddress, error) {\n\n\t//Create request\n\trequest := models.Request{}\n\trequest.AddBody(address)\n\trequest.SetUri(\"https://api.easypost.com/v2/addresses\")\n\trequest.SetMethod(\"POST\")\n\n\t//Send request\n\tresponseBody, err := SendRequest(request)\n\n\t//Initialize response address\n\tresponseAddress := models.ResponseAddress{}\n\n\terr = json.Unmarshal(responseBody, &responseAddress)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn false, responseAddress, fmt.Errorf(\"Unrecognized response from easypost %s\", err.Error())\n\t}\n\n\treturn responseAddress.Verifications.Delivery.Success, responseAddress, err\n}", "func (handler *Handler) CreateAddress(ctx context.Context, in *candyland_grpc.CreateAddressRequest) (*candyland_grpc.CreateAddressReply, error) {\n\terr := handler.CreateAddressUsecase.Create(in.UserId, in.CountryId, in.StateId, in.CityId, in.StreetId, in.Number, in.Complement.String())\n\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn nil, err\n\t}\n\n\tres := &candyland_grpc.CreateAddressReply{\n\t\tWasCreated: true,\n\t}\n\n\treturn res, nil\n}", "func (id *Public) CreateAddress(version, stream uint64) {\n\tid.Address.Version = version\n\tid.Address.Stream = stream\n\tcopy(id.Address.Ripe[:], id.hash())\n}", "func CreateAddress(amount int) (string, []Wallet) {\n\n\twallets := []Wallet{}\n\tfor i := 0; i < amount; i++ {\n\t\twif, _ := network.CreatePrivateKey()\n\t\taddress, _ := network.GetAddress(wif)\n\t\tvar wallet = Wallet{ADDRESS: address.EncodeAddress(), PRIVKEY: wif.String()}\n\t\twallets = append(wallets, wallet)\n\t}\n\n\tjson := ConvertToJSON(&wallets)\n\n\tlog.Println(\"Generated\", 
amount, \"addresses\")\n\n\treturn json, wallets\n\n}", "func (lu *litUiClient) Address() (string, error) {\n\n\t// cointype of 0 means default, not mainnet.\n\t// this is ugly but does prevent mainnet use for now.\n\n\tvar cointype, numadrs uint32\n\n\t// if no arguments given, generate 1 new address.\n\t// if no cointype given, assume type 1 (testnet)\n\n\tnumadrs = 0\n\n\treply := new(litrpc.AddressReply)\n\n\targs := new(litrpc.AddressArgs)\n\targs.CoinType = cointype\n\targs.NumToMake = numadrs\n\n\tfmt.Printf(\"adr cointye: %d num:%d\\n\", args.CoinType, args.NumToMake)\n\terr := lu.rpccon.Call(\"LitRPC.Address\", args, reply)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tresponse := reply.WitAddresses[len(reply.WitAddresses)-1]\n\treturn response, nil\n}", "func CreateAddress(b types.Address, nonce *big.Int) types.Address {\n\tdata, _ := rlp.EncodeToBytes([]interface{}{b, nonce})\n\treturn types.BytesToAddress(keccak.Keccak256(data)[12:])\n}", "func (e Endpoints) PostAddress(ctx context.Context, profileID string, a Address) error {\n\n\t// TODO: Create detailed ref spec\n\trequest := postAddressRequest{ProfileID: profileID, Address: a}\n\n\tresponse, err := e.PostAddressEndpoint(ctx, request)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresp := response.(postAddressResponse)\n\n\treturn resp.Err\n}", "func (dcr *ExchangeWallet) NewAddress() (string, error) {\n\treturn dcr.DepositAddress()\n}", "func Address(props *AddressProps, children ...Element) *AddressElem {\n\trProps := &_AddressProps{\n\t\tBasicHTMLElement: newBasicHTMLElement(),\n\t}\n\n\tif props != nil {\n\t\tprops.assign(rProps)\n\t}\n\n\treturn &AddressElem{\n\t\tElement: createElement(\"address\", rProps, children...),\n\t}\n}", "func CreateAddress(b common.Address, nonce uint64) common.Address {\n\tdata, _ := rlp.EncodeToBytes([]interface{}{b, nonce})\n\taddr := common.BytesToAddress(Keccak512(data)[:])\n\treturn common.DarmaAddressToContractAddress(addr)\n}", "func (lu *litUiClient) NewAddress() (string, error) {\n\n\t// cointype of 0 means default, not mainnet.\n\t// this is ugly but does prevent mainnet use for now.\n\n\tvar cointype, numadrs uint32\n\n\t// if no arguments given, generate 1 new address.\n\t// if no cointype given, assume type 1 (testnet)\n\n\tnumadrs = 1\n\n\treply := new(litrpc.AddressReply)\n\n\targs := new(litrpc.AddressArgs)\n\targs.CoinType = cointype\n\targs.NumToMake = numadrs\n\n\tfmt.Printf(\"adr cointye: %d num:%d\\n\", args.CoinType, args.NumToMake)\n\terr := lu.rpccon.Call(\"LitRPC.Address\", args, reply)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tresponse := reply.WitAddresses[0]\n\t//\tfmt.Fprintf(color.Output, \"new adr(s): %s\\nold: %s\\n\",\n\t//\t\tlnutil.Address(reply.WitAddresses), lnutil.Address(reply.LegacyAddresses))\n\treturn response, nil // reply.WitAddresses[]\n\n}", "func (s *SOC) Address() (swarm.Address, error) {\n\tif len(s.owner) != crypto.AddressSize {\n\t\treturn swarm.ZeroAddress, errInvalidAddress\n\t}\n\treturn CreateAddress(s.id, s.owner)\n}", "func (c *Client) Addresses(ctx context.Context, foreignID, currency string) (Address, error) {\n\treqBody := map[string]string{\n\t\t\"foreign_id\": foreignID,\n\t\t\"currency\": currency,\n\t}\n\n\treqJSON, err := json.Marshal(reqBody)\n\tif err != nil {\n\t\treturn Address{}, fmt.Errorf(\"request body marshaling error: %w\", err)\n\t}\n\n\taddressesURL, err := joinURL(c.api, addressesEndpoint)\n\tif err != nil {\n\t\treturn Address{}, fmt.Errorf(\"request url creating error: %w\", err)\n\t}\n\n\treq, err := 
http.NewRequestWithContext(ctx, http.MethodPost, addressesURL.String(), bytes.NewBuffer(reqJSON))\n\tif err != nil {\n\t\treturn Address{}, fmt.Errorf(\"request creating error: %w\", err)\n\t}\n\n\tsig, err := createHmac(c.secret, reqJSON)\n\tif err != nil {\n\t\treturn Address{}, fmt.Errorf(\"hmac signature creationg error: %w\", err)\n\t}\n\n\treq.Header.Set(contentTypeHeader, jsonContentType)\n\treq.Header.Set(keyHeader, c.apiKey)\n\treq.Header.Set(signatureHeader, sig)\n\n\tresp, err := c.client.Do(req)\n\tif err != nil {\n\t\treturn Address{}, fmt.Errorf(\"request error: %w\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\terr = ensureSuccessResponse(resp)\n\tif err != nil {\n\t\treturn Address{}, fmt.Errorf(\"request failed: %w\", err)\n\t}\n\n\trespBody := struct {\n\t\tData Address `json:\"data\"`\n\t}{}\n\n\terr = json.NewDecoder(resp.Body).Decode(&respBody)\n\tif err != nil {\n\t\treturn Address{}, fmt.Errorf(\"response unmarshaling error: %w\", err)\n\t}\n\n\treturn respBody.Data, nil\n}", "func createAddress(creator *Account) Word256 {\n\tnonce := creator.Nonce\n\tcreator.Nonce += 1\n\ttemp := make([]byte, 32+8)\n\tcopy(temp, creator.Address[:])\n\tPutUint64BE(temp[32:], nonce)\n\treturn LeftPadWord256(sha3.Sha3(temp)[:20])\n}", "func CreateAddress(id ID, owner []byte) (swarm.Address, error) {\n\tsum, err := hash(id, owner)\n\tif err != nil {\n\t\treturn swarm.ZeroAddress, err\n\t}\n\treturn swarm.NewAddress(sum), nil\n}", "func NewAddress() platformservices.Address {\n\n\tvar lat = 37.7917146\n\tvar lng = -122.397054\n\n\treturn platformservices.Address{\n\t\tAddressType: platformservices.AddressTypeLegal,\n\t\tStreetAddress: \"100 Main Street\",\n\t\tCity: \"San Francisco\",\n\t\tState: \"CA\",\n\t\tCountry: \"US\",\n\t\tPostalCode: \"94100\",\n\t\tLatitude: &lat,\n\t\tLongitude: &lng,\n\t}\n}", "func (c *CompaniesGetCall) Address(address string) *CompaniesGetCall {\n\tc.urlParams_.Set(\"address\", address)\n\treturn c\n}", "func NewAccountAddress(\n\taccountConfiguration *signing.Configuration,\n\tkeyPath signing.RelativeKeypath,\n\tnet *chaincfg.Params,\n\tlog *logrus.Entry,\n) *AccountAddress {\n\n\tvar address btcutil.Address\n\tvar redeemScript []byte\n\tconfiguration, err := accountConfiguration.Derive(keyPath)\n\tif err != nil {\n\t\tlog.WithError(err).Panic(\"Failed to derive the configuration.\")\n\t}\n\tlog = log.WithFields(logrus.Fields{\n\t\t\"key-path\": configuration.AbsoluteKeypath().Encode(),\n\t\t\"configuration\": configuration.String(),\n\t})\n\tlog.Debug(\"Creating new account address\")\n\n\tpublicKeyHash := btcutil.Hash160(configuration.PublicKey().SerializeCompressed())\n\tswitch configuration.ScriptType() {\n\tcase signing.ScriptTypeP2PKH:\n\t\taddress, err = btcutil.NewAddressPubKeyHash(publicKeyHash, net)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Panic(\"Failed to get P2PKH addr. from public key hash.\")\n\t\t}\n\tcase signing.ScriptTypeP2WPKHP2SH:\n\t\tvar segwitAddress *btcutil.AddressWitnessPubKeyHash\n\t\tsegwitAddress, err = btcutil.NewAddressWitnessPubKeyHash(publicKeyHash, net)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Panic(\"Failed to get p2wpkh-p2sh addr. from publ. 
key hash.\")\n\t\t}\n\t\tredeemScript, err = txscript.PayToAddrScript(segwitAddress)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Panic(\"Failed to get redeem script for segwit address.\")\n\t\t}\n\t\taddress, err = btcutil.NewAddressScriptHash(redeemScript, net)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Panic(\"Failed to get a P2SH address for segwit.\")\n\t\t}\n\tcase signing.ScriptTypeP2WPKH:\n\t\taddress, err = btcutil.NewAddressWitnessPubKeyHash(publicKeyHash, net)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Panic(\"Failed to get p2wpkh addr. from publ. key hash.\")\n\t\t}\n\tcase signing.ScriptTypeP2TR:\n\t\toutputKey := txscript.ComputeTaprootKeyNoScript(configuration.PublicKey())\n\t\taddress, err = btcutil.NewAddressTaproot(schnorr.SerializePubKey(outputKey), net)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Panic(\"Failed to get p2tr addr\")\n\t\t}\n\tdefault:\n\t\tlog.Panic(fmt.Sprintf(\"Unrecognized script type: %s\", configuration.ScriptType()))\n\t}\n\n\treturn &AccountAddress{\n\t\tAddress: address,\n\t\tAccountConfiguration: accountConfiguration,\n\t\tConfiguration: configuration,\n\t\tredeemScript: redeemScript,\n\t\tlog: log,\n\t}\n}", "func (gen *AddressGenerator) Address() Address {\n\treturn generateAddress(gen.chainID, gen.state)\n}", "func (_Token *TokenCaller) FactoryAddress(opts *bind.CallOpts) (common.Address, error) {\n\tvar out []interface{}\n\terr := _Token.contract.Call(opts, &out, \"factoryAddress\")\n\n\tif err != nil {\n\t\treturn *new(common.Address), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)\n\n\treturn out0, err\n\n}", "func (mc *MoacChain) createChainAddress(tradePassword string) (addr string, err error) {\n\n\tdefer func() {\n\t\tif re := recover(); re != nil {\n\t\t\terr = re.(error)\n\t\t}\n\t}()\n\n\terr = rpcClient.Call(&addr, \"personal_newAccount\", tradePassword)\n\n\treturn addr, err\n}", "func CreateAddress(a *AddressDAL) (*AddressDAL, error) {\n\tresult := db.DB().Create(a)\n\tif result.Error != nil {\n\t\treturn nil, result.Error\n\t}\n\treturn a, nil\n}", "func (app *App) Address(addr string) *App {\n\tapp.Addr = addr\n\treturn app\n}", "func (a *Api) Address(address string) (*AddressResult, error) {\n\treturn a.AddressAtHeight(address, LatestBlockHeight)\n}", "func ExampleServiceClient_ValidateAddress() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armdatabox.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewServiceClient().ValidateAddress(ctx, \"westus\", armdatabox.ValidateAddress{\n\t\tValidationType: to.Ptr(armdatabox.ValidationInputDiscriminatorValidateAddress),\n\t\tDeviceType: to.Ptr(armdatabox.SKUNameDataBox),\n\t\tShippingAddress: &armdatabox.ShippingAddress{\n\t\t\tAddressType: to.Ptr(armdatabox.AddressTypeCommercial),\n\t\t\tCity: to.Ptr(\"XXXX XXXX\"),\n\t\t\tCompanyName: to.Ptr(\"XXXX XXXX\"),\n\t\t\tCountry: to.Ptr(\"XX\"),\n\t\t\tPostalCode: to.Ptr(\"00000\"),\n\t\t\tStateOrProvince: to.Ptr(\"XX\"),\n\t\t\tStreetAddress1: to.Ptr(\"XXXX XXXX\"),\n\t\t\tStreetAddress2: to.Ptr(\"XXXX XXXX\"),\n\t\t},\n\t}, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. 
We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.AddressValidationOutput = armdatabox.AddressValidationOutput{\n\t// \tProperties: &armdatabox.AddressValidationProperties{\n\t// \t\tValidationType: to.Ptr(armdatabox.ValidationInputDiscriminatorValidateAddress),\n\t// \t\tAlternateAddresses: []*armdatabox.ShippingAddress{\n\t// \t\t\t{\n\t// \t\t\t\tAddressType: to.Ptr(armdatabox.AddressTypeNone),\n\t// \t\t\t\tCity: to.Ptr(\"XXXX XXXX\"),\n\t// \t\t\t\tCountry: to.Ptr(\"XX\"),\n\t// \t\t\t\tPostalCode: to.Ptr(\"00000\"),\n\t// \t\t\t\tStateOrProvince: to.Ptr(\"XX\"),\n\t// \t\t\t\tStreetAddress1: to.Ptr(\"XXXX XXXX\"),\n\t// \t\t\t\tStreetAddress2: to.Ptr(\"XXXX XXXX\"),\n\t// \t\t\t\tStreetAddress3: to.Ptr(\"\"),\n\t// \t\t}},\n\t// \t\tValidationStatus: to.Ptr(armdatabox.AddressValidationStatusValid),\n\t// \t},\n\t// }\n}", "func (ms MockSOC) Address() boson.Address {\n\taddr, _ := soc.CreateAddress(ms.ID, ms.Owner)\n\treturn addr\n}", "func (f *Factory) Address() string { return f.address }", "func Address(val string) Argument {\n\treturn func(request *requests.Request) error {\n\t\trequest.AddArgument(\"address\", val)\n\t\treturn nil\n\t}\n}", "func Address(ctx context.Context, name string) (*apis.URL, error) {\n\treturn k8s.Address(ctx, svc.GVR(), name)\n}", "func (a *Transactions) Address(ctx context.Context, address proto.WavesAddress, limit uint) ([]proto.Transaction, *Response, error) {\n\turl, err := joinUrl(a.options.BaseUrl, fmt.Sprintf(\"/transactions/address/%s/limit/%d\", address.String(), limit))\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := http.NewRequest(\"GET\", url.String(), nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar out []TransactionsField\n\tresponse, err := doHttp(ctx, a.options, req, &out)\n\tif err != nil {\n\t\treturn nil, response, err\n\t}\n\tif len(out) == 0 {\n\t\treturn nil, response, nil\n\t}\n\treturn out[0], response, nil\n}", "func (e Exchange) DepositAddress(exch string, currencyCode currency.Code) (out string, err error) {\n\tif currencyCode.IsEmpty() {\n\t\terr = errors.New(\"currency code is empty\")\n\t\treturn\n\t}\n\treturn engine.Bot.DepositAddressManager.GetDepositAddressByExchange(exch, currencyCode)\n}", "func (c *CompaniesListCall) Address(address string) *CompaniesListCall {\n\tc.urlParams_.Set(\"address\", address)\n\treturn c\n}", "func (id *publicAddress) Address() Address {\n\tvar a Address\n\ta, _ = id.address()\n\treturn a\n}", "func NewAddress(address common.Address, backend bind.ContractBackend) (*Address, error) {\n\tcontract, err := bindAddress(address, backend, backend, backend)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Address{AddressCaller: AddressCaller{contract: contract}, AddressTransactor: AddressTransactor{contract: contract}, AddressFilterer: AddressFilterer{contract: contract}}, nil\n}", "func NewAddress(address common.Address, backend bind.ContractBackend) (*Address, error) {\n\tcontract, err := bindAddress(address, backend, backend, backend)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Address{AddressCaller: AddressCaller{contract: contract}, AddressTransactor: AddressTransactor{contract: contract}, AddressFilterer: AddressFilterer{contract: contract}}, nil\n}", "func NewAddress(address common.Address, backend 
bind.ContractBackend) (*Address, error) {\n\tcontract, err := bindAddress(address, backend, backend, backend)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Address{AddressCaller: AddressCaller{contract: contract}, AddressTransactor: AddressTransactor{contract: contract}, AddressFilterer: AddressFilterer{contract: contract}}, nil\n}", "func NewAddress(address common.Address, backend bind.ContractBackend) (*Address, error) {\n\tcontract, err := bindAddress(address, backend, backend, backend)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Address{AddressCaller: AddressCaller{contract: contract}, AddressTransactor: AddressTransactor{contract: contract}, AddressFilterer: AddressFilterer{contract: contract}}, nil\n}", "func NewAddress(address common.Address, backend bind.ContractBackend) (*Address, error) {\n\tcontract, err := bindAddress(address, backend, backend, backend)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Address{AddressCaller: AddressCaller{contract: contract}, AddressTransactor: AddressTransactor{contract: contract}, AddressFilterer: AddressFilterer{contract: contract}}, nil\n}", "func makeAddress(keyPair *keypair.KeyPair, testnet bool) *account.Account {\n\n\treturn &account.Account{\n\t\tAccountInterface: &account.ED25519Account{\n\t\t\tTest: testnet,\n\t\t\tPublicKey: keyPair.PublicKey[:],\n\t\t},\n\t}\n}", "func CreateAddress(address string) string {\n\tnumeric := []string{\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"}\n\talpha := []string{\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\"}\n\n\tindexZero := RandomIndex(0, 10)\n\tindexOne := RandomIndex(0, 10)\n\tindexTwo := RandomIndex(0, 10)\n\n\treturn fmt.Sprintf(\"%s %s%s%s\", address, alpha[indexZero], numeric[indexOne], alpha[indexTwo])\n}", "func ExampleSnowball_CreateAddress_shared00() {\n\tsvc := snowball.New(session.New())\n\tinput := &snowball.CreateAddressInput{\n\t\tAddress: &snowball.Address{\n\t\t\tCity: aws.String(\"Seattle\"),\n\t\t\tCompany: aws.String(\"My Company's Name\"),\n\t\t\tCountry: aws.String(\"USA\"),\n\t\t\tName: aws.String(\"My Name\"),\n\t\t\tPhoneNumber: aws.String(\"425-555-5555\"),\n\t\t\tPostalCode: aws.String(\"98101\"),\n\t\t\tStateOrProvince: aws.String(\"WA\"),\n\t\t\tStreet1: aws.String(\"123 Main Street\"),\n\t\t},\n\t}\n\n\tresult, err := svc.CreateAddress(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase snowball.ErrCodeInvalidAddressException:\n\t\t\t\tfmt.Println(snowball.ErrCodeInvalidAddressException, aerr.Error())\n\t\t\tcase snowball.ErrCodeUnsupportedAddressException:\n\t\t\t\tfmt.Println(snowball.ErrCodeUnsupportedAddressException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func (ar AccountRef) Address() sdk.AccAddress {\n\treturn (sdk.AccAddress)(ar)\n}", "func (n *Node) CreateAddress() *Node {\n\tkeystore := n.client.KeystoreAPI()\n\tif _, err := keystore.CreateUser(n.UserPass); err != nil {\n\t\tpanic(stacktrace.Propagate(err, \"Could not create user for node.\"))\n\t}\n\n\txAddress, err := n.client.XChainAPI().CreateAddress(n.UserPass)\n\tif err != nil {\n\t\tpanic(stacktrace.Propagate(err, \"Could not create user address in the XChainAPI.\"))\n\t}\n\tn.XAddress = xAddress\n\n\tpAddress, err := 
n.client.PChainAPI().CreateAddress(n.UserPass)\n\tif err != nil {\n\t\tpanic(stacktrace.Propagate(err, \"Could not create user address in the PChainAPI.\"))\n\t}\n\tn.PAddress = pAddress\n\treturn n\n}", "func (addressManager *AddressManager) Address(addressIndex uint64) address.Address {\n\t// update lastUnspentAddressIndex if necessary\n\taddressManager.spentAddressIndexes(addressIndex)\n\n\treturn addressManager.seed.Address(addressIndex)\n}", "func CreateAddressPool(ctx iris.Context) {\n\turi := ctx.Request().RequestURI\n\tfabricID := ctx.Params().Get(\"id\")\n\tif _, err := capmodel.GetFabric(fabricID); err != nil {\n\t\terrMsg := fmt.Sprintf(\"failed to fetch fabric data for uri %s: %s\", uri, err.Error())\n\t\tcreateDbErrResp(ctx, err, errMsg, []interface{}{\"Fabric\", fabricID})\n\t\treturn\n\t}\n\tvar addresspoolData model.AddressPool\n\terr := ctx.ReadJSON(&addresspoolData)\n\tif err != nil {\n\t\terrorMessage := \"error while trying to get JSON body from the request: \" + err.Error()\n\t\tlog.Error(errorMessage)\n\t\tresp := updateErrorResponse(response.MalformedJSON, errorMessage, nil)\n\t\tctx.StatusCode(http.StatusBadRequest)\n\t\tctx.JSON(resp)\n\t\treturn\n\t}\n\t// Todo :Add required validation for the request params\n\tmissingAttribute, err := validateAddressPoolRequest(addresspoolData)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\tresp := updateErrorResponse(response.PropertyMissing, err.Error(), []interface{}{missingAttribute})\n\t\tctx.StatusCode(http.StatusBadRequest)\n\t\tctx.JSON(resp)\n\t\treturn\n\t}\n\tif addresspoolData.Ethernet.IPv4.GatewayIPAddress != \"\" {\n\t\tif _, _, err := net.ParseCIDR(addresspoolData.Ethernet.IPv4.GatewayIPAddress); err != nil {\n\t\t\terrorMessage := \"Invalid value for GatewayIPAddress:\" + err.Error()\n\t\t\tlog.Errorf(errorMessage)\n\t\t\tresp := updateErrorResponse(response.PropertyValueFormatError, errorMessage, []interface{}{addresspoolData.Ethernet.IPv4.GatewayIPAddress, \"GatewayIPAddress\"})\n\t\t\tctx.StatusCode(http.StatusBadRequest)\n\t\t\tctx.JSON(resp)\n\t\t\treturn\n\n\t\t}\n\t\tif addresspoolData.Ethernet.IPv4.VLANIdentifierAddressRange.Lower != addresspoolData.Ethernet.IPv4.VLANIdentifierAddressRange.Upper {\n\t\t\terrorMessage := fmt.Sprintf(\"Requested VLANIdentifierAddressRange Lower %d is not equal to Upper %d\", addresspoolData.Ethernet.IPv4.VLANIdentifierAddressRange.Lower, addresspoolData.Ethernet.IPv4.VLANIdentifierAddressRange.Upper)\n\t\t\tlog.Error(errorMessage)\n\t\t\tresp := updateErrorResponse(response.PropertyUnknown, errorMessage, []interface{}{\"VLANIdentifierAddressRange\"})\n\t\t\tctx.StatusCode(http.StatusBadRequest)\n\t\t\tctx.JSON(resp)\n\t\t\treturn\n\t\t}\n\t\taddressPools, err := capmodel.GetAllAddressPools(fabricID)\n\t\tif err != nil {\n\t\t\terrMsg := fmt.Sprintf(\"failed to fetch AddressPool data for uri %s: %s\", uri, err.Error())\n\t\t\tcreateDbErrResp(ctx, err, errMsg, []interface{}{\"Fabric\", fabricID})\n\t\t\treturn\n\t\t}\n\t\tfor _, data := range addressPools {\n\t\t\tif data.Ethernet.IPv4.GatewayIPAddress == addresspoolData.Ethernet.IPv4.GatewayIPAddress {\n\t\t\t\terrorMessage := \"Requested GatewayIPAddress is already present in the addresspool \" + data.ODataID\n\t\t\t\tlog.Error(errorMessage)\n\t\t\t\tresp := updateErrorResponse(response.ResourceAlreadyExists, errorMessage, []interface{}{\"AddressPool\", \"GatewayIPAddress\", 
addresspoolData.Ethernet.IPv4.GatewayIPAddress})\n\t\t\t\tctx.StatusCode(http.StatusConflict)\n\t\t\t\tctx.JSON(resp)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\tif addresspoolData.Ethernet.IPv4.VLANIdentifierAddressRange.Lower > addresspoolData.Ethernet.IPv4.VLANIdentifierAddressRange.Upper {\n\t\terrorMessage := fmt.Sprintf(\"Requested VLANIdentifierAddressRange Lower %d is greater than Upper %d\", addresspoolData.Ethernet.IPv4.VLANIdentifierAddressRange.Lower, addresspoolData.Ethernet.IPv4.VLANIdentifierAddressRange.Upper)\n\t\tlog.Error(errorMessage)\n\t\tresp := updateErrorResponse(response.PropertyUnknown, errorMessage, []interface{}{\"VLANIdentifierAddressRange\"})\n\t\tctx.StatusCode(http.StatusBadRequest)\n\t\tctx.JSON(resp)\n\t\treturn\n\t}\n\t// validate the VLANIdentifierAddressRange lower value\n\tresp, statusCode := validateVLANIdentifierAddressRange(addresspoolData.Ethernet.IPv4.VLANIdentifierAddressRange.Lower, addresspoolData.Ethernet.IPv4.VLANIdentifierAddressRange.Upper)\n\tif statusCode != http.StatusOK {\n\t\tctx.StatusCode(statusCode)\n\t\tctx.JSON(resp)\n\t\treturn\n\t}\n\n\taddressPoolID := uuid.NewV4().String()\n\taddresspoolData.ODataContext = \"/ODIM/v1/$metadata#AddressPool.AddressPool\"\n\taddresspoolData.ODataType = \"#AddressPool.v1_1_0.AddressPool\"\n\taddresspoolData.ODataID = fmt.Sprintf(\"%s/%s\", uri, addressPoolID)\n\taddresspoolData.ID = addressPoolID\n\n\tif err = capmodel.SaveAddressPool(fabricID, addresspoolData.ODataID, &addresspoolData); err != nil {\n\t\terrMsg := fmt.Sprintf(\"failed to save AddressPool data for uri %s: %s\", uri, err.Error())\n\t\tcreateDbErrResp(ctx, err, errMsg, []interface{}{\"Fabric\", fabricID})\n\t\treturn\n\t}\n\n\tcommon.SetResponseHeader(ctx, map[string]string{\n\t\t\"Location\": addresspoolData.ODataID,\n\t})\n\n\tctx.StatusCode(http.StatusCreated)\n\tctx.JSON(addresspoolData)\n}", "func (s *SkyСoinService) GenerateAddr(pubStr string) (maddr *AddressResponse, err error) {\n\tmaddr = &AddressResponse{}\n\n\tpubKey, err := cipher.PubKeyFromHex(pubStr)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\taddress := cipher.AddressFromPubKey(pubKey)\n\n\tmaddr.Address = address.String()\n\treturn maddr, nil\n}", "func (mr *Repo) AddAddress(addr models.Address) (models.Address, error) {\n\tcollection := mr.client.Database(mr.dbInfo.DBName).Collection(mr.dbInfo.CollectionName)\n\tdbAddr := &dbAddress{}\n\tdbAddr.fromServiceAddress(addr)\n\tr, err := collection.InsertOne(mr.ctx, dbAddr)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn addr, err\n\t}\n\n\tid := r.InsertedID.(primitive.ObjectID).Hex()\n\taddr.ID = id\n\treturn addr, nil\n}", "func Address(attrs []htmlgo.Attribute, children ...HTML) HTML {\n\treturn &htmlgo.Tree{Tag: \"address\", Attributes: attrs, Children: children}\n}", "func NewFromCashAddress(addr *cashaddress.Address) (*Address, error) {\n\tvar network = MainNet\n\tvar addrtype = P2SH\n\n\tswitch addr.Prefix {\n\tcase cashaddress.MainNet:\n\t\tnetwork = MainNet\n\tcase cashaddress.TestNet:\n\t\tnetwork = TestNet\n\tcase cashaddress.RegTest:\n\t\tnetwork = RegTest\n\tdefault:\n\t\treturn nil, errors.New(\"invalid address network\")\n\t}\n\n\tswitch addr.Version {\n\tcase cashaddress.P2KH:\n\t\taddrtype = P2KH\n\tcase cashaddress.P2SH:\n\t\taddrtype = P2SH\n\tdefault:\n\t\treturn nil, errors.New(\"invalid address type\")\n\t}\n\n\treturn &Address{\n\t\tNetwork: network,\n\t\tVersion: addrtype,\n\t\tPayload: addr.Payload,\n\t}, nil\n}", "func ImportAddress(c echo.Context) error 
{\n\tc.Logger().Print(\"executing ImportAddress handler\")\n\n\tvar req importAddressReq\n\tif err := json.NewDecoder(c.Request().Body).Decode(&req); err != nil {\n\t\treturn c.JSON(http.StatusBadRequest, genericResponse{\n\t\t\tError: \"missing addr field in request\",\n\t\t\tCode: ErrorInvalidRequest,\n\t\t})\n\t}\n\n\tclient, ok := c.Get(\"coin_client\").(transport.AddressImporter)\n\tif !ok {\n\t\treturn c.JSON(http.StatusBadRequest, genericResponse{\n\t\t\tError: fmt.Sprintf(\"client: %s does not have import address functionality\", c.Param(\"asset_id\")),\n\t\t\tCode: ErrorCodeCannotImport,\n\t\t})\n\t}\n\n\terr := client.ImportAddress(req.Addr)\n\tif err != nil {\n\t\tc.Logger().Errorf(\"error getting importing address: %s for coin: %s, err: %v\", req.Addr, c.Param(\"assetId\"), err)\n\t\treturn c.JSON(http.StatusBadRequest, genericResponse{\n\t\t\tError: \"could not import address\",\n\t\t\tCode: ErrorCodeCannotImport,\n\t\t})\n\t}\n\n\treturn c.JSON(http.StatusOK, successResponse)\n}", "func AddressGenerator(t *rapid.T) *rapid.Generator[sdk.AccAddress] {\n\treturn rapid.Custom(func(t *rapid.T) sdk.AccAddress {\n\t\tpkBz := rapid.SliceOfN(rapid.Byte(), 20, 20).Draw(t, \"hex\")\n\t\treturn sdk.AccAddress(pkBz)\n\t})\n}", "func (a *Account) NewAddress() (btcutil.Address, error) {\n\t// Get current block's height and hash.\n\trpcc, err := accessClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbs, err := rpcc.BlockStamp()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Get next address from wallet.\n\taddr, err := a.KeyStore.NextChainedAddress(&bs, cfg.KeypoolSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Immediately write updated wallet to disk.\n\tAcctMgr.ds.ScheduleWalletWrite(a)\n\tif err := AcctMgr.ds.FlushAccount(a); err != nil {\n\t\treturn nil, fmt.Errorf(\"account write failed: %v\", err)\n\t}\n\n\t// Mark this new address as belonging to this account.\n\tAcctMgr.MarkAddressForAccount(addr, a)\n\n\t// Request updates from btcd for new transactions sent to this address.\n\tif err := rpcc.NotifyReceived([]btcutil.Address{addr}); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn addr, nil\n}", "func (address *Address) CreateAddress(db *gorm.DB) (*Address, error) {\n\n\terr := db.Debug().Create(&address).Error\n\tif err != nil {\n\t\treturn &Address{}, err\n\t}\n\n\treturn address, nil\n}", "func (mt *EasypostAddress) Validate() (err error) {\n\tif mt.ID == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"id\"))\n\t}\n\tif mt.Object == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"object\"))\n\t}\n\n\tif ok := goa.ValidatePattern(`^adr_`, mt.ID); !ok {\n\t\terr = goa.MergeErrors(err, goa.InvalidPatternError(`response.id`, mt.ID, `^adr_`))\n\t}\n\tif !(mt.Mode == \"test\" || mt.Mode == \"production\") {\n\t\terr = goa.MergeErrors(err, goa.InvalidEnumValueError(`response.mode`, mt.Mode, []interface{}{\"test\", \"production\"}))\n\t}\n\tif ok := goa.ValidatePattern(`^Address$`, mt.Object); !ok {\n\t\terr = goa.MergeErrors(err, goa.InvalidPatternError(`response.object`, mt.Object, `^Address$`))\n\t}\n\treturn\n}", "func NewAddress(i, ii int) *Address {\n\tn := strconv.Itoa(i)\n\treturn &Address{\n\t\tId: time.Now().UnixNano(),\n\t\tStreet: \"10\" + n + \" Somewhere Lane\",\n\t\tCity: \"Awesome City \" + n,\n\t\tState: func() string {\n\t\t\tif i%2 == 0 {\n\t\t\t\treturn \"PA\"\n\t\t\t}\n\t\t\treturn \"CA\"\n\t\t}(),\n\t\tZip: ii,\n\t}\n}", "func (cc *CustomerCreate) AddAddress(c 
...*CustomerAddress) *CustomerCreate {\n\tids := make([]int, len(c))\n\tfor i := range c {\n\t\tids[i] = c[i].ID\n\t}\n\treturn cc.AddAddresIDs(ids...)\n}", "func (b *Backend) NewAddress() wallet.Address {\n\taddr := Address{}\n\treturn &addr\n}", "func addAddresses(authRequest *sleet.AuthorizationRequest, request *checkout.PaymentRequest) {\n\tif authRequest.BillingAddress != nil && authRequest.Options[sleet.GooglePayTokenOption] == nil {\n\t\tbillingStreetNumber, billingStreetName := extractAdyenStreetFormat(common.SafeStr(authRequest.BillingAddress.StreetAddress1))\n\t\trequest.BillingAddress = &checkout.Address{\n\t\t\tCity: common.SafeStr(authRequest.BillingAddress.Locality),\n\t\t\tCountry: common.SafeStr(authRequest.BillingAddress.CountryCode),\n\t\t\tHouseNumberOrName: billingStreetNumber,\n\t\t\tPostalCode: common.SafeStr(authRequest.BillingAddress.PostalCode),\n\t\t\tStateOrProvince: common.SafeStr(authRequest.BillingAddress.RegionCode),\n\t\t\tStreet: billingStreetName,\n\t\t}\n\t}\n\tif authRequest.ShippingAddress != nil {\n\t\tshippingStreetNumber, shippingStreetName := extractAdyenStreetFormat(common.SafeStr(authRequest.ShippingAddress.StreetAddress1))\n\t\trequest.DeliveryAddress = &checkout.Address{\n\t\t\tCity: common.SafeStr(authRequest.ShippingAddress.Locality),\n\t\t\tCountry: common.SafeStr(authRequest.ShippingAddress.CountryCode),\n\t\t\tHouseNumberOrName: shippingStreetNumber,\n\t\t\tPostalCode: common.SafeStr(authRequest.ShippingAddress.PostalCode),\n\t\t\tStateOrProvince: common.SafeStr(authRequest.ShippingAddress.RegionCode),\n\t\t\tStreet: shippingStreetName,\n\t\t}\n\t}\n}", "func (c *Constructor) newAddress(ctx context.Context) (string, error) {\n\tkp, err := keys.GenerateKeypair(c.curveType)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"%w unable to generate keypair\", err)\n\t}\n\n\taddress, _, err := c.helper.Derive(\n\t\tctx,\n\t\tc.network,\n\t\tkp.PublicKey,\n\t\tnil,\n\t)\n\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"%w: unable to derive address\", err)\n\t}\n\n\terr = c.helper.StoreKey(ctx, address, kp)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"%w: unable to store address\", err)\n\t}\n\n\tif err := c.handler.AddressCreated(ctx, address); err != nil {\n\t\treturn \"\", fmt.Errorf(\"%w: could not handle address creation\", err)\n\t}\n\n\treturn address, nil\n}", "func AddAddress(c router.Context) (interface{}, error) {\n\t// get the data from the request and parse it as structure\n\tdata := c.Param(`data`).(Address)\n\n\t// Validate the inputed data\n\terr := data.Validate()\n\tif err != nil {\n\t\tif _, ok := err.(validation.InternalError); ok {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, status.ErrStatusUnprocessableEntity.WithValidationError(err.(validation.Errors))\n\t}\n\n\t// check if address already exists or not\n\tqueryString := fmt.Sprintf(\"{\\\"selector\\\": {\\\"user_addresses\\\": {\\\"$elemMatch\\\": {\\\"value\\\": \\\"%s\\\"}},\\\"doc_type\\\":\\\"%s\\\"}}\", data.Value, utils.DocTypeUser)\n\tuserResult, _, err := utils.Get(c, queryString, fmt.Sprintf(\"User already exists with the given address %s!\", data.Value))\n\n\tif userResult != nil {\n\t\treturn nil, status.ErrBadRequest.WithMessage(fmt.Sprintf(\"This address %s already exists in the system!\", data.Value))\n\t}\n\n\t// check if address already exists or not\n\tlabelQueryString := fmt.Sprintf(\"{\\\"selector\\\": {\\\"user_addresses\\\": {\\\"$elemMatch\\\": {\\\"label\\\": \\\"%s\\\"}},\\\"_id\\\":{\\\"$ne\\\":\\\"%s\\\"},\\\"doc_type\\\":\\\"%s\\\"}}\", 
data.Label, data.UserID, utils.DocTypeUser)\n\tlabelResult, _, err := utils.Get(c, labelQueryString, fmt.Sprintf(\"User already exists with the given label %s!\", data.Label))\n\n\tif labelResult != nil {\n\t\treturn nil, status.ErrBadRequest.WithMessage(fmt.Sprintf(\"This label %s has already been taken!\", data.Label))\n\t}\n\n\taddress1 := Address{UserID: data.UserID, Label: data.Label, Value: data.Value}\n\tstub := c.Stub()\n\tuserAsBytes, _ := stub.GetState(data.UserID)\n\tuser := User{}\n\n\terr = json.Unmarshal(userAsBytes, &user)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tuser.UserAddresses = append(user.UserAddresses, address1)\n\t// prepare the response body\n\tresponseBody := UserResponse{ID: data.UserID, Address: user.Address, WalletBalance: user.WalletBalance, Symbol: user.Symbol, CreatedAt: user.CreatedAt, UserAddresses: user.UserAddresses, Identity: user.Identity}\n\t// Save the data and return the response\n\treturn responseBody, c.State().Put(data.UserID, user)\n}", "func CreateQueryCustomerAddressListRequest() (request *QueryCustomerAddressListRequest) {\nrequest = &QueryCustomerAddressListRequest{\nRpcRequest: &requests.RpcRequest{},\n}\nrequest.InitWithApiInfo(\"BssOpenApi\", \"2017-12-14\", \"QueryCustomerAddressList\", \"\", \"\")\nreturn\n}", "func ExampleAddressClient_Validate() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclient, err := armbilling.NewAddressClient(cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := client.Validate(ctx,\n\t\tarmbilling.AddressDetails{\n\t\t\tAddressLine1: to.Ptr(\"1 Test\"),\n\t\t\tCity: to.Ptr(\"bellevue\"),\n\t\t\tCountry: to.Ptr(\"us\"),\n\t\t\tPostalCode: to.Ptr(\"12345\"),\n\t\t\tRegion: to.Ptr(\"wa\"),\n\t\t},\n\t\tnil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// TODO: use response item\n\t_ = res\n}", "func (h *HitBTC) GenerateNewAddress(ctx context.Context, currency string) (DepositCryptoAddresses, error) {\n\tresp := DepositCryptoAddresses{}\n\terr := h.SendAuthenticatedHTTPRequest(ctx, exchange.RestSpot, http.MethodPost,\n\t\tapiV2CryptoAddress+\"/\"+currency,\n\t\turl.Values{},\n\t\totherRequests,\n\t\t&resp)\n\n\treturn resp, err\n}", "func ServiceAddress(chain ChainID) Address {\n\treturn generateAddress(chain, serviceAddressState)\n}", "func (c *Contract) Address() sdk.AccAddress {\n\treturn c.self.Address()\n}", "func NewAddress(street string) *Address {\n // Just return a dummy for STUB\n return &Address{}\n}", "func NewAddress(pk ctypes.PubKey) *Address {\n\ta := Address{pk}\n\treturn &a\n}", "func (*CreateAddressRequest) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_vpc_v1_address_service_proto_rawDescGZIP(), []int{4}\n}", "func (mc *MoacChain) addContractAddress(contractAddr string) (err error) {\n\n\terr = mc.getContractInfo(contractAddr)\n\tif err == nil {\n\t\tvar cm database.CurrencyManagement\n\t\tflag2, err2 := database.Engine.Where(\"currency_contract_address = ?\", contractAddr).Get(&cm)\n\t\tif err2 == nil {\n\t\t\tif flag2 {\n\t\t\t\tvar countId int64 = 1\n\t\t\t\tvar counter int = 0\n\t\t\t\tfor {\n\t\t\t\t\tflag, err1 := database.Engine.Where(\"id = ?\", countId).Exist(&database.UserInfo{})\n\t\t\t\t\tif err1 == nil {\n\t\t\t\t\t\tif flag {\n\t\t\t\t\t\t\tcounter = 0\n\n\t\t\t\t\t\t\tvar ua database.UserAssets\n\t\t\t\t\t\t\tua.UserId = 
countId\n\t\t\t\t\t\t\tua.CurrencyNumber = 0\n\t\t\t\t\t\t\tua.CurrencyId = cm.CurrencyId\n\t\t\t\t\t\t\terr = database.SessionSubmit(func(session *xorm.Session) (err1 error) {\n\t\t\t\t\t\t\t\t_, err1 = session.Insert(ua)\n\t\t\t\t\t\t\t\treturn err1\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tcounter++\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif counter == 11 {\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcountId++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn err\n}", "func TestGenerateAddress(t *testing.T) {\n\tif _, err := GenerateAddress(); err != nil {\n\t\tt.Fatalf(\"Failed to generate new address: %v\", err)\n\t}\n}", "func (t *SimpleChaincode) modifyAddress(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\tvar dni, address string // Entities\n\tvar err error\n\n\tif len(args) != 2 {\n\t\treturn shim.Error(\"Incorrect number of arguments. Expecting 3\")\n\t}\n\n\tdni = args[0]\n\taddress = args[1]\n\n\tvar personObject Person;\n\tpersonStored, err := stub.GetState(dni)\n\tif err != nil {\n\t\treturn shim.Error(\"Failed to get state\")\n\t}\n\terr = json.Unmarshal(personStored, &personObject)\n\t\n\tfmt.Println(\"Modifying person DNI \" + dni + \" with Name \" + personObject.Name)\n\tpersonObject.Address = address\n\n\tdata, err := json.Marshal(personObject)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\terr = stub.PutState(dni, data)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\treturn shim.Success(nil)\n}", "func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {\n\tb, err := opts.ToAddressScopeCreateMap()\n\tif err != nil {\n\t\tr.Err = err\n\t\treturn\n\t}\n\tresp, err := client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{\n\t\tOkCodes: []int{201},\n\t})\n\t_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)\n\treturn\n}", "func (*uploadDownloadContractor) Address() modules.NetAddress { return \"\" }", "func bindAddress(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := ParsedABI(K_Address)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil\n}", "func (addressManager *AddressManager) NewAddress() address.Address {\n\treturn addressManager.Address(addressManager.lastAddressIndex + 1)\n}", "func Address(value string) *SimpleElement { return newSEString(\"address\", value) }", "func CreateAddrEntry(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tvar address Address\n\terr := json.NewDecoder(r.Body).Decode(&address)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\t_, ok := GetEntry(address.FirstName, address.LastName)\n\tif ok {\n\t\thttp.Error(w, fmt.Sprintf(\"Duplicate entry for firstName: %s, lastName: %s\", address.FirstName, address.LastName), 400)\n\t\treturn\n\t}\n\tUpdateEntry(address)\n\tjson.NewEncoder(w).Encode(address)\n}", "func (w Wallet) Address() []byte {\n\tripemd160 := PublicKeyHash(w.PublicKey)\n\n\tversionedRimpemd160 := append([]byte{version}, ripemd160...)\n\tchecksum := CheckSumSlice(versionedRimpemd160)\n\n\tfullHash := append(versionedRimpemd160, checksum...)\n\taddress := Base58Encode(fullHash)\n\n\treturn address\n}", "func (w *Wallet) NewAddress(account uint32,\n\tscope waddrmgr.KeyScope) (btcutil.Address, er.R) 
{\n\n\tchainClient, err := w.requireChainClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar (\n\t\taddr btcutil.Address\n\t\tprops *waddrmgr.AccountProperties\n\t)\n\terr = walletdb.Update(w.db, func(tx walletdb.ReadWriteTx) er.R {\n\t\taddrmgrNs := tx.ReadWriteBucket(waddrmgrNamespaceKey)\n\t\tvar err er.R\n\t\taddr, props, err = w.newAddress(addrmgrNs, account, scope)\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Notify the rpc server about the newly created address.\n\terr = chainClient.NotifyReceived([]btcutil.Address{addr})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tw.NtfnServer.notifyAccountProperties(props)\n\n\treturn addr, nil\n}", "func bindAddress(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(AddressABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func bindAddress(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(AddressABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func bindAddress(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(AddressABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func bindAddress(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(AddressABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func (_BaseContentSpace *BaseContentSpaceCaller) Factory(opts *bind.CallOpts) (common.Address, error) {\n\tvar out []interface{}\n\terr := _BaseContentSpace.contract.Call(opts, &out, \"factory\")\n\n\tif err != nil {\n\t\treturn *new(common.Address), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)\n\n\treturn out0, err\n\n}", "func (w *Wallet) NewAddress(s *aklib.DBConfig, pwd []byte, isPublic bool) (*address.Address, error) {\n\tadrmap := w.AddressPublic\n\tvar idx uint32\n\tif !isPublic {\n\t\tadrmap = w.AddressChange\n\t\tidx = 1\n\t}\n\tmaster, err := address.DecryptSeed(w.EncSeed, pwd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tseed := address.HDseed(master, idx, uint32(len(adrmap)))\n\ta, err := address.New(s.Config, seed)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tadr := &Address{\n\t\tAddress: a,\n\t\tAdrstr: a.Address58(s.Config),\n\t\tNo: len(adrmap),\n\t}\n\tif err := w.PutAddress(s, pwd, adr, true); err != nil {\n\t\treturn nil, err\n\t}\n\tadrmap[a.Address58(s.Config)] = struct{}{}\n\treturn a, w.put(s)\n}", "func (addr *Address) CashAddress() (*cashaddress.Address, error) {\n\tvar network = cashaddress.MainNet\n\tvar addrtype = cashaddress.P2SH\n\n\tswitch addr.Network {\n\tcase MainNet:\n\t\tnetwork = cashaddress.MainNet\n\tcase TestNet:\n\t\tnetwork = cashaddress.TestNet\n\tcase RegTest:\n\t\tnetwork = 
cashaddress.RegTest\n\tdefault:\n\t\treturn nil, errors.New(\"invalid address network\")\n\t}\n\n\tswitch addr.Version {\n\tcase P2KH:\n\t\taddrtype = cashaddress.P2KH\n\tcase P2SH:\n\t\taddrtype = cashaddress.P2SH\n\tdefault:\n\t\treturn nil, errors.New(\"invalid address type\")\n\t}\n\treturn &cashaddress.Address{\n\t\tVersion: addrtype,\n\t\tPrefix: network,\n\t\tPayload: addr.Payload,\n\t}, nil\n}", "func (as *ASService) InputAddress(req *requests.AddrRequest) {\n\tas.AddrRequest(context.TODO(), req)\n}", "func (p *Poloniex) GenerateNewAddress(ctx context.Context, curr string) (string, error) {\n\ttype Response struct {\n\t\tSuccess int\n\t\tError string\n\t\tResponse string\n\t}\n\tresp := Response{}\n\tvalues := url.Values{}\n\tvalues.Set(\"currency\", curr)\n\n\terr := p.SendAuthenticatedHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, poloniexGenerateNewAddress, values, &resp)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif resp.Error != \"\" {\n\t\treturn \"\", errors.New(resp.Error)\n\t}\n\n\treturn resp.Response, nil\n}", "func (a *Account) NewAddress() (btcutil.Address, error) {\n\t// Get current block's height and hash.\n\tbs, err := GetCurBlock()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Get next address from wallet.\n\taddr, err := a.Wallet.NextChainedAddress(&bs, cfg.KeypoolSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Immediately write updated wallet to disk.\n\tAcctMgr.ds.ScheduleWalletWrite(a)\n\tif err := AcctMgr.ds.FlushAccount(a); err != nil {\n\t\treturn nil, fmt.Errorf(\"account write failed: %v\", err)\n\t}\n\n\t// Mark this new address as belonging to this account.\n\tAcctMgr.MarkAddressForAccount(addr, a)\n\n\t// Request updates from btcd for new transactions sent to this address.\n\ta.ReqNewTxsForAddress(addr)\n\n\treturn addr, nil\n}", "func Address(a string) Option {\n\treturn func(o *Options) {\n\t\to.Address = a\n\t}\n}", "func (as *ApiService) DepositAddresses(currency string) (*ApiResponse, error) {\n\treq := NewRequest(http.MethodGet, \"/api/v1/deposit-addresses\", map[string]string{\"currency\": currency})\n\treturn as.Call(req)\n}", "func addresses(w http.ResponseWriter, r *http.Request) {\n\n\tuser := getParam(r, USER_ID)\n\tcode := getUserCode(r)\n\n\tresource := \"/users/\" + user + \"/addresses\"\n\tredirectURL := HOST + \"/\" + user + \"/users/addresses\"\n\n\tclient, err := sdk.Meli(CLIENT_ID, code, CLIENT_SECRET, redirectURL)\n\n\tif err != nil {\n\t\tlog.Printf(\"Error: \", err.Error())\n\t\treturn\n\t}\n\n\tvar response *http.Response\n\tif response, err = client.Get(resource); err != nil {\n\t\tlog.Printf(\"Error: \", err.Error())\n\t\treturn\n\t}\n\n\t/*Example\n\t If the API to be called needs authorization/authentication (private api), then the authentication URL needs to be generated.\n\t Once you generate the URL and call it, you will be redirected to a ML login page where your credentials will be asked. 
Then, after\n\t entering your credentials you will obtain a CODE which will be used to get all the authorization tokens.\n\t*/\n\tif response.StatusCode == http.StatusForbidden {\n\t\turl := sdk.GetAuthURL(CLIENT_ID, sdk.AUTH_URL_MLA, redirectURL)\n\t\tbody, _ := ioutil.ReadAll(response.Body)\n\t\tlog.Printf(\"Returning Authentication URL:%s\\n\", url)\n\t\tlog.Printf(\"Error:%s\", body)\n\t\thttp.Redirect(w, r, url, 301)\n\t}\n\n\tprintOutput(w, response)\n}", "func (f *Factory) WithAddress(address string) *Factory {\n\tf.address = address\n\treturn f\n}", "func NewAddress(path, addr string, net Network, change, addrIndex uint32) *Address {\n\treturn &Address{path: path, addr: addr, net: net, change: change, addrIndex: addrIndex}\n}", "func MakeAddress(accman *wallet.Manager, account string) (wallet.Account, error) {\n\t// If the specified account is a valid address, return it\n\tif helper.IsHexAddress(account) {\n\t\treturn wallet.Account{Address: helper.HexToAddress(account)}, nil\n\t}\n\t// Otherwise try to interpret the account as a keystore index\n\tindex, err := strconv.Atoi(account)\n\tif err != nil {\n\t\treturn wallet.Account{}, fmt.Errorf(\"invalid account address or index %q\", account)\n\t}\n\treturn accman.AccountByIndex(index)\n}", "func DeployAddress(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *Address, error) {\n\tparsed, err := ParsedABI(K_Address)\n\tif err != nil {\n\t\treturn common.Address{}, nil, nil, err\n\t}\n\n\tif parsed == nil {\n\t\treturn common.Address{}, nil, nil, errors.New(\"GetABI returned nil\")\n\t}\n\n\taddress, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(AddressBin), backend)\n\tif err != nil {\n\t\treturn common.Address{}, nil, nil, err\n\t}\n\treturn address, tx, &Address{AddressCaller: AddressCaller{contract: contract}, AddressTransactor: AddressTransactor{contract: contract}, AddressFilterer: AddressFilterer{contract: contract}}, nil\n}" ]
[ "0.78183013", "0.7629378", "0.679397", "0.67914236", "0.66517043", "0.6582681", "0.64559764", "0.6390346", "0.6336294", "0.62283635", "0.62227005", "0.62106955", "0.62054926", "0.61936045", "0.6162253", "0.61483634", "0.61028504", "0.60747445", "0.6063984", "0.60360086", "0.6034897", "0.59988743", "0.5972265", "0.5971625", "0.5969762", "0.5963498", "0.59420764", "0.5926836", "0.5921498", "0.5915004", "0.58919835", "0.58894706", "0.5879588", "0.5872201", "0.583039", "0.58209383", "0.58180803", "0.5817585", "0.5817585", "0.5817585", "0.5817585", "0.5817585", "0.578661", "0.57581955", "0.5751077", "0.5750759", "0.5738778", "0.57359755", "0.5731498", "0.57213426", "0.5706656", "0.56942296", "0.5688643", "0.568476", "0.56828344", "0.56823474", "0.56692034", "0.56688535", "0.5654771", "0.56498075", "0.5638252", "0.56363094", "0.5611032", "0.5608034", "0.5602743", "0.5602377", "0.56013685", "0.55996203", "0.5592615", "0.5580301", "0.55709004", "0.55705786", "0.55693954", "0.55660844", "0.55576867", "0.5555394", "0.55448264", "0.55356944", "0.5529067", "0.55260164", "0.5511972", "0.5510217", "0.55089986", "0.5504246", "0.5504246", "0.5504246", "0.5504246", "0.55029464", "0.5497578", "0.549484", "0.54938716", "0.54853654", "0.54798526", "0.547885", "0.54680526", "0.54543644", "0.54409766", "0.54384214", "0.54345256", "0.5433521" ]
0.79032356
0
Create will create a new address for a specific asset and return the newly created address
func (as *AddressService) Create(name string) (*Address, error) { if isEmptyStr(as.assetCode) { return nil, errAssetCode } var ( addresses []*Address body struct { Address struct { Name string `json:"name"` } `json:"address"` } ) body.Address.Name = name if err := as.client.Post(buildString("address/deposit/", as.assetCode), &body, &addresses); err != nil { return nil, err } return addresses[0], nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (handler *Handler) CreateAddress(ctx context.Context, in *candyland_grpc.CreateAddressRequest) (*candyland_grpc.CreateAddressReply, error) {\n\terr := handler.CreateAddressUsecase.Create(in.UserId, in.CountryId, in.StateId, in.CityId, in.StreetId, in.Number, in.Complement.String())\n\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn nil, err\n\t}\n\n\tres := &candyland_grpc.CreateAddressReply{\n\t\tWasCreated: true,\n\t}\n\n\treturn res, nil\n}", "func CreateAddress() *addresspb.Address {\n\ta := addresspb.Address{\n\t\tCorrespondanceAddr: &addresspb.Location{\n\t\t\tLocation: \"loc 1\",\n\t\t\tCity: &addresspb.City{\n\t\t\t\tName: \"Mumbai\",\n\t\t\t\tZipCode: \"400005\",\n\t\t\t\tRegion: addresspb.Division_WEST,\n\t\t\t},\n\t\t},\n\n\t\tAdditionalAddr: []*addresspb.Location{\n\t\t\t{\n\t\t\t\tLocation: \"loc 2\",\n\t\t\t\tCity: &addresspb.City{\n\t\t\t\t\tName: \"Srinagar\",\n\t\t\t\t\tZipCode: \"190001\",\n\t\t\t\t\tRegion: addresspb.Division_NORTH,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tLocation: \"loc 3\",\n\t\t\t\tCity: &addresspb.City{\n\t\t\t\t\tName: \"Imphal\",\n\t\t\t\t\tZipCode: \"795001\",\n\t\t\t\t\tRegion: addresspb.Division_EAST,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tLocation: \"loc 4\",\n\t\t\t\tCity: &addresspb.City{\n\t\t\t\t\tName: \"Mysore\",\n\t\t\t\t\tZipCode: \"570001\",\n\t\t\t\t\tRegion: addresspb.Division_SOUTH,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\treturn &a\n}", "func CreateAddress(address models.RequestAddress) (bool, models.ResponseAddress, error) {\n\n\t//Create request\n\trequest := models.Request{}\n\trequest.AddBody(address)\n\trequest.SetUri(\"https://api.easypost.com/v2/addresses\")\n\trequest.SetMethod(\"POST\")\n\n\t//Send request\n\tresponseBody, err := SendRequest(request)\n\n\t//Initialize response address\n\tresponseAddress := models.ResponseAddress{}\n\n\terr = json.Unmarshal(responseBody, &responseAddress)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn false, responseAddress, fmt.Errorf(\"Unrecognized response from easypost %s\", err.Error())\n\t}\n\n\treturn responseAddress.Verifications.Delivery.Success, responseAddress, err\n}", "func (as *ApiService) CreateDepositAddress(currency string) (*ApiResponse, error) {\n\treq := NewRequest(http.MethodPost, \"/api/v1/deposit-addresses\", map[string]string{\"currency\": currency})\n\treturn as.Call(req)\n}", "func (id *Public) CreateAddress(version, stream uint64) {\n\tid.Address.Version = version\n\tid.Address.Stream = stream\n\tcopy(id.Address.Ripe[:], id.hash())\n}", "func (as *AddressbookService) CreateAddress(addr models.Address) (models.Address, error) {\n\treturn as.repo.AddAddress(addr)\n}", "func CreateAddress(id ID, owner []byte) (swarm.Address, error) {\n\tsum, err := hash(id, owner)\n\tif err != nil {\n\t\treturn swarm.ZeroAddress, err\n\t}\n\treturn swarm.NewAddress(sum), nil\n}", "func (c *Client) Address(assetCode ...string) *AddressService {\n\tif len(assetCode) == 0 {\n\t\tassetCode[0] = \"\"\n\t}\n\n\treturn &AddressService{service{c}, assetCode[0]}\n}", "func CreateAddress(amount int) (string, []Wallet) {\n\n\twallets := []Wallet{}\n\tfor i := 0; i < amount; i++ {\n\t\twif, _ := network.CreatePrivateKey()\n\t\taddress, _ := network.GetAddress(wif)\n\t\tvar wallet = Wallet{ADDRESS: address.EncodeAddress(), PRIVKEY: wif.String()}\n\t\twallets = append(wallets, wallet)\n\t}\n\n\tjson := ConvertToJSON(&wallets)\n\n\tlog.Println(\"Generated\", amount, \"addresses\")\n\n\treturn json, wallets\n\n}", "func createAddress(creator *Account) Word256 {\n\tnonce := 
creator.Nonce\n\tcreator.Nonce += 1\n\ttemp := make([]byte, 32+8)\n\tcopy(temp, creator.Address[:])\n\tPutUint64BE(temp[32:], nonce)\n\treturn LeftPadWord256(sha3.Sha3(temp)[:20])\n}", "func CreateAddrEntry(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tvar address Address\n\terr := json.NewDecoder(r.Body).Decode(&address)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\t_, ok := GetEntry(address.FirstName, address.LastName)\n\tif ok {\n\t\thttp.Error(w, fmt.Sprintf(\"Duplicate entry for firstName: %s, lastName: %s\", address.FirstName, address.LastName), 400)\n\t\treturn\n\t}\n\tUpdateEntry(address)\n\tjson.NewEncoder(w).Encode(address)\n}", "func (service AccountsService) Create(a Account) (*Response, Account, error) {\n\treq, err := service.client.newRequest(\"POST\", \"accounts\", nil, a)\n\tif err != nil {\n\t\treturn nil, Account{}, err\n\t}\n\n\tvar dest Account\n\tres, err := service.client.do(req, &dest)\n\n\tdest.BillingInfo = nil\n\n\treturn res, dest, err\n}", "func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {\n\tb, err := opts.ToAddressScopeCreateMap()\n\tif err != nil {\n\t\tr.Err = err\n\t\treturn\n\t}\n\tresp, err := client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{\n\t\tOkCodes: []int{201},\n\t})\n\t_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)\n\treturn\n}", "func CreateAddress(b types.Address, nonce *big.Int) types.Address {\n\tdata, _ := rlp.EncodeToBytes([]interface{}{b, nonce})\n\treturn types.BytesToAddress(keccak.Keccak256(data)[12:])\n}", "func CreateAddress(a *AddressDAL) (*AddressDAL, error) {\n\tresult := db.DB().Create(a)\n\tif result.Error != nil {\n\t\treturn nil, result.Error\n\t}\n\treturn a, nil\n}", "func (address *Address) CreateAddress(db *gorm.DB) (*Address, error) {\n\n\terr := db.Debug().Create(&address).Error\n\tif err != nil {\n\t\treturn &Address{}, err\n\t}\n\n\treturn address, nil\n}", "func (s *SmartContract) CreateAsset(ctx contractapi.TransactionContextInterface, id string, color string, size int, appraisedValue int) error {\n\n\t// Demonstrate the use of Attribute-Based Access Control (ABAC) by checking\n\t// to see if the caller has the \"abac.creator\" attribute with a value of true;\n\t// if not, return an error.\n\n\terr := ctx.GetClientIdentity().AssertAttributeValue(\"abac.creator\", \"true\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"submitting client not authorized to create asset, does not have abac.creator role\")\n\t}\n\n\texists, err := s.AssetExists(ctx, id)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif exists {\n\t\treturn fmt.Errorf(\"the asset %s already exists\", id)\n\t}\n\n\t// Get ID of submitting client identity\n\tclientID, err := s.GetSubmittingClientIdentity(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tasset := Asset{\n\t\tID: id,\n\t\tColor: color,\n\t\tSize: size,\n\t\tOwner: clientID,\n\t\tAppraisedValue: appraisedValue,\n\t}\n\tassetJSON, err := json.Marshal(asset)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn ctx.GetStub().PutState(id, assetJSON)\n}", "func CreateAddress(b common.Address, nonce uint64) common.Address {\n\tdata, _ := rlp.EncodeToBytes([]interface{}{b, nonce})\n\taddr := common.BytesToAddress(Keccak512(data)[:])\n\treturn common.DarmaAddressToContractAddress(addr)\n}", "func (mc *MoacChain) createChainAddress(tradePassword string) (addr string, err error) {\n\n\tdefer func() {\n\t\tif re := recover(); re != nil {\n\t\t\terr = re.(error)\n\t\t}\n\t}()\n\n\terr = 
rpcClient.Call(&addr, \"personal_newAccount\", tradePassword)\n\n\treturn addr, err\n}", "func (c *Constructor) newAddress(ctx context.Context) (string, error) {\n\tkp, err := keys.GenerateKeypair(c.curveType)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"%w unable to generate keypair\", err)\n\t}\n\n\taddress, _, err := c.helper.Derive(\n\t\tctx,\n\t\tc.network,\n\t\tkp.PublicKey,\n\t\tnil,\n\t)\n\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"%w: unable to derive address\", err)\n\t}\n\n\terr = c.helper.StoreKey(ctx, address, kp)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"%w: unable to store address\", err)\n\t}\n\n\tif err := c.handler.AddressCreated(ctx, address); err != nil {\n\t\treturn \"\", fmt.Errorf(\"%w: could not handle address creation\", err)\n\t}\n\n\treturn address, nil\n}", "func (a *AccountClient) Create(accountParams AccountParams) (*Resource, error) {\n\n\tresp, err := a.client.R().\n\t\tSetResult(&Resource{}).\n\t\tSetBody(map[string]AccountParams{\"data\": accountParams}).\n\t\tPost(\"/v1/organisation/accounts\")\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"create account failed: %s\", err)\n\t}\n\n\tif resp.Error() != nil {\n\t\treturn nil, getAPIError(resp)\n\t}\n\n\treturn resp.Result().(*Resource), nil\n}", "func (e *ExternalAssetTypeService) Create(name string, externalAssetType *ExternalAssetType) (*ExternalAssetType, *Response, error) {\n\tif externalAssetType == nil {\n\t\texternalAssetType = &ExternalAssetType{}\n\t}\n\texternalAssetType.Name = name\n\n\tendpoint := \"/assets/external/type\"\n\tresp, err := e.client.postRequestDecode(endpoint, externalAssetType)\n\treturn externalAssetType, resp, err\n}", "func CreateAddress(address string) string {\n\tnumeric := []string{\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"}\n\talpha := []string{\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\"}\n\n\tindexZero := RandomIndex(0, 10)\n\tindexOne := RandomIndex(0, 10)\n\tindexTwo := RandomIndex(0, 10)\n\n\treturn fmt.Sprintf(\"%s %s%s%s\", address, alpha[indexZero], numeric[indexOne], alpha[indexTwo])\n}", "func (lu *litUiClient) NewAddress() (string, error) {\n\n\t// cointype of 0 means default, not mainnet.\n\t// this is ugly but does prevent mainnet use for now.\n\n\tvar cointype, numadrs uint32\n\n\t// if no arguments given, generate 1 new address.\n\t// if no cointype given, assume type 1 (testnet)\n\n\tnumadrs = 1\n\n\treply := new(litrpc.AddressReply)\n\n\targs := new(litrpc.AddressArgs)\n\targs.CoinType = cointype\n\targs.NumToMake = numadrs\n\n\tfmt.Printf(\"adr cointye: %d num:%d\\n\", args.CoinType, args.NumToMake)\n\terr := lu.rpccon.Call(\"LitRPC.Address\", args, reply)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tresponse := reply.WitAddresses[0]\n\t//\tfmt.Fprintf(color.Output, \"new adr(s): %s\\nold: %s\\n\",\n\t//\t\tlnutil.Address(reply.WitAddresses), lnutil.Address(reply.LegacyAddresses))\n\treturn response, nil // reply.WitAddresses[]\n\n}", "func (sc StoreController) Create(c *gin.Context) {\n\tlog.Debug().Caller().Msg(\"stores create\")\n\tp, err := sc.Storeservice.Create(c)\n\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), \"InvalidAddress\") {\n\t\t\tc.AbortWithStatus(http.StatusNotFound)\n\t\t\tlog.Error().Caller().Err(err).Send()\n\t\t} else {\n\t\t\tc.AbortWithStatus(http.StatusBadRequest)\n\t\t\tlog.Error().Caller().Err(err).Send()\n\t\t}\n\t} else {\n\t\tc.JSON(http.StatusCreated, p)\n\t}\n}", "func CreateAddressPool(ctx iris.Context) {\n\turi := 
ctx.Request().RequestURI\n\tfabricID := ctx.Params().Get(\"id\")\n\tif _, err := capmodel.GetFabric(fabricID); err != nil {\n\t\terrMsg := fmt.Sprintf(\"failed to fetch fabric data for uri %s: %s\", uri, err.Error())\n\t\tcreateDbErrResp(ctx, err, errMsg, []interface{}{\"Fabric\", fabricID})\n\t\treturn\n\t}\n\tvar addresspoolData model.AddressPool\n\terr := ctx.ReadJSON(&addresspoolData)\n\tif err != nil {\n\t\terrorMessage := \"error while trying to get JSON body from the request: \" + err.Error()\n\t\tlog.Error(errorMessage)\n\t\tresp := updateErrorResponse(response.MalformedJSON, errorMessage, nil)\n\t\tctx.StatusCode(http.StatusBadRequest)\n\t\tctx.JSON(resp)\n\t\treturn\n\t}\n\t// Todo :Add required validation for the request params\n\tmissingAttribute, err := validateAddressPoolRequest(addresspoolData)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\tresp := updateErrorResponse(response.PropertyMissing, err.Error(), []interface{}{missingAttribute})\n\t\tctx.StatusCode(http.StatusBadRequest)\n\t\tctx.JSON(resp)\n\t\treturn\n\t}\n\tif addresspoolData.Ethernet.IPv4.GatewayIPAddress != \"\" {\n\t\tif _, _, err := net.ParseCIDR(addresspoolData.Ethernet.IPv4.GatewayIPAddress); err != nil {\n\t\t\terrorMessage := \"Invalid value for GatewayIPAddress:\" + err.Error()\n\t\t\tlog.Errorf(errorMessage)\n\t\t\tresp := updateErrorResponse(response.PropertyValueFormatError, errorMessage, []interface{}{addresspoolData.Ethernet.IPv4.GatewayIPAddress, \"GatewayIPAddress\"})\n\t\t\tctx.StatusCode(http.StatusBadRequest)\n\t\t\tctx.JSON(resp)\n\t\t\treturn\n\n\t\t}\n\t\tif addresspoolData.Ethernet.IPv4.VLANIdentifierAddressRange.Lower != addresspoolData.Ethernet.IPv4.VLANIdentifierAddressRange.Upper {\n\t\t\terrorMessage := fmt.Sprintf(\"Requested VLANIdentifierAddressRange Lower %d is not equal to Upper %d\", addresspoolData.Ethernet.IPv4.VLANIdentifierAddressRange.Lower, addresspoolData.Ethernet.IPv4.VLANIdentifierAddressRange.Upper)\n\t\t\tlog.Error(errorMessage)\n\t\t\tresp := updateErrorResponse(response.PropertyUnknown, errorMessage, []interface{}{\"VLANIdentifierAddressRange\"})\n\t\t\tctx.StatusCode(http.StatusBadRequest)\n\t\t\tctx.JSON(resp)\n\t\t\treturn\n\t\t}\n\t\taddressPools, err := capmodel.GetAllAddressPools(fabricID)\n\t\tif err != nil {\n\t\t\terrMsg := fmt.Sprintf(\"failed to fetch AddressPool data for uri %s: %s\", uri, err.Error())\n\t\t\tcreateDbErrResp(ctx, err, errMsg, []interface{}{\"Fabric\", fabricID})\n\t\t\treturn\n\t\t}\n\t\tfor _, data := range addressPools {\n\t\t\tif data.Ethernet.IPv4.GatewayIPAddress == addresspoolData.Ethernet.IPv4.GatewayIPAddress {\n\t\t\t\terrorMessage := \"Requested GatewayIPAddress is already present in the addresspool \" + data.ODataID\n\t\t\t\tlog.Error(errorMessage)\n\t\t\t\tresp := updateErrorResponse(response.ResourceAlreadyExists, errorMessage, []interface{}{\"AddressPool\", \"GatewayIPAddress\", addresspoolData.Ethernet.IPv4.GatewayIPAddress})\n\t\t\t\tctx.StatusCode(http.StatusConflict)\n\t\t\t\tctx.JSON(resp)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\tif addresspoolData.Ethernet.IPv4.VLANIdentifierAddressRange.Lower > addresspoolData.Ethernet.IPv4.VLANIdentifierAddressRange.Upper {\n\t\terrorMessage := fmt.Sprintf(\"Requested VLANIdentifierAddressRange Lower %d is greater than Upper %d\", addresspoolData.Ethernet.IPv4.VLANIdentifierAddressRange.Lower, addresspoolData.Ethernet.IPv4.VLANIdentifierAddressRange.Upper)\n\t\tlog.Error(errorMessage)\n\t\tresp := updateErrorResponse(response.PropertyUnknown, errorMessage, 
[]interface{}{\"VLANIdentifierAddressRange\"})\n\t\tctx.StatusCode(http.StatusBadRequest)\n\t\tctx.JSON(resp)\n\t\treturn\n\t}\n\t// validate the VLANIdentifierAddressRange lower value\n\tresp, statusCode := validateVLANIdentifierAddressRange(addresspoolData.Ethernet.IPv4.VLANIdentifierAddressRange.Lower, addresspoolData.Ethernet.IPv4.VLANIdentifierAddressRange.Upper)\n\tif statusCode != http.StatusOK {\n\t\tctx.StatusCode(statusCode)\n\t\tctx.JSON(resp)\n\t\treturn\n\t}\n\n\taddressPoolID := uuid.NewV4().String()\n\taddresspoolData.ODataContext = \"/ODIM/v1/$metadata#AddressPool.AddressPool\"\n\taddresspoolData.ODataType = \"#AddressPool.v1_1_0.AddressPool\"\n\taddresspoolData.ODataID = fmt.Sprintf(\"%s/%s\", uri, addressPoolID)\n\taddresspoolData.ID = addressPoolID\n\n\tif err = capmodel.SaveAddressPool(fabricID, addresspoolData.ODataID, &addresspoolData); err != nil {\n\t\terrMsg := fmt.Sprintf(\"failed to save AddressPool data for uri %s: %s\", uri, err.Error())\n\t\tcreateDbErrResp(ctx, err, errMsg, []interface{}{\"Fabric\", fabricID})\n\t\treturn\n\t}\n\n\tcommon.SetResponseHeader(ctx, map[string]string{\n\t\t\"Location\": addresspoolData.ODataID,\n\t})\n\n\tctx.StatusCode(http.StatusCreated)\n\tctx.JSON(addresspoolData)\n}", "func NewAddress(street string) *Address {\n // Just return a dummy for STUB\n return &Address{}\n}", "func makeAddress(keyPair *keypair.KeyPair, testnet bool) *account.Account {\n\n\treturn &account.Account{\n\t\tAccountInterface: &account.ED25519Account{\n\t\t\tTest: testnet,\n\t\t\tPublicKey: keyPair.PublicKey[:],\n\t\t},\n\t}\n}", "func (n *Node) CreateAddress() *Node {\n\tkeystore := n.client.KeystoreAPI()\n\tif _, err := keystore.CreateUser(n.UserPass); err != nil {\n\t\tpanic(stacktrace.Propagate(err, \"Could not create user for node.\"))\n\t}\n\n\txAddress, err := n.client.XChainAPI().CreateAddress(n.UserPass)\n\tif err != nil {\n\t\tpanic(stacktrace.Propagate(err, \"Could not create user address in the XChainAPI.\"))\n\t}\n\tn.XAddress = xAddress\n\n\tpAddress, err := n.client.PChainAPI().CreateAddress(n.UserPass)\n\tif err != nil {\n\t\tpanic(stacktrace.Propagate(err, \"Could not create user address in the PChainAPI.\"))\n\t}\n\tn.PAddress = pAddress\n\treturn n\n}", "func NewAddress(i, ii int) *Address {\n\tn := strconv.Itoa(i)\n\treturn &Address{\n\t\tId: time.Now().UnixNano(),\n\t\tStreet: \"10\" + n + \" Somewhere Lane\",\n\t\tCity: \"Awesome City \" + n,\n\t\tState: func() string {\n\t\t\tif i%2 == 0 {\n\t\t\t\treturn \"PA\"\n\t\t\t}\n\t\t\treturn \"CA\"\n\t\t}(),\n\t\tZip: ii,\n\t}\n}", "func (c *Client) Addresses(ctx context.Context, foreignID, currency string) (Address, error) {\n\treqBody := map[string]string{\n\t\t\"foreign_id\": foreignID,\n\t\t\"currency\": currency,\n\t}\n\n\treqJSON, err := json.Marshal(reqBody)\n\tif err != nil {\n\t\treturn Address{}, fmt.Errorf(\"request body marshaling error: %w\", err)\n\t}\n\n\taddressesURL, err := joinURL(c.api, addressesEndpoint)\n\tif err != nil {\n\t\treturn Address{}, fmt.Errorf(\"request url creating error: %w\", err)\n\t}\n\n\treq, err := http.NewRequestWithContext(ctx, http.MethodPost, addressesURL.String(), bytes.NewBuffer(reqJSON))\n\tif err != nil {\n\t\treturn Address{}, fmt.Errorf(\"request creating error: %w\", err)\n\t}\n\n\tsig, err := createHmac(c.secret, reqJSON)\n\tif err != nil {\n\t\treturn Address{}, fmt.Errorf(\"hmac signature creationg error: %w\", err)\n\t}\n\n\treq.Header.Set(contentTypeHeader, jsonContentType)\n\treq.Header.Set(keyHeader, 
c.apiKey)\n\treq.Header.Set(signatureHeader, sig)\n\n\tresp, err := c.client.Do(req)\n\tif err != nil {\n\t\treturn Address{}, fmt.Errorf(\"request error: %w\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\terr = ensureSuccessResponse(resp)\n\tif err != nil {\n\t\treturn Address{}, fmt.Errorf(\"request failed: %w\", err)\n\t}\n\n\trespBody := struct {\n\t\tData Address `json:\"data\"`\n\t}{}\n\n\terr = json.NewDecoder(resp.Body).Decode(&respBody)\n\tif err != nil {\n\t\treturn Address{}, fmt.Errorf(\"response unmarshaling error: %w\", err)\n\t}\n\n\treturn respBody.Data, nil\n}", "func (a *APIClient) Create(account AccountData) (created AccountData, err error) {\n\n\trel := &url.URL{Path: \"/v1/organisation/accounts\"}\n\turl := a.BaseURL.ResolveReference(rel)\n\n\tjsonPayload, err := json.Marshal(account)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn AccountData{}, err\n\t}\n\n\treq, err := http.NewRequest(\"POST\", url.String(), bytes.NewBuffer(jsonPayload))\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn AccountData{}, err\n\t}\n\n\tresp, err := a.HTTPClient.Do(req)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn AccountData{}, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn AccountData{}, err\n\t}\n\n\terr = json.Unmarshal(body, &created)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn AccountData{}, err\n\t}\n\n\treturn created, nil\n}", "func (_BaseAccessWalletFactory *BaseAccessWalletFactoryCaller) Creator(opts *bind.CallOpts) (common.Address, error) {\n\tvar out []interface{}\n\terr := _BaseAccessWalletFactory.contract.Call(opts, &out, \"creator\")\n\n\tif err != nil {\n\t\treturn *new(common.Address), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)\n\n\treturn out0, err\n\n}", "func (ms MockSOC) Address() boson.Address {\n\taddr, _ := soc.CreateAddress(ms.ID, ms.Owner)\n\treturn addr\n}", "func (as *Service) Create(name, iamRole, externalID string) (*Account, error) {\n\n\tbody := map[string]map[string]string{\n\t\t\"account\": {\"name\": name},\n\t}\n\n\tlog.Printf(\"Making request %v\\n\", body)\n\treq, err := as.httpClient.NewRequest(http.MethodPost, \"/setup/account\", &body)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar v common.Response\n\t_, err = as.httpClient.Do(req, &v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(v.Response.Items) == 0 {\n\t\treturn nil, errors.New(\"Couldn't create account\")\n\t}\n\tvar account Account\n\n\tfmt.Println(string(v.Response.Items[0]))\n\n\terr = json.Unmarshal(v.Response.Items[0], &account)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttime.Sleep(time.Second * 5)\n\n\terr = as.setupCloudCredentials(account.ID, iamRole, externalID)\n\n\tif err != nil {\n\t\t_ = as.Delete(account.ID)\n\t\treturn nil, err\n\t}\n\n\treturn &account, nil\n}", "func (i *ProjectIPServiceOp) Create(projectID string, ipReservationReq *IPReservationCreateRequest) (*IPAddressReservation, *Response, error) {\n\tif validateErr := ValidateUUID(projectID); validateErr != nil {\n\t\treturn nil, nil, validateErr\n\t}\n\tapiPath := path.Join(projectBasePath, projectID, ipBasePath)\n\tipr := new(IPAddressReservation)\n\n\tresp, err := i.client.DoRequest(\"POST\", apiPath, ipReservationReq, ipr)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\treturn ipr, resp, err\n}", "func (c *Client) CreateAsset(asset *Asset) (*Asset, error) {\n\tvar out AssetItem\n\terr := c.WriteObject(\"/api/v2/assets\", \"POST\", 
asset, &out)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &out.Asset, nil\n}", "func (r *PoolNAPTRResource) Create(item Pool) error {\n\tif err := r.c.ModQuery(\"POST\", BasePath+PoolNAPTREndpoint, item); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (eth *Eth) NewAddress(set bool) string {\n\tnewpair := crypto.GenerateNewKeyPair()\n\taddr := ethutil.Bytes2Hex(newpair.Address())\n\tring := eth.keyManager.KeyRing()\n\tring.AddKeyPair(newpair)\n\tif set {\n\t\teth.SetAddressN(ring.Len() - 1)\n\t}\n\treturn addr\n}", "func CustomerAddressCreate(ctx context.Context, db *mongo.Database, cAddress CustomerAddress) error {\n\tcol := db.Collection(AddressCollection)\n\t_, err := col.InsertOne(ctx, cAddress)\n\tif err != nil {\n\t\tif merr, ok := err.(mongo.WriteException); ok {\n\t\t\terrCode := merr.WriteErrors[0].Code\n\t\t\tif errCode == 11000 {\n\t\t\t\treturn ErrAddressExists\n\t\t\t}\n\t\t}\n\t\treturn err\n\t}\n\treturn nil\n}", "func (transaction *Transaction) create(srcAccountId, destAccountId string, amount float32) *Transaction {\n\n\tfmt.Println(\"creating transaction\")\n\ttransaction.srcAccountId = srcAccountId\n\ttransaction.destAccountId = destAccountId\n\ttransaction.amount = amount\n\treturn transaction\n}", "func Create(w http.ResponseWriter, r *http.Request) {\n\n\taccountDecoder := json.NewDecoder(r.Body)\n\tvar accData Account\n\terr := accountDecoder.Decode(&accData)\n\tif err != nil {\n\t\tlog.Fatalln(\"error:\", err)\n\t}\n\taccData.CreateAccount()\n\tfmt.Fprintf(w, \"Account added successfully\")\n}", "func ExampleSnowball_CreateAddress_shared00() {\n\tsvc := snowball.New(session.New())\n\tinput := &snowball.CreateAddressInput{\n\t\tAddress: &snowball.Address{\n\t\t\tCity: aws.String(\"Seattle\"),\n\t\t\tCompany: aws.String(\"My Company's Name\"),\n\t\t\tCountry: aws.String(\"USA\"),\n\t\t\tName: aws.String(\"My Name\"),\n\t\t\tPhoneNumber: aws.String(\"425-555-5555\"),\n\t\t\tPostalCode: aws.String(\"98101\"),\n\t\t\tStateOrProvince: aws.String(\"WA\"),\n\t\t\tStreet1: aws.String(\"123 Main Street\"),\n\t\t},\n\t}\n\n\tresult, err := svc.CreateAddress(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase snowball.ErrCodeInvalidAddressException:\n\t\t\t\tfmt.Println(snowball.ErrCodeInvalidAddressException, aerr.Error())\n\t\t\tcase snowball.ErrCodeUnsupportedAddressException:\n\t\t\t\tfmt.Println(snowball.ErrCodeUnsupportedAddressException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func (act Account) Create(nr Application) (Resource, error) {\n\tvar r Resource\n\tif nr.Validate() != nil {\n\t\treturn r, nr.Validate()\n\t}\n\terr := common.SendPostRequest(fmt.Sprintf(applications.Create, act.AccountSid), nr, act, &r)\n\treturn r, err\n}", "func (_BaseAccessWallet *BaseAccessWalletCaller) Creator(opts *bind.CallOpts) (common.Address, error) {\n\tvar out []interface{}\n\terr := _BaseAccessWallet.contract.Call(opts, &out, \"creator\")\n\n\tif err != nil {\n\t\treturn *new(common.Address), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)\n\n\treturn out0, err\n\n}", "func (dcr *ExchangeWallet) NewAddress() (string, error) {\n\treturn dcr.DepositAddress()\n}", "func (c *Client) Create(ctx context.Context, params 
*razorpay.OrderParams) (*razorpay.Order, error) {\n\torder := &razorpay.Order{}\n\terr := c.Call(ctx, http.MethodPost, \"/orders\", params, order)\n\treturn order, err\n}", "func (c *PartyController) Create(ctx *app.CreatePartyContext) error {\n\t// PartyController_Create: start_implement\n\n\terr := c.P.PinService().CreateParty(&pinbase.PartyCreate{\n\t\tID: pinbase.Hash(ctx.Payload.Hash),\n\t\tDescription: ctx.Payload.Description,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// PartyController_Create: end_implement\n\tctx.ResponseData.Header().Set(\"Location\", app.PartyHref(ctx.Payload.Hash))\n\treturn ctx.Created()\n}", "func (e Endpoints) PostAddress(ctx context.Context, profileID string, a Address) error {\n\n\t// TODO: Create detailed ref spec\n\trequest := postAddressRequest{ProfileID: profileID, Address: a}\n\n\tresponse, err := e.PostAddressEndpoint(ctx, request)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresp := response.(postAddressResponse)\n\n\treturn resp.Err\n}", "func (a *Account) NewAddress() (btcutil.Address, error) {\n\t// Get current block's height and hash.\n\trpcc, err := accessClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbs, err := rpcc.BlockStamp()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Get next address from wallet.\n\taddr, err := a.KeyStore.NextChainedAddress(&bs, cfg.KeypoolSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Immediately write updated wallet to disk.\n\tAcctMgr.ds.ScheduleWalletWrite(a)\n\tif err := AcctMgr.ds.FlushAccount(a); err != nil {\n\t\treturn nil, fmt.Errorf(\"account write failed: %v\", err)\n\t}\n\n\t// Mark this new address as belonging to this account.\n\tAcctMgr.MarkAddressForAccount(addr, a)\n\n\t// Request updates from btcd for new transactions sent to this address.\n\tif err := rpcc.NotifyReceived([]btcutil.Address{addr}); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn addr, nil\n}", "func NewAddresser(assetID uint32, acctXPub string, keyIndexer KeyIndexer, network dex.Network) (Addresser, uint32, error) {\n\tdrv, ok := drivers[assetID]\n\tif !ok {\n\t\treturn nil, 0, fmt.Errorf(\"unknown asset driver %d\", assetID)\n\t}\n\taf, ok := drv.(AddresserFactory)\n\tif !ok {\n\t\treturn nil, 0, fmt.Errorf(\"asset does not support NewAddresser\")\n\t}\n\treturn af.NewAddresser(acctXPub, keyIndexer, network)\n}", "func Create(account *Account) (Data, error) {\n\tpayload, err := json.Marshal(Data{Account: *account})\n\tif err != nil {\n\t\treturn Data{}, err\n\t}\n\n\tresponseStatus, responsePayload, err := doRequest(&request{\n\t\tmethod: \"POST\",\n\t\tresource: \"v1/organisation/accounts/\",\n\t\tpayload: payload,\n\t})\n\n\tif err != nil {\n\t\treturn Data{}, err\n\t}\n\n\treturn handleResponseData(responsePayload, http.StatusCreated, responseStatus)\n}", "func MakeAddress(accman *wallet.Manager, account string) (wallet.Account, error) {\n\t// If the specified account is a valid address, return it\n\tif helper.IsHexAddress(account) {\n\t\treturn wallet.Account{Address: helper.HexToAddress(account)}, nil\n\t}\n\t// Otherwise try to interpret the account as a keystore index\n\tindex, err := strconv.Atoi(account)\n\tif err != nil {\n\t\treturn wallet.Account{}, fmt.Errorf(\"invalid account address or index %q\", account)\n\t}\n\treturn accman.AccountByIndex(index)\n}", "func NewAddress() platformservices.Address {\n\n\tvar lat = 37.7917146\n\tvar lng = -122.397054\n\n\treturn platformservices.Address{\n\t\tAddressType: platformservices.AddressTypeLegal,\n\t\tStreetAddress: \"100 Main 
Street\",\n\t\tCity: \"San Francisco\",\n\t\tState: \"CA\",\n\t\tCountry: \"US\",\n\t\tPostalCode: \"94100\",\n\t\tLatitude: &lat,\n\t\tLongitude: &lng,\n\t}\n}", "func (agr *apiGatewayResource) Create(request *http.Request) (string, restful.Attributes, error) {\n\tctx := request.Context()\n\tapiGatewayInfo, err := agr.getAPIGatewayInfoFromRequest(request)\n\tif err != nil {\n\t\tagr.Logger.WarnWithCtx(ctx, \"Failed to get api gateway config and status from body\", \"err\", err)\n\t\treturn \"\", nil, err\n\t}\n\n\treturn agr.createAPIGateway(request, apiGatewayInfo)\n}", "func (idx *ExistsAddrIndex) Create(dbTx database.Tx) error {\n\t_, err := dbTx.Metadata().CreateBucket(existsAddrIndexKey)\n\treturn err\n}", "func (p *Poloniex) GenerateNewAddress(ctx context.Context, curr string) (string, error) {\n\ttype Response struct {\n\t\tSuccess int\n\t\tError string\n\t\tResponse string\n\t}\n\tresp := Response{}\n\tvalues := url.Values{}\n\tvalues.Set(\"currency\", curr)\n\n\terr := p.SendAuthenticatedHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, poloniexGenerateNewAddress, values, &resp)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif resp.Error != \"\" {\n\t\treturn \"\", errors.New(resp.Error)\n\t}\n\n\treturn resp.Response, nil\n}", "func (a *Account) NewAddress() (btcutil.Address, error) {\n\t// Get current block's height and hash.\n\tbs, err := GetCurBlock()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Get next address from wallet.\n\taddr, err := a.Wallet.NextChainedAddress(&bs, cfg.KeypoolSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Immediately write updated wallet to disk.\n\tAcctMgr.ds.ScheduleWalletWrite(a)\n\tif err := AcctMgr.ds.FlushAccount(a); err != nil {\n\t\treturn nil, fmt.Errorf(\"account write failed: %v\", err)\n\t}\n\n\t// Mark this new address as belonging to this account.\n\tAcctMgr.MarkAddressForAccount(addr, a)\n\n\t// Request updates from btcd for new transactions sent to this address.\n\ta.ReqNewTxsForAddress(addr)\n\n\treturn addr, nil\n}", "func CreateAsset(\n\ttrx storage.Transaction,\n\tissueTxId merkle.Digest,\n\tissueBlockNumber uint64,\n\tassetId transactionrecord.AssetIdentifier,\n\tnewOwner *account.Account,\n) {\n\t// ensure single threaded\n\ttoLock.Lock()\n\tdefer toLock.Unlock()\n\n\tnewData := &AssetOwnerData{\n\t\ttransferBlockNumber: issueBlockNumber,\n\t\tissueTxId: issueTxId,\n\t\tissueBlockNumber: issueBlockNumber,\n\t\tassetId: assetId,\n\t}\n\n\t// store to database\n\tcreate(trx, issueTxId, newData, newOwner)\n}", "func (_BaseFactory *BaseFactoryCaller) Creator(opts *bind.CallOpts) (common.Address, error) {\n\tvar out []interface{}\n\terr := _BaseFactory.contract.Call(opts, &out, \"creator\")\n\n\tif err != nil {\n\t\treturn *new(common.Address), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)\n\n\treturn out0, err\n\n}", "func (h *HitBTC) GenerateNewAddress(ctx context.Context, currency string) (DepositCryptoAddresses, error) {\n\tresp := DepositCryptoAddresses{}\n\terr := h.SendAuthenticatedHTTPRequest(ctx, exchange.RestSpot, http.MethodPost,\n\t\tapiV2CryptoAddress+\"/\"+currency,\n\t\turl.Values{},\n\t\totherRequests,\n\t\t&resp)\n\n\treturn resp, err\n}", "func (a *ApiDB) CreateContract(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\tp := MODELS.CREATE_UPDATE_CONTRACT_REQUEST{}\n\terr := json.NewDecoder(r.Body).Decode(&p)\n\tif err != nil {\n\t\tio.WriteString(w, `{\"message\": \"wrong 
format!\"}`)\n\t\treturn\n\t}\n\n\tresult := BUSINESS.CreateContract(a.Db, p)\n\tif result {\n\t\tio.WriteString(w, ` { \"status\": 200,\n \"message\": \"Create contract success\",\n \"data\": {\n \"status\": 1\n }\n}\n`)\n\t} else {\n\t\tio.WriteString(w, `{ \"message\": \"Can’t create contract\"}`)\n\t}\n}", "func (addressManager *AddressManager) NewAddress() address.Address {\n\treturn addressManager.Address(addressManager.lastAddressIndex + 1)\n}", "func (w *WalletChaincode) create(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\tif len(args) < 6 {\n\t\treturn shim.Error(\"Incorrect number of arguments. Expecting at least 2\")\n\t}\n\taddress := args[2]\n\taccountKey := buildAccountKey(address)\n\tif err := stub.PutState(accountKey, []byte(address)); err != nil {\n\t\treturn shim.Error(fmt.Sprintf(\"Error putting data for key [%s]: %s\", accountKey, err))\n\t}\n\tfmt.Println(\"create an account: \", accountKey)\n\n\twalletKey := buildWalletKey(args[0], args[1], address)\n\tif err := stub.PutState(walletKey, []byte(args[5])); err != nil {\n\t\treturn shim.Error(fmt.Sprintf(\"Error putting data for key [%s]: %s\", walletKey, err))\n\t}\n\tfmt.Println(\"create a wallet: \", walletKey)\n\n\t// seqBytes, err := stub.GetState(SEQUENCE)\n\t// if err != nil {\n\t// \treturn shim.Error(\"Failed to get state\")\n\t// }\n\t// if seqBytes == nil {\n\t// \treturn shim.Error(\"Entity not found\")\n\t// }\n\t// seq, _ := strconv.ParseInt(string(seqBytes), 10, 64)\n\tseq := atomic.AddUint64(&w.Sequence, 1)\n\tsequenceKey := buildSequenceKey(seq)\n\tjsonTx := \"{\\\"sequence\\\":\\\"\" + strconv.FormatUint(seq, 10) + \"\\\",\\\"txid\\\":\\\"\" + string(stub.GetTxID()) + \"\\\"}\"\n\tif err := stub.PutState(sequenceKey, []byte(jsonTx)); err != nil {\n\t\treturn shim.Error(fmt.Sprintf(\"Error putting data for key [%s]: %s\", walletKey, err))\n\t}\n\n\tfmt.Println(\"create success: \", stub.GetTxID())\n\treturn shim.Success([]byte(fmt.Sprintf(\"{\\\"wallet\\\":\\\"%s\\\", \\\"txid\\\":\\\"%s\\\"}\", walletKey, stub.GetTxID())))\n}", "func (_Editable *EditableCaller) Creator(opts *bind.CallOpts) (common.Address, error) {\n\tvar out []interface{}\n\terr := _Editable.contract.Call(opts, &out, \"creator\")\n\n\tif err != nil {\n\t\treturn *new(common.Address), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)\n\n\treturn out0, err\n\n}", "func (_BaseLibraryFactory *BaseLibraryFactoryCaller) Creator(opts *bind.CallOpts) (common.Address, error) {\n\tvar out []interface{}\n\terr := _BaseLibraryFactory.contract.Call(opts, &out, \"creator\")\n\n\tif err != nil {\n\t\treturn *new(common.Address), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)\n\n\treturn out0, err\n\n}", "func (f *Factory) Address() string { return f.address }", "func (pl *PaymentLink) Create(data map[string]interface{}, extraHeaders map[string]string) (map[string]interface{}, error) {\n\turl := fmt.Sprintf(\"/%s%s\", constants.VERSION_V1, constants.PaymentLink_URL)\n\treturn pl.Request.Post(url, data, extraHeaders)\n}", "func (s *Server) CreateContract(w http.ResponseWriter, r *http.Request) {\n\tvar reqData CreateContractRequest\n\tif err := json.NewDecoder(r.Body).Decode(&reqData); err != nil {\n\t\tweb.RespondError(w, r, s.logger, http.StatusInternalServerError, fmt.Errorf(\"decode request body: %w\", err))\n\t\treturn\n\t}\n\n\tif err := contract.CreateContract(r.Context(), s.dbc, reqData.NodePublicKey, reqData.EntityID); err != nil 
{\n\t\tweb.RespondError(w, r, s.logger, http.StatusInternalServerError, fmt.Errorf(\"create entity: %w\", err))\n\t\treturn\n\t}\n\n\tweb.Respond(w, r, s.logger, http.StatusCreated, nil)\n}", "func (_BaseLibrary *BaseLibraryCaller) Creator(opts *bind.CallOpts) (common.Address, error) {\n\tvar out []interface{}\n\terr := _BaseLibrary.contract.Call(opts, &out, \"creator\")\n\n\tif err != nil {\n\t\treturn *new(common.Address), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)\n\n\treturn out0, err\n\n}", "func CreateAsset(met metadata.MetaData, creator *Holder, sym string, name string, desc string) *Asset {\n\tout := Asset{\n\t\tMet: met.(*concrete_metadata.MetaData),\n\t\tCreator: creator,\n\t\tSym: sym,\n\t\tName: name,\n\t\tDesc: desc,\n\t}\n\n\treturn &out\n}", "func CreateAddress2(b types.Address, salt [32]byte, code []byte) types.Address {\n\treturn types.BytesToAddress(keccak.Keccak256([]byte{0xff}, b.Bytes(), salt[:], keccak.Keccak256(code))[12:])\n}", "func NewAccountAddress(\n\taccountConfiguration *signing.Configuration,\n\tkeyPath signing.RelativeKeypath,\n\tnet *chaincfg.Params,\n\tlog *logrus.Entry,\n) *AccountAddress {\n\n\tvar address btcutil.Address\n\tvar redeemScript []byte\n\tconfiguration, err := accountConfiguration.Derive(keyPath)\n\tif err != nil {\n\t\tlog.WithError(err).Panic(\"Failed to derive the configuration.\")\n\t}\n\tlog = log.WithFields(logrus.Fields{\n\t\t\"key-path\": configuration.AbsoluteKeypath().Encode(),\n\t\t\"configuration\": configuration.String(),\n\t})\n\tlog.Debug(\"Creating new account address\")\n\n\tpublicKeyHash := btcutil.Hash160(configuration.PublicKey().SerializeCompressed())\n\tswitch configuration.ScriptType() {\n\tcase signing.ScriptTypeP2PKH:\n\t\taddress, err = btcutil.NewAddressPubKeyHash(publicKeyHash, net)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Panic(\"Failed to get P2PKH addr. from public key hash.\")\n\t\t}\n\tcase signing.ScriptTypeP2WPKHP2SH:\n\t\tvar segwitAddress *btcutil.AddressWitnessPubKeyHash\n\t\tsegwitAddress, err = btcutil.NewAddressWitnessPubKeyHash(publicKeyHash, net)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Panic(\"Failed to get p2wpkh-p2sh addr. from publ. key hash.\")\n\t\t}\n\t\tredeemScript, err = txscript.PayToAddrScript(segwitAddress)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Panic(\"Failed to get redeem script for segwit address.\")\n\t\t}\n\t\taddress, err = btcutil.NewAddressScriptHash(redeemScript, net)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Panic(\"Failed to get a P2SH address for segwit.\")\n\t\t}\n\tcase signing.ScriptTypeP2WPKH:\n\t\taddress, err = btcutil.NewAddressWitnessPubKeyHash(publicKeyHash, net)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Panic(\"Failed to get p2wpkh addr. from publ. 
key hash.\")\n\t\t}\n\tcase signing.ScriptTypeP2TR:\n\t\toutputKey := txscript.ComputeTaprootKeyNoScript(configuration.PublicKey())\n\t\taddress, err = btcutil.NewAddressTaproot(schnorr.SerializePubKey(outputKey), net)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Panic(\"Failed to get p2tr addr\")\n\t\t}\n\tdefault:\n\t\tlog.Panic(fmt.Sprintf(\"Unrecognized script type: %s\", configuration.ScriptType()))\n\t}\n\n\treturn &AccountAddress{\n\t\tAddress: address,\n\t\tAccountConfiguration: accountConfiguration,\n\t\tConfiguration: configuration,\n\t\tredeemScript: redeemScript,\n\t\tlog: log,\n\t}\n}", "func (client *PublicIPAddressesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, publicIPAddressName string, parameters PublicIPAddress, options *PublicIPAddressesClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif publicIPAddressName == \"\" {\n\t\treturn nil, errors.New(\"parameter publicIPAddressName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{publicIpAddressName}\", url.PathEscape(publicIPAddressName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func NewAddress(pk ctypes.PubKey) *Address {\n\ta := Address{pk}\n\treturn &a\n}", "func (s *SOC) Address() (swarm.Address, error) {\n\tif len(s.owner) != crypto.AddressSize {\n\t\treturn swarm.ZeroAddress, errInvalidAddress\n\t}\n\treturn CreateAddress(s.id, s.owner)\n}", "func (r *CompaniesService) Create(company *Company) *CompaniesCreateCall {\n\tc := &CompaniesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.company = company\n\treturn c\n}", "func CreateContract(rpcURL string, sk string, data []byte, gasPrice uint64, gasLimit uint64) (common.Hash, error) {\n\treturn SendTx(rpcURL, sk, \"\", 0, data, gasPrice, gasLimit)\n}", "func (b *Backend) NewAddress() wallet.Address {\n\taddr := Address{}\n\treturn &addr\n}", "func CreateP2SHAddress(c Currency, script []byte) (string, error) {\n\taddress := base58.CheckEncode(btcutil.Hash160(script), c.Chaincfg.ScriptHashAddrID)\n\treturn address, nil\n}", "func (c *Client) Create(ctx context.Context, p *CreatePayload) (res *Person, err error) {\n\tvar ires interface{}\n\tires, err = c.CreateEndpoint(ctx, p)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn ires.(*Person), nil\n}", "func NewAddress(city, state string) *Address {\n\treturn &Address{\n\t\tCity: city,\n\t\tState: state,\n\t}\n}", "func (s *Service) Create(newAccountDefinition *model.NewAccountDefinition) *CreateOp {\n\treturn &CreateOp{\n\t\tCredential: s.credential,\n\t\tMethod: \"POST\",\n\t\tPath: \"/v2/accounts\",\n\t\tPayload: 
newAccountDefinition,\n\t\tQueryOpts: make(url.Values),\n\t\tVersion: esign.APIv2,\n\t}\n}", "func (Interface *LineInterface) CreateWallet() {\n\twallets, _ := wallet.CreateWallets()\n\taddress := wallets.AddWallet()\n\twallets.SaveFile()\n\tfmt.Printf(\"NEW ADDRESS: %s\\n\", address)\n}", "func (planetDeliveryRest *PlanetDeliveryRest) Create(w http.ResponseWriter, r *http.Request) {\n\tvar planet entity.Planet\n\n\terr := json.NewDecoder(r.Body).Decode(&planet)\n\tif err != nil {\n\t\tError(w, \"Failed to decode JSON\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tplanetToInsert := *entity.NewPlanet(planet.Name, planet.Climate, planet.Terrain)\n\n\tnewPlanet, err := planetDeliveryRest.planetUsecase.Create(r.Context(), planetToInsert)\n\tif err != nil {\n\t\tError(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tJSON(w, newPlanet, http.StatusCreated)\n}", "func (s *serviceOutlite) EntityCreate(input *schemas.SchemaOutlet) (*models.ModelOutlet, schemas.SchemaDatabaseError) {\n\tvar outlet schemas.SchemaOutlet\n\toutlet.Name = input.Name\n\toutlet.Phone = input.Phone\n\toutlet.Address = input.Address\n\toutlet.MerchatID = input.MerchatID\n\n\tres, err := s.outlet.EntityCreate(&outlet)\n\treturn res, err\n}", "func (w *Wallet) NewAddress(account uint32,\n\tscope waddrmgr.KeyScope) (btcutil.Address, er.R) {\n\n\tchainClient, err := w.requireChainClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar (\n\t\taddr btcutil.Address\n\t\tprops *waddrmgr.AccountProperties\n\t)\n\terr = walletdb.Update(w.db, func(tx walletdb.ReadWriteTx) er.R {\n\t\taddrmgrNs := tx.ReadWriteBucket(waddrmgrNamespaceKey)\n\t\tvar err er.R\n\t\taddr, props, err = w.newAddress(addrmgrNs, account, scope)\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Notify the rpc server about the newly created address.\n\terr = chainClient.NotifyReceived([]btcutil.Address{addr})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tw.NtfnServer.notifyAccountProperties(props)\n\n\treturn addr, nil\n}", "func CreateAddressScope(t *testing.T, client *gophercloud.ServiceClient) (*addressscopes.AddressScope, error) {\n\taddressScopeName := tools.RandomString(\"TESTACC-\", 8)\n\tcreateOpts := addressscopes.CreateOpts{\n\t\tName: addressScopeName,\n\t\tIPVersion: 4,\n\t}\n\n\tt.Logf(\"Attempting to create an address-scope: %s\", addressScopeName)\n\n\taddressScope, err := addressscopes.Create(client, createOpts).Extract()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tt.Logf(\"Successfully created the addressscopes.\")\n\n\tth.AssertEquals(t, addressScope.Name, addressScopeName)\n\tth.AssertEquals(t, addressScope.IPVersion, int(gophercloud.IPv4))\n\n\treturn addressScope, nil\n}", "func (lu *litUiClient) Address() (string, error) {\n\n\t// cointype of 0 means default, not mainnet.\n\t// this is ugly but does prevent mainnet use for now.\n\n\tvar cointype, numadrs uint32\n\n\t// if no arguments given, generate 1 new address.\n\t// if no cointype given, assume type 1 (testnet)\n\n\tnumadrs = 0\n\n\treply := new(litrpc.AddressReply)\n\n\targs := new(litrpc.AddressArgs)\n\targs.CoinType = cointype\n\targs.NumToMake = numadrs\n\n\tfmt.Printf(\"adr cointye: %d num:%d\\n\", args.CoinType, args.NumToMake)\n\terr := lu.rpccon.Call(\"LitRPC.Address\", args, reply)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tresponse := reply.WitAddresses[len(reply.WitAddresses)-1]\n\treturn response, nil\n}", "func (c *MyAssetContract) CreateMyAsset(ctx contractapi.TransactionContextInterface, myAssetID string, 
value string) error {\n\texists, err := c.MyAssetExists(ctx, myAssetID)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not read from world state. %s\", err)\n\t} else if exists {\n\t\treturn fmt.Errorf(\"The asset %s already exists\", myAssetID)\n\t}\n\n\tmyAsset := new(MyAsset)\n\tmyAsset.Value = value\n\n\tbytes, _ := json.Marshal(myAsset)\n\n\treturn ctx.GetStub().PutState(myAssetID, bytes)\n}", "func (n *NetworkServiceHandler) Create(ctx context.Context, regionID, description, cidrBlock string) (*Network, error) {\n\n\turi := \"/v1/network/create\"\n\n\tvalues := url.Values{\n\t\t\"DCID\": {regionID},\n\t}\n\n\t// Optional\n\tif cidrBlock != \"\" {\n\t\t_, ipNet, err := net.ParseCIDR(cidrBlock)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif v4Subnet := ipNet.IP.To4(); v4Subnet != nil {\n\t\t\tvalues.Add(\"v4_subnet\", v4Subnet.String())\n\t\t}\n\t\tmask, _ := ipNet.Mask.Size()\n\t\tvalues.Add(\"v4_subnet_mask\", strconv.Itoa(mask))\n\t}\n\n\tif description != \"\" {\n\t\tvalues.Add(\"description\", description)\n\t}\n\n\treq, err := n.client.NewRequest(ctx, http.MethodPost, uri, values)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnetwork := new(Network)\n\terr = n.client.DoWithContext(ctx, req, network)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn network, nil\n}", "func (_BaseContentFactory *BaseContentFactoryCaller) Creator(opts *bind.CallOpts) (common.Address, error) {\n\tvar out []interface{}\n\terr := _BaseContentFactory.contract.Call(opts, &out, \"creator\")\n\n\tif err != nil {\n\t\treturn *new(common.Address), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)\n\n\treturn out0, err\n\n}", "func (s *CheckoutServiceOp) Create(checkout Checkout) (*Checkout, error) {\n\tpath := fmt.Sprintf(\"%s.json\", checkoutsBasePath)\n\twrappedData := CheckoutResource{Checkout: &checkout}\n\tresource := new(CheckoutResource)\n\terr := s.client.Post(path, wrappedData, resource)\n\treturn resource.Checkout, err\n}", "func (vr *VirtualResource) Create(item VirtualServerConfig) error {\n\tif err := vr.c.ModQuery(\"POST\", BasePath+VirtualEndpoint, item); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func op_CREATE(pc *uint64, in *interpreter, ctx *callCtx) uint64 {\n\tstack := ctx.stack\n\n\tvalue, offset, size := stack.Pop(), stack.Pop(), stack.Pop()\n\tinput := ctx.memory.GetCopy(offset.Uint64(), size.Uint64())\n\n\tin.evm.create(ctx.contract, input, &value)\n\n\t// returned address from create\n\taddress := in.evm.create_addr.get(in.evm.level + 1)\n\n\taddr := new(uint256.Int).SetBytes(address.Bytes())\n\tstack.Push(addr)\n\n\treturn 0\n}", "func (wallet *Wallet) CreateAsset(asset Asset) (assetColor ledgerstate.Color, err error) {\n\tif asset.Amount == 0 {\n\t\terr = errors.New(\"required to provide the amount when trying to create an asset\")\n\n\t\treturn\n\t}\n\n\tif asset.Name == \"\" {\n\t\terr = errors.New(\"required to provide a name when trying to create an asset\")\n\n\t\treturn\n\t}\n\n\ttx, err := wallet.SendFunds(\n\t\tDestination(wallet.ReceiveAddress(), asset.Amount, ledgerstate.ColorMint),\n\t)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// this only works if there is only one MINT output in the transaction\n\tassetColor = ledgerstate.ColorIOTA\n\tfor _, output := range tx.Essence().Outputs() {\n\t\toutput.Balances().ForEach(func(color ledgerstate.Color, balance uint64) bool {\n\t\t\tif color == ledgerstate.ColorMint {\n\t\t\t\tdigest := blake2b.Sum256(output.ID().Bytes())\n\t\t\t\tassetColor, _, err = 
ledgerstate.ColorFromBytes(digest[:])\n\t\t\t}\n\t\t\treturn true\n\t\t})\n\t}\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif assetColor != ledgerstate.ColorIOTA {\n\t\twallet.assetRegistry.RegisterAsset(assetColor, asset)\n\t}\n\n\treturn\n}", "func CreateReleaseEipSegmentAddressRequest() (request *ReleaseEipSegmentAddressRequest) {\n\trequest = &ReleaseEipSegmentAddressRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Vpc\", \"2016-04-28\", \"ReleaseEipSegmentAddress\", \"vpc\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (r *OrganizationsService) Create(googlecloudapigeev1organization *GoogleCloudApigeeV1Organization) *OrganizationsCreateCall {\n\tc := &OrganizationsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.googlecloudapigeev1organization = googlecloudapigeev1organization\n\treturn c\n}", "func (addressManager *AddressManager) Address(addressIndex uint64) address.Address {\n\t// update lastUnspentAddressIndex if necessary\n\taddressManager.spentAddressIndexes(addressIndex)\n\n\treturn addressManager.seed.Address(addressIndex)\n}", "func (client *Client) CreateSmartContract(contract *ContractConfiguration) (_ *Response, err error) {\n\tpath := \"/contract\"\n\turi := fmt.Sprintf(\"%s%s\", client.apiBaseURL, path)\n\tb, err := json.Marshal(contract)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := client.httpClient.Post(uri, \"content/json\", bytes.NewBuffer(b))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\t_ = resp.Body.Close()\n\t}()\n\tvar statusMessage []byte\n\tstatusMessage, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresponse := Response{\n\t\tResponse: statusMessage,\n\t\tStatus: resp.StatusCode,\n\t\tOK: 200 <= resp.StatusCode && 300 > resp.StatusCode,\n\t}\n\treturn &response, err\n}" ]
[ "0.660583", "0.65768284", "0.655611", "0.6459867", "0.6374091", "0.6367556", "0.6301462", "0.62489724", "0.62117654", "0.6106471", "0.60594046", "0.6023272", "0.60168755", "0.60069805", "0.5987559", "0.5911845", "0.5869427", "0.5815969", "0.5815888", "0.57545626", "0.57401866", "0.5701813", "0.56968653", "0.5658971", "0.5631528", "0.56228966", "0.5617451", "0.55719334", "0.5559565", "0.55457634", "0.5519083", "0.5495622", "0.5468455", "0.54226816", "0.54176146", "0.54170996", "0.54032004", "0.53940773", "0.53865725", "0.53764737", "0.5370866", "0.5368728", "0.53640395", "0.5359734", "0.5352188", "0.5339282", "0.5308125", "0.5307646", "0.5305161", "0.53004134", "0.52951604", "0.5278469", "0.52724963", "0.5253072", "0.5232919", "0.5230944", "0.5220084", "0.5212395", "0.5202409", "0.5187587", "0.5180557", "0.5173938", "0.517191", "0.5167127", "0.5162654", "0.5152753", "0.515256", "0.51474184", "0.5136245", "0.5133833", "0.51297957", "0.511852", "0.5117613", "0.51100665", "0.51089656", "0.51070434", "0.51015645", "0.5100754", "0.5087843", "0.50848246", "0.5080746", "0.5080233", "0.50756437", "0.50729704", "0.50721425", "0.50693136", "0.5065623", "0.5064168", "0.50636464", "0.5062529", "0.5057743", "0.5055746", "0.50496346", "0.504041", "0.50402397", "0.5030658", "0.502603", "0.5025736", "0.5023435", "0.5022829" ]
0.81269246
0
GetActive will get all active addresses for an asset
func (as *AddressService) GetActive() ([]*Address, error) { return as.getAddresses("deposit") }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *AccountsListCall) Active(active bool) *AccountsListCall {\n\tc.urlParams_.Set(\"active\", fmt.Sprint(active))\n\treturn c\n}", "func (w *Wallet) activeData(dbtx walletdb.ReadTx) ([]btcutil.Address, []wtxmgr.Credit, er.R) {\n\taddrmgrNs := dbtx.ReadBucket(waddrmgrNamespaceKey)\n\ttxmgrNs := dbtx.ReadBucket(wtxmgrNamespaceKey)\n\n\tvar addrs []btcutil.Address\n\terr := w.Manager.ForEachActiveAddress(addrmgrNs, func(addr btcutil.Address) er.R {\n\t\taddrs = append(addrs, addr)\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tunspent, err := w.TxStore.GetUnspentOutputs(txmgrNs)\n\treturn addrs, unspent, err\n}", "func (c *AdsListCall) Active(active bool) *AdsListCall {\n\tc.urlParams_.Set(\"active\", fmt.Sprint(active))\n\treturn c\n}", "func (c *CreativesListCall) Active(active bool) *CreativesListCall {\n\tc.urlParams_.Set(\"active\", fmt.Sprint(active))\n\treturn c\n}", "func (a *Account) ActivePaymentAddresses() map[string]struct{} {\n\tinfos := a.KeyStore.ActiveAddresses()\n\n\taddrs := make(map[string]struct{}, len(infos))\n\tfor _, info := range infos {\n\t\taddrs[info.Address().EncodeAddress()] = struct{}{}\n\t}\n\n\treturn addrs\n}", "func (a *Account) ActivePaymentAddresses() map[string]struct{} {\n\tinfos := a.ActiveAddresses()\n\n\taddrs := make(map[string]struct{}, len(infos))\n\tfor _, info := range infos {\n\t\taddrs[info.Address().EncodeAddress()] = struct{}{}\n\t}\n\n\treturn addrs\n}", "func ActiveAddresses() map[string]net.Interface {\n\tresult := make(map[string]net.Interface)\n\tif iFaces, err := net.Interfaces(); err == nil {\n\t\tfor _, iFace := range iFaces {\n\t\t\tconst interesting = net.FlagUp | net.FlagBroadcast\n\t\t\tif iFace.Flags&interesting == interesting {\n\t\t\t\tif name := Address(iFace); name != \"\" {\n\t\t\t\t\tresult[name] = iFace\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn result\n}", "func (k Keeper) GetActiveValidatorList(ctx sdk.Context) ([]sdk.AccAddress, error) {\n\tvar result []sdk.AccAddress\n\tstore := ctx.KVStore(k.dataStoreKey)\n\tit := store.Iterator(nil, nil)\n\tdefer it.Close()\n\tfor ; it.Valid(); it.Next() {\n\t\tvar value types.Info\n\t\tif err := proto.Unmarshal(it.Value(), &value); err != nil {\n\t\t\tpanic(errors.Wrap(err, \"cannot unmarshal info\"))\n\t\t}\n\t\tif !value.IsActive() {\n\t\t\tcontinue\n\t\t}\n\t\taddr := sdk.AccAddress(it.Key())\n\t\tresult = append(result, addr)\n\t}\n\n\treturn result, nil\n}", "func (c *RemarketingListsListCall) Active(active bool) *RemarketingListsListCall {\n\tc.urlParams_.Set(\"active\", fmt.Sprint(active))\n\treturn c\n}", "func (eth *Eth) ActiveAddress() string {\n\tkeypair := eth.keyManager.KeyPair()\n\taddr := ethutil.Bytes2Hex(keypair.Address())\n\treturn addr\n}", "func (mod *EthModule) ActiveAddress() string {\n\treturn mod.eth.ActiveAddress()\n}", "func (c *TargetableRemarketingListsListCall) Active(active bool) *TargetableRemarketingListsListCall {\n\tc.urlParams_.Set(\"active\", fmt.Sprint(active))\n\treturn c\n}", "func (am *AccountManager) RescanActiveAddresses() error {\n\tvar job *RescanJob\n\tfor _, a := range am.AllAccounts() {\n\t\tacctJob, err := a.RescanActiveJob()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif job == nil {\n\t\t\tjob = acctJob\n\t\t} else {\n\t\t\tjob.Merge(acctJob)\n\t\t}\n\t}\n\tif job != nil {\n\t\t// Submit merged job and block until rescan completes.\n\t\tjobFinished := am.rm.SubmitJob(job)\n\t\t<-jobFinished\n\t}\n\n\treturn nil\n}", "func (v Account) GetActiveOffers(params AccountGetActiveOffersParams) 
(*AccountGetActiveOffersResponse, error) {\n\tr, err := v.API.Request(\"account.getActiveOffers\", params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar resp AccountGetActiveOffersResponse\n\terr = json.Unmarshal(r, &resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}", "func (o *InlineResponse20075StatsBilling) GetActive() string {\n\tif o == nil || o.Active == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Active\n}", "func (cluster *HttpCluster) Active() []string {\n\tcluster.RLock()\n\tdefer cluster.RUnlock()\n\tmember := cluster.active\n\tlist := make([]string, 0)\n\tfor i := 0; i < cluster.size; i++ {\n\t\tif member.status == MEMBER_AVAILABLE {\n\t\t\tlist = append(list, member.hostname)\n\t\t}\n\t}\n\treturn list\n}", "func (_NodeSpace *NodeSpaceCaller) ActiveNodeAddresses(opts *bind.CallOpts, arg0 *big.Int) (common.Address, error) {\n\tvar out []interface{}\n\terr := _NodeSpace.contract.Call(opts, &out, \"activeNodeAddresses\", arg0)\n\n\tif err != nil {\n\t\treturn *new(common.Address), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)\n\n\treturn out0, err\n\n}", "func (_BaseContentSpace *BaseContentSpaceCaller) ActiveNodeAddresses(opts *bind.CallOpts, arg0 *big.Int) (common.Address, error) {\n\tvar out []interface{}\n\terr := _BaseContentSpace.contract.Call(opts, &out, \"activeNodeAddresses\", arg0)\n\n\tif err != nil {\n\t\treturn *new(common.Address), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)\n\n\treturn out0, err\n\n}", "func (c *DirectorySitesListCall) Active(active bool) *DirectorySitesListCall {\n\tc.urlParams_.Set(\"active\", fmt.Sprint(active))\n\treturn c\n}", "func (a *Account) SortedActivePaymentAddresses() []string {\n\tinfos := a.KeyStore.SortedActiveAddresses()\n\n\taddrs := make([]string, len(infos))\n\tfor i, info := range infos {\n\t\taddrs[i] = info.Address().EncodeAddress()\n\t}\n\n\treturn addrs\n}", "func (r Virtual_Guest) GetActiveTransactions() (resp []datatypes.Provisioning_Version1_Transaction, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Virtual_Guest\", \"getActiveTransactions\", nil, &r.Options, &resp)\n\treturn\n}", "func (qs InstantprofileQS) ActiveGe(v bool) InstantprofileQS {\n\treturn qs.filter(`\"active\" >=`, v)\n}", "func (a *Account) SortedActivePaymentAddresses() []string {\n\tinfos := a.Wallet.SortedActiveAddresses()\n\n\taddrs := make([]string, len(infos))\n\tfor i, info := range infos {\n\t\taddrs[i] = info.Address().EncodeAddress()\n\t}\n\n\treturn addrs\n}", "func (o *Swapspace) GetActive(ctx context.Context) (active bool, err error) {\n\terr = o.object.CallWithContext(ctx, \"org.freedesktop.DBus.Properties.Get\", 0, InterfaceSwapspace, \"Active\").Store(&active)\n\treturn\n}", "func (rs *rootResolver) AccountsActive() (hexutil.Uint64, error) {\n\treturn repository.R().AccountsActive()\n}", "func (s *Pool) getActive(key interface{}) (*GroupInstance, bool) {\n\tactive, exists := s.actives.Load(key)\n\tif !exists {\n\t\treturn nil, false\n\t} else {\n\t\tins := active.(*GroupInstance)\n\t\treturn ins, !ins.IsRetired()\n\t}\n}", "func (svc *inmemService) GetAddresses(ctx context.Context, profileID string) ([]Address, error) {\n\n\t// Get a Read Lock on the svc for atomic read access to the datastore\n\tsvc.mtx.RLock()\n\n\t// Immediately set up a lock release to occur when the function finishes\n\tdefer svc.mtx.RUnlock()\n\n\t// Check to make sure there is a profile that corresponds to the passed in profile 
and save the found profile to a profile variable\n\tprofile, ok := svc.profiles[profileID]\n\n\t// If no profile was found for the passed in ID\n\tif !ok {\n\n\t\t// Return error informing the caller that the profile to which the addresses should have been associated was not found\n\t\treturn nil, ErrNotFound\n\t}\n\n\t// Return all addresses associated with the profile that was passed in and a nil error value\n\treturn profile.Addresses, nil\n}", "func (c Calendars) Active() Calendars {\n\tcals := make(Calendars)\n\tfor k, cal := range c {\n\t\tif cal.active {\n\t\t\tcals[k] = cal\n\t\t}\n\t}\n\treturn cals\n}", "func (s *Identity) AccountsGET(w http.ResponseWriter, r *http.Request) {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\twriteResponse(s.addresses, w, r)\n}", "func (a *addrBook) getAddresses() []*addrInfo {\n\ta.mu.Lock()\n\tdefer a.mu.Unlock()\n\n\taddrIndexLen := len(a.addrIndex)\n\tif addrIndexLen == 0 {\n\t\treturn nil\n\t}\n\n\taddrs := make([]*addrInfo, 0, addrIndexLen)\n\tfor _, v := range a.addrIndex {\n\t\taddrs = append(addrs, v.Addr)\n\t}\n\n\treturn addrs\n}", "func (o ServiceEmailsOnPushOutput) Active() pulumi.BoolOutput {\n\treturn o.ApplyT(func(v *ServiceEmailsOnPush) pulumi.BoolOutput { return v.Active }).(pulumi.BoolOutput)\n}", "func (c Client) GetActiveListings() {\n\tc.makeGetRequest(\"listings/active\")\n}", "func (config *Config) GetActive() *CacheConfig {\n\tactive := config.GetActiveProfile()\n\n\tInitDevSpaceConfig(config, active)\n\treturn config.Profiles[active]\n}", "func (t *Tracer) GetActiveConnections(_ string) (*network.Connections, error) {\n\treturn nil, ebpf.ErrNotImplemented\n}", "func (c *AccountUserProfilesListCall) Active(active bool) *AccountUserProfilesListCall {\n\tc.urlParams_.Set(\"active\", fmt.Sprint(active))\n\treturn c\n}", "func (us *UserService) GetAllActive(ctx context.Context) ([]user.User, error) {\n\tctx, cancel := context.WithTimeout(ctx, waitTime*time.Second)\n\tdefer cancel()\n\n\tusers, err := us.repository.GetAllActive(ctx)\n\tif err != nil {\n\t\tus.log.Error(err)\n\t\treturn nil, err\n\t}\n\n\treturn users, nil\n}", "func (r Virtual_Guest) GetActiveTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Virtual_Guest\", \"getActiveTransaction\", nil, &r.Options, &resp)\n\treturn\n}", "func (o *CommitteeInfoResponse) GetActive() bool {\n\tif o == nil {\n\t\tvar ret bool\n\t\treturn ret\n\t}\n\n\treturn o.Active\n}", "func (c *Contract) IsActive() bool {\n\treturn c.Status == \"Active\"\n}", "func (c *Controller) ActiveOffers() http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\toffers, err := c.offerService.Active()\n\n\t\tif err != nil {\n\t\t\trespond(w, r, http.StatusInternalServerError, ErrProcessingFailed.Error())\n\t\t\treturn\n\t\t}\n\n\t\trespond(w, r, http.StatusOK, offers)\n\t\treturn\n\t}\n}", "func (o *InlineResponse20075StatsBilling) SetActive(v string) {\n\to.Active = &v\n}", "func (qs InstantprofileQS) ActiveEq(v bool) InstantprofileQS {\n\treturn qs.filter(`\"active\" =`, v)\n}", "func (o *RuleActionStore) GetActive() bool {\n\tif o == nil || o.Active == nil {\n\t\tvar ret bool\n\t\treturn ret\n\t}\n\treturn *o.Active\n}", "func (acn *Account) Active() bool {\n\treturn acn.active\n}", "func (w *Wallet) SortedActivePaymentAddresses() ([]string, er.R) {\n\tvar addrStrs []string\n\terr := walletdb.View(w.db, func(tx walletdb.ReadTx) er.R {\n\t\taddrmgrNs := tx.ReadBucket(waddrmgrNamespaceKey)\n\t\treturn 
w.Manager.ForEachActiveAddress(addrmgrNs, func(addr btcutil.Address) er.R {\n\t\t\taddrStrs = append(addrStrs, addr.EncodeAddress())\n\t\t\treturn nil\n\t\t})\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsort.Strings(addrStrs)\n\treturn addrStrs, nil\n}", "func (h *HitBTC) GetActiveorders(ctx context.Context, currency string) ([]Order, error) {\n\tvar resp []Order\n\terr := h.SendAuthenticatedHTTPRequest(ctx, exchange.RestSpot, http.MethodGet,\n\t\torders+\"?symbol=\"+currency,\n\t\turl.Values{},\n\t\ttradingRequests,\n\t\t&resp)\n\n\treturn resp, err\n}", "func (as *AddressService) GetSaved() ([]*Address, error) {\n\treturn as.getAddresses(\"withdraw\")\n}", "func (la *leastActive) Get(ctx context.Context, opts grpc.BalancerGetOptions) (addr grpc.Address, put func(), err error) {\n\n\tvar ch chan struct{}\n\tla.mu.Lock()\n\tif la.done {\n\t\tla.mu.Unlock()\n\t\terr = grpc.ErrClientConnClosing\n\t\treturn\n\t}\n\n\tif len(la.addrs) > 0 {\n\t\tif la.next >= len(la.addrs) {\n\t\t\tla.next = 0\n\t\t}\n\t\tnext := la.next\n\t\tfor {\n\t\t\ta := la.addrs[next]\n\t\t\tnext = la.getNext()\n\t\t\tif a.Connected {\n\t\t\t\taddr = a.Addr\n\t\t\t\tla.next = next\n\t\t\t\ta.AddActive()\n\t\t\t\tput = func() {\n\t\t\t\t\ta.Lock()\n\t\t\t\t\ta.SubActive()\n\t\t\t\t\ta.UnLock()\n\t\t\t\t\tfmt.Println(\"rpc call return\")\n\t\t\t\t}\n\t\t\t\tla.mu.Unlock()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif next == la.next {\n\t\t\t\t// Has iterated all the possible address but none is connected.\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif !opts.BlockingWait {\n\t\tif len(la.addrs) == 0 {\n\t\t\tla.mu.Unlock()\n\t\t\terr = grpc.Errorf(codes.Unavailable, \"there is no address available\")\n\t\t\treturn\n\t\t}\n\n\t\t// Returns the next addr on la.addrs for failfast RPCs.\n\t\taddr = la.addrs[la.next].Addr\n\t\tla.addrs[la.next].AddActive()\n\t\tput = func() {\n\t\t\tla.addrs[la.next].Lock()\n\t\t\tla.addrs[la.next].SubActive()\n\t\t\tla.addrs[la.next].UnLock()\n\t\t\tfmt.Println(\"rpc call return\")\n\t\t}\n\t\tla.next = la.getNext()\n\t\tla.mu.Unlock()\n\t\tfmt.Println(\"return2:\", addr.Addr)\n\t\treturn\n\t}\n\t// Wait on la.waitCh for non-failfast RPCs.\n\tif la.waitCh == nil {\n\t\tch = make(chan struct{})\n\t\tla.waitCh = ch\n\t} else {\n\t\tch = la.waitCh\n\t}\n\tla.mu.Unlock()\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\terr = ctx.Err()\n\t\t\treturn\n\t\tcase <-ch:\n\t\t\tla.mu.Lock()\n\t\t\tif la.done {\n\t\t\t\tla.mu.Unlock()\n\t\t\t\terr = grpc.ErrClientConnClosing\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif len(la.addrs) > 0 {\n\n\t\t\t\tif la.next >= len(la.addrs) {\n\t\t\t\t\tla.next = 0\n\t\t\t\t}\n\t\t\t\tnext := la.next\n\t\t\t\tfor {\n\t\t\t\t\ta := la.addrs[next]\n\t\t\t\t\tnext = la.getNext()\n\t\t\t\t\tif a.Connected {\n\t\t\t\t\t\taddr = a.Addr\n\t\t\t\t\t\tla.next = next\n\t\t\t\t\t\ta.AddActive()\n\t\t\t\t\t\tput = func() {\n\t\t\t\t\t\t\ta.Lock()\n\t\t\t\t\t\t\ta.SubActive()\n\t\t\t\t\t\t\ta.UnLock()\n\t\t\t\t\t\t\tfmt.Println(\"rpc call return\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\tla.mu.Unlock()\n\t\t\t\t\t\tfmt.Println(\"return3:\", addr.Addr)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tif next == la.next {\n\t\t\t\t\t\t// Has iterated all the possible address but none is connected.\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t// The newly added addr got removed by Down() again.\n\t\t\tif la.waitCh == nil {\n\t\t\t\tch = make(chan struct{})\n\t\t\t\tla.waitCh = ch\n\t\t\t} else {\n\t\t\t\tch = la.waitCh\n\t\t\t}\n\t\t\tla.mu.Unlock()\n\t\t}\n\t}\n}", "func ActiveScan(urls,apis 
string, options OptionsZAP){\n\tRun(urls,apis,AScanAPI, options)\n}", "func (q *Queue) ActiveTools() map[string]common.Tool {\n\tq.RLock()\n\tdefer q.RUnlock()\n\n\t// Cycle through all the attached resources for unique tools\n\tvar tools = make(map[string]common.Tool)\n\tfor _, res := range q.pool {\n\t\t// Check if the tool is active for jobs (AKA running or paused)\n\t\tif res.Status != common.STATUS_QUIT {\n\t\t\t// Resource is paused or running so get the tools it provides\n\t\t\tfor uuid, t := range res.Tools {\n\t\t\t\t// Check if tool already exists in the tools map\n\t\t\t\t_, ok := tools[uuid]\n\t\t\t\tif !ok {\n\t\t\t\t\t// Tool doesn't exit already so add it\n\t\t\t\t\ttools[uuid] = t\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn tools\n}", "func GetAll(query map[string]int64) ([]Models.Agent, error) {\n\tvar agents []Models.Agent\n\n\tqueryAgent := `\n\t\tSELECT agent.id, agent.code AS code, agent.name AS name, \n\t\tcontact.city_id AS city_id, contact.country_id AS country_id, \n\t\tcity.name AS city_name, country.name AS country_name\n\t\tFROM companies AS agent\n\t\tLEFT JOIN contacts AS contact ON contact.id = agent.contact_id\n\t\tLEFT JOIN cities AS city ON city.id = contact.city_id\n\t\tLEFT JOIN countries AS country ON country.id = contact.country_id\n\t\tWHERE agent.status = 'ACTIVE' and agent.company_type_id = $1\n\t`\n\trows, err := dbMod.DB.Query(queryAgent, query[\"type\"])\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn nil, err\n\t}\n\tfor rows.Next() {\n\t\tvar agent Models.Agent\n\n\t\tif err = rows.Scan(\n\t\t\t&agent.ID, &agent.Code, &agent.Name, &agent.CityID,\n\t\t\t&agent.CountryID, &agent.CityName, &agent.CountryName,\n\t\t); err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\tagents = append(agents, agent)\n\t}\n\n\treturn agents, nil\n}", "func (a *addrBook) getAddresses() []*node.Info {\n\ta.mtx.Lock()\n\tdefer a.mtx.Unlock()\n\n\taddrIndexLen := len(a.addrIndex)\n\tif addrIndexLen == 0 {\n\t\treturn nil\n\t}\n\n\taddrs := make([]*node.Info, 0, addrIndexLen)\n\tfor _, v := range a.addrIndex {\n\t\taddrs = append(addrs, v.na)\n\t}\n\n\treturn addrs\n}", "func (o JobStatusPtrOutput) Active() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v JobStatus) *int { return v.Active }).(pulumi.IntPtrOutput)\n}", "func GetAllAddresses() []*AddressDAL {\n\taddresses := []*AddressDAL{}\n\tdb.DB().Find(&addresses)\n\treturn addresses\n}", "func (_UsersData *UsersDataFilterer) FilterOnSetActive(opts *bind.FilterOpts) (*UsersDataOnSetActiveIterator, error) {\n\n\tlogs, sub, err := _UsersData.contract.FilterLogs(opts, \"onSetActive\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &UsersDataOnSetActiveIterator{contract: _UsersData.contract, event: \"onSetActive\", logs: logs, sub: sub}, nil\n}", "func (a Accounts) Get(asset string) *Account {\n\tfor i := range a.Datas {\n\t\tif a.Datas[i].Balance.Currency == asset {\n\t\t\treturn &a.Datas[i]\n\t\t}\n\t}\n\treturn nil\n}", "func (c *CoordinatorHelper) AllAddresses(\n\tctx context.Context,\n\tdbTx storage.DatabaseTransaction,\n) ([]string, error) {\n\treturn c.keyStorage.GetAllAddressesTransactional(ctx, dbTx)\n}", "func Active(nomad *NomadServer, job *Job, host *Host) bool {\n\tallocs := Allocs(nomad)\n\tfor _, alloc := range allocs {\n\t\tif alloc.NodeID == host.ID && strings.Contains(alloc.Name, job.Name) {\n\t\t\tif alloc.DesiredStatus != \"stop\" && strings.Contains(alloc.Name, \"worker\") {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func (p *Poloniex) 
GetActiveLoans(ctx context.Context) (ActiveLoans, error) {\n\tresult := ActiveLoans{}\n\treturn result, p.SendAuthenticatedHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, poloniexActiveLoans, url.Values{}, &result)\n}", "func Active() bool {\n\treturn isActive\n}", "func GetActiveAttrib(program uint32, index uint32, bufSize int32, length *int32, size *int32, xtype *uint32, name *uint8) {\n\tsyscall.Syscall9(gpGetActiveAttrib, 7, uintptr(program), uintptr(index), uintptr(bufSize), uintptr(unsafe.Pointer(length)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(xtype)), uintptr(unsafe.Pointer(name)), 0, 0)\n}", "func Activate(options MetadataOptions, context *Context) (*http.Response, error) {\n\tvar (\n\t\tassetMetadata *model.PlanetAssetMetadata\n\t\terr error\n\t)\n\tif assetMetadata, err = GetPlanetAssets(options, context); err != nil {\n\t\treturn nil, err\n\t}\n\treturn planetRequest(planetRequestInput{method: \"POST\", inputURL: assetMetadata.ActivationURL.String()}, context)\n}", "func (w *Wallet) AllAddress() []string {\n\tadrs := make([]string, 0, len(w.AddressChange)+len(w.AddressPublic))\n\tfor adr := range w.AddressChange {\n\t\tadrs = append(adrs, adr)\n\t}\n\tfor adr := range w.AddressPublic {\n\t\tadrs = append(adrs, adr)\n\t}\n\treturn adrs\n}", "func (ids identities) getAddresses() []common.Address {\n\taddresses := make([]common.Address, len(ids))\n\tfor i := 0; i < len(ids); i++ {\n\t\taddresses[i] = ids[i].addr\n\t}\n\treturn addresses\n}", "func (o JobStatusOutput) Active() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v JobStatus) *int { return v.Active }).(pulumi.IntPtrOutput)\n}", "func GetActiveReports() []Report {\n\treturn reports\n}", "func (s *SmartContract) GetAllAssets(ctx contractapi.TransactionContextInterface) ([]*Asset, error) {\n\t// range query with empty string for startKey and endKey does an\n\t// open-ended query of all assets in the chaincode namespace.\n\tresultsIterator, err := ctx.GetStub().GetStateByRange(\"\", \"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resultsIterator.Close()\n\n\tvar assets []*Asset\n\tfor resultsIterator.HasNext() {\n\t\tqueryResponse, err := resultsIterator.Next()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvar asset Asset\n\t\terr = json.Unmarshal(queryResponse.Value, &asset)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tassets = append(assets, &asset)\n\t}\n\n\treturn assets, nil\n}", "func (c Calendars) dumpActive() error {\n\tpath, err := c.configPath()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tactive := make([]string, 0, len(c))\n\tfor id, cal := range c {\n\t\tif cal.active {\n\t\t\tactive = append(active, id)\n\t\t}\n\t}\n\n\treturn writeLines(active, path)\n}", "func GetActiveAttrib(program uint32, index uint32, bufSize int32, length *int32, size *int32, xtype *uint32, name *uint8) {\n\tC.glowGetActiveAttrib(gpGetActiveAttrib, (C.GLuint)(program), (C.GLuint)(index), (C.GLsizei)(bufSize), (*C.GLsizei)(unsafe.Pointer(length)), (*C.GLint)(unsafe.Pointer(size)), (*C.GLenum)(unsafe.Pointer(xtype)), (*C.GLchar)(unsafe.Pointer(name)))\n}", "func GetActiveAttrib(program uint32, index uint32, bufSize int32, length *int32, size *int32, xtype *uint32, name *uint8) {\n\tC.glowGetActiveAttrib(gpGetActiveAttrib, (C.GLuint)(program), (C.GLuint)(index), (C.GLsizei)(bufSize), (*C.GLsizei)(unsafe.Pointer(length)), (*C.GLint)(unsafe.Pointer(size)), (*C.GLenum)(unsafe.Pointer(xtype)), (*C.GLchar)(unsafe.Pointer(name)))\n}", "func (e Endpoints) GetAddresses(ctx context.Context, 
profileID string) ([]Address, error) {\n\n\t// TODO: Create detailed ref spec\n\trequest := getAddressesRequest{ProfileID: profileID}\n\n\tresponse, err := e.GetAddressesEndpoint(ctx, request)\n\n\tif err != nil {\n\t\treturn []Address{}, nil\n\t}\n\n\tresp := response.(getAddressesResponse)\n\n\treturn resp.Addresses, resp.Err\n\n}", "func GetQueryActiveValidators(route string, cdc *codec.Codec) *cobra.Command {\n\treturn &cobra.Command{\n\t\tUse: \"active-validators\",\n\t\tArgs: cobra.NoArgs,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tcliCtx := context.NewCLIContext().WithCodec(cdc)\n\t\t\tbz, _, err := cliCtx.Query(fmt.Sprintf(\"custom/%s/%s\", route, types.QueryActiveValidators))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn printOutput(cliCtx, cdc, bz, &[]types.QueryActiveValidatorResult{})\n\t\t},\n\t}\n}", "func (program Program) GetActiveAttributes() int32 {\n\tvar params int32\n\tgl.GetProgramiv(uint32(program), gl.ACTIVE_ATTRIBUTES, &params)\n\treturn params\n}", "func GetCmdQueryActive(queryRoute string, cdc *codec.Codec) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: oracle.QueryActive,\n\t\tArgs: cobra.NoArgs,\n\t\tShort: \"Query the active list of Terra assets recognized by the oracle\",\n\t\tLong: strings.TrimSpace(`\nQuery the active list of Terra assets recognized by the oracle.\n\n$ terracli query oracle active\n`),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tcliCtx := context.NewCLIContext().WithCodec(cdc)\n\n\t\t\tres, err := cliCtx.QueryWithData(fmt.Sprintf(\"custom/%s/%s\", queryRoute, oracle.QueryActive), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tvar actives oracle.QueryActiveResponse\n\t\t\tcdc.MustUnmarshalJSON(res, &actives)\n\t\t\treturn cliCtx.PrintOutput(actives)\n\t\t},\n\t}\n\n\treturn cmd\n}", "func (o *CommitteeInfoResponse) SetActive(v bool) {\n\to.Active = v\n}", "func (c *Client) SiteActiveClients(site string, filterMac string) (*SiteActiveClientsResponse, error) {\n\textPath := \"stat/sta\"\n\tif filterMac != \"\" {\n\t\textPath = extPath + \"/\" + strings.ToLower(filterMac)\n\t}\n\n\tvar resp SiteActiveClientsResponse\n\terr := c.doSiteRequest(http.MethodGet, site, extPath, nil, &resp)\n\treturn &resp, err\n}", "func (pg *PGStorage) GetAddresses(sql string, args ...interface{}) ([]*Address, error) {\n\tsql = \"SELECT id, updated_at, hash, income, outcome, ballance FROM address \" + sql\n\n\trows, err := pg.con.Query(sql, args)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\taddresses := make([]*Address, 0)\n\n\tfor rows.Next() {\n\t\ta := &Address{}\n\t\tif err := rows.Scan(\n\t\t\t&a.ID,\n\t\t\t&a.UpdatedAt,\n\t\t\t&a.Hash,\n\t\t\t&a.Income,\n\t\t\t&a.Outcome,\n\t\t\t&a.Ballance,\n\t\t); err != nil {\n\t\t\treturn addresses, err\n\t\t}\n\t\taddresses = append(addresses, a)\n\t}\n\treturn addresses, err\n}", "func (_Posminer *PosminerCaller) ActiveUsers(opts *bind.CallOpts) (struct {\n\tLastTime *big.Int\n\tActiveNum *big.Int\n}, error) {\n\tret := new(struct {\n\t\tLastTime *big.Int\n\t\tActiveNum *big.Int\n\t})\n\tout := ret\n\terr := _Posminer.contract.Call(opts, out, \"ActiveUsers\")\n\treturn *ret, err\n}", "func GetActiveAttrib(program Uint, index Uint, bufSize Sizei, length *Sizei, size *Int, kind *Enum, name []byte) {\n\tcprogram, _ := (C.GLuint)(program), cgoAllocsUnknown\n\tcindex, _ := (C.GLuint)(index), cgoAllocsUnknown\n\tcbufSize, _ := (C.GLsizei)(bufSize), cgoAllocsUnknown\n\tclength, _ := (*C.GLsizei)(unsafe.Pointer(length)), 
cgoAllocsUnknown\n\tcsize, _ := (*C.GLint)(unsafe.Pointer(size)), cgoAllocsUnknown\n\tckind, _ := (*C.GLenum)(unsafe.Pointer(kind)), cgoAllocsUnknown\n\tcname, _ := (*C.GLchar)(unsafe.Pointer((*sliceHeader)(unsafe.Pointer(&name)).Data)), cgoAllocsUnknown\n\tC.glGetActiveAttrib(cprogram, cindex, cbufSize, clength, csize, ckind, cname)\n}", "func (c *Component) GetActive() bool {\n\treturn c.active\n}", "func Active() bool {\n\treturn DefaultTracer.Active()\n}", "func (o GoogleCloudRetailV2alphaConditionOutput) ActiveTimeRange() GoogleCloudRetailV2alphaConditionTimeRangeArrayOutput {\n\treturn o.ApplyT(func(v GoogleCloudRetailV2alphaCondition) []GoogleCloudRetailV2alphaConditionTimeRange {\n\t\treturn v.ActiveTimeRange\n\t}).(GoogleCloudRetailV2alphaConditionTimeRangeArrayOutput)\n}", "func (o GoogleCloudRetailV2alphaConditionPtrOutput) ActiveTimeRange() GoogleCloudRetailV2alphaConditionTimeRangeArrayOutput {\n\treturn o.ApplyT(func(v *GoogleCloudRetailV2alphaCondition) []GoogleCloudRetailV2alphaConditionTimeRange {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.ActiveTimeRange\n\t}).(GoogleCloudRetailV2alphaConditionTimeRangeArrayOutput)\n}", "func GetActiveAttrib(p Program, index uint32) (name string, size int, ty Enum) {\n\tvar length, si int32\n\tvar typ uint32\n\tname = strings.Repeat(\"\\x00\", 256)\n\tcname := gl.Str(name)\n\tgl.GetActiveAttrib(p.Value, uint32(index), int32(len(name)-1), &length, &si, &typ, cname)\n\tname = name[:strings.IndexRune(name, 0)]\n\treturn name, int(si), Enum(typ)\n}", "func (cli *CLI) listAddresses() {\n\twallets, _ := wallet.CreateWallets()\n\tfor address := range wallets {\n\t\tfmt.Println(address)\n\t}\n}", "func (o *AllocationList) GetCashActive() float64 {\n\tif o == nil {\n\t\tvar ret float64\n\t\treturn ret\n\t}\n\n\treturn o.CashActive\n}", "func (as *AddressbookService) GetUserAddresses(id string) ([]models.Address, error) {\n\treturn as.repo.GetUserAddresses(id)\n}", "func (ws *Wallets) GetAddresses() []string {\n\tvar addresses []string\n\n\tfor address := range ws.Wallets {\n\t\taddresses = append(addresses, address)\n\t}\n\n\treturn addresses\n}", "func (o *InlineResponse20075StatsBilling) HasActive() bool {\n\tif o != nil && o.Active != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (log *inMemorySagaLog) GetActiveSagas() ([]string, error) {\n\tlog.mutex.RLock()\n\tdefer log.mutex.RUnlock()\n\n\tkeys := make([]string, 0, len(log.sagas))\n\n\tfor key, _ := range log.sagas {\n\t\tkeys = append(keys, key)\n\t}\n\n\treturn keys, nil\n}", "func (s UserSet) Active() bool {\n\tres, _ := s.RecordCollection.Get(models.NewFieldName(\"Active\", \"active\")).(bool)\n\treturn res\n}", "func (o IntegrationEmailsOnPushOutput) Active() pulumi.BoolOutput {\n\treturn o.ApplyT(func(v *IntegrationEmailsOnPush) pulumi.BoolOutput { return v.Active }).(pulumi.BoolOutput)\n}", "func (sc *TraceScope) Active() bool {\n\treturn sc.trc.Active()\n}", "func (c *Client) GetAddressesAdv(crypto string, addresses []string, options map[string]string) (resp *DataAddresses, e error) {\n\tif e = c.ValidateCrypto(crypto); e != nil {\n\t\treturn\n\t}\n\n\tresp = &DataAddresses{}\n\tvar path = crypto + \"/dashboards/addresses/\" + strings.Join(addresses, \",\")\n\treturn resp, c.LoadResponse(path, resp, options)\n}", "func CommandShowActive(conf Config, ctx, query Query) error {\n\tts, err := LoadTaskSet(conf.Repo, conf.IDsFile, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tquery = 
query.Merge(ctx)\n\tts.Filter(query)\n\tts.FilterByStatus(STATUS_ACTIVE)\n\tts.DisplayByNext(ctx, true)\n\n\treturn nil\n}", "func (d *Database) GetActiveAlerts() []interface{} {\n\tAlerts := make([]interface{}, 0)\n\n\trows, err := d.db.Query(\n\t\t`SELECT id, name, type, content, active, allow_dismiss, registered_only FROM alert WHERE active IS TRUE;`,\n\t)\n\n\tif err == nil {\n\t\tdefer rows.Close()\n\t\tfor rows.Next() {\n\t\t\tvar a Alert\n\n\t\t\tif err := rows.Scan(\n\t\t\t\t&a.AlertID,\n\t\t\t\t&a.Name,\n\t\t\t\t&a.Type,\n\t\t\t\t&a.Content,\n\t\t\t\t&a.Active,\n\t\t\t\t&a.AllowDismiss,\n\t\t\t\t&a.RegisteredOnly,\n\t\t\t); err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t} else {\n\t\t\t\tAlerts = append(Alerts, &a)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn Alerts\n}", "func (a *Account) RescanActiveJob() (*RescanJob, error) {\n\t// Determine the block necesary to start the rescan for all active\n\t// addresses.\n\theight := a.KeyStore.SyncHeight()\n\n\tactives := a.KeyStore.SortedActiveAddresses()\n\taddrs := make([]btcutil.Address, 0, len(actives))\n\tfor i := range actives {\n\t\taddrs = append(addrs, actives[i].Address())\n\t}\n\n\tunspents, err := a.TxStore.UnspentOutputs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\toutpoints := make([]*btcwire.OutPoint, 0, len(unspents))\n\tfor _, c := range unspents {\n\t\toutpoints = append(outpoints, c.OutPoint())\n\t}\n\n\tjob := &RescanJob{\n\t\tAddresses: map[*Account][]btcutil.Address{a: addrs},\n\t\tOutPoints: outpoints,\n\t\tStartHeight: height,\n\t}\n\treturn job, nil\n}", "func GetAllAddress(s *aklib.DBConfig) (map[string]*Address, error) {\n\tadrs := make(map[string]*Address)\n\terr := s.DB.View(func(txn *badger.Txn) error {\n\t\tit := txn.NewIterator(badger.DefaultIteratorOptions)\n\t\tdefer it.Close()\n\t\tprefix := []byte{byte(db.HeaderWalletAddress)}\n\t\tfor it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {\n\t\t\titem := it.Item()\n\t\t\tv, err := item.ValueCopy(nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tvar adr Address\n\t\t\tif err := arypack.Unmarshal(v, &adr); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tadrs[string(item.Key()[1:])] = &adr\n\t\t}\n\t\treturn nil\n\t})\n\treturn adrs, err\n}", "func (k Keeper) GetActiveValidators(ctx sdk.Context) ([]types.Validator, error) {\n\tvar result []types.Validator\n\tstore := ctx.KVStore(k.dataStoreKey)\n\tproposed := k.GetBlocksProposedByAll(ctx)\n\n\tit := store.Iterator(nil, nil)\n\tdefer it.Close()\n\tfor ; it.Valid(); it.Next() {\n\t\tvar value types.Info\n\t\tif err := proto.Unmarshal(it.Value(), &value); err != nil {\n\t\t\tpanic(errors.Wrap(err, \"cannot unmarshal info\"))\n\t\t}\n\t\tif !value.IsActive() {\n\t\t\tcontinue\n\t\t}\n\t\taddr := sdk.AccAddress(it.Key())\n\t\tresult = append(result, types.GenesisValidatorFromD(addr, value, proposed[addr.String()]))\n\t}\n\n\treturn result, nil\n}", "func (cp *Pool) Active() int64 {\n\tp := cp.pool()\n\tif p == nil {\n\t\treturn 0\n\t}\n\treturn p.Active()\n}" ]
[ "0.595874", "0.5908878", "0.5823863", "0.57964134", "0.57462436", "0.56345075", "0.5600323", "0.54584813", "0.5417571", "0.5400139", "0.5300115", "0.5276993", "0.52485746", "0.5168865", "0.51660424", "0.5158932", "0.513461", "0.51050174", "0.510044", "0.50979775", "0.5083802", "0.50743717", "0.50718147", "0.50339484", "0.5015984", "0.50102144", "0.49731946", "0.49728903", "0.4953585", "0.494153", "0.49409685", "0.49168965", "0.49122596", "0.4873417", "0.48632032", "0.48534423", "0.48363534", "0.48313692", "0.48164368", "0.47893137", "0.47804013", "0.47796163", "0.47794685", "0.47572595", "0.47550905", "0.47473383", "0.47460762", "0.47421563", "0.47122967", "0.47016904", "0.4699449", "0.46926662", "0.46820837", "0.46813592", "0.467019", "0.46675697", "0.4664755", "0.46585095", "0.46543655", "0.46541473", "0.4649693", "0.46442407", "0.46397185", "0.46186763", "0.46138364", "0.46125892", "0.46124482", "0.45967773", "0.4596502", "0.4596502", "0.4592222", "0.45911655", "0.4589562", "0.4588202", "0.4580803", "0.4572748", "0.4562038", "0.4553841", "0.4553514", "0.45425364", "0.45322478", "0.452852", "0.45211962", "0.45176795", "0.4507924", "0.45066744", "0.45057034", "0.45025882", "0.4500148", "0.4490988", "0.44887844", "0.44865918", "0.44818497", "0.4468172", "0.446273", "0.4462003", "0.44607437", "0.44570866", "0.44552597", "0.4450716" ]
0.7209901
0
GetSaved will get all saved addresses for an asset
func (as *AddressService) GetSaved() ([]*Address, error) { return as.getAddresses("withdraw") }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GetSavedSitemaps() []string {\n\treturn savedSitemaps\n}", "func (ws *Wallets) GetAddresses() []string {\n\tvar addresses []string\n\n\tfor address := range ws.Wallets {\n\t\taddresses = append(addresses, address)\n\t}\n\n\treturn addresses\n}", "func (svc *inmemService) GetAddresses(ctx context.Context, profileID string) ([]Address, error) {\n\n\t// Get a Read Lock on the svc for atomic read access to the datastore\n\tsvc.mtx.RLock()\n\n\t// Immediately set up a lock release to occur when the function finishes\n\tdefer svc.mtx.RUnlock()\n\n\t// Check to make sure there is a profile that corresponds to the passed in profile and save the found profile to a profile variable\n\tprofile, ok := svc.profiles[profileID]\n\n\t// If no profile was found for the passed in ID\n\tif !ok {\n\n\t\t// Return error informing the caller that the profile to which the addresses should have been associated was not found\n\t\treturn nil, ErrNotFound\n\t}\n\n\t// Return all addresses associated with the profile that was passed in and a nil error value\n\treturn profile.Addresses, nil\n}", "func (w *XPubWallet) GetAddresses() []cipher.Addresser {\n\treturn w.Entries.getAddresses()\n}", "func (c *ChunkRef) Save() ([]datastore.Property, error) {\n\treturn datastore.SaveStruct(c)\n}", "func (as *AddressService) GetActive() ([]*Address, error) {\n\treturn as.getAddresses(\"deposit\")\n}", "func (mock *StoreServiceMock) SaveCalls() []struct {\n\tEntry ytfeed.Entry\n} {\n\tvar calls []struct {\n\t\tEntry ytfeed.Entry\n\t}\n\tmock.lockSave.RLock()\n\tcalls = mock.calls.Save\n\tmock.lockSave.RUnlock()\n\treturn calls\n}", "func (account *Account) Saved() *SavedMedia {\r\n\treturn &SavedMedia{\r\n\t\tinst: account.inst,\r\n\t\tendpoint: urlFeedSaved,\r\n\t\terr: nil,\r\n\t}\r\n}", "func (s *Store) GetBulk(suffixes []string) ([]string, error) {\n\tanchorBytes, err := s.store.GetBulk(suffixes...)\n\tif err != nil {\n\t\treturn nil, orberrors.NewTransient(fmt.Errorf(\"failed to get did anchor reference: %w\", err))\n\t}\n\n\tanchors := make([]string, len(suffixes))\n\n\tfor i, a := range anchorBytes {\n\t\tif a == nil {\n\t\t\tanchors[i] = \"\"\n\t\t} else {\n\t\t\tanchors[i] = string(a)\n\t\t}\n\t}\n\n\tlogger.Debugf(\"retrieved latest anchors%s for suffixes%s\", anchors, suffixes)\n\n\treturn anchors, nil\n}", "func (pg *PGStorage) GetAddresses(sql string, args ...interface{}) ([]*Address, error) {\n\tsql = \"SELECT id, updated_at, hash, income, outcome, ballance FROM address \" + sql\n\n\trows, err := pg.con.Query(sql, args)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\taddresses := make([]*Address, 0)\n\n\tfor rows.Next() {\n\t\ta := &Address{}\n\t\tif err := rows.Scan(\n\t\t\t&a.ID,\n\t\t\t&a.UpdatedAt,\n\t\t\t&a.Hash,\n\t\t\t&a.Income,\n\t\t\t&a.Outcome,\n\t\t\t&a.Ballance,\n\t\t); err != nil {\n\t\t\treturn addresses, err\n\t\t}\n\t\taddresses = append(addresses, a)\n\t}\n\treturn addresses, err\n}", "func (s *Service) GetAddresses(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tsizeStr := r.FormValue(\"size\")\n\tprefix := r.FormValue(\"prefix\")\n\tif sizeStr == \"\" {\n\t\tsizeStr = defaultPageSize\n\t}\n\tdata := &Data{}\n\tdefer func() {\n\t\tif err := json.NewEncoder(w).Encode(data.Addresses); err != nil {\n\t\t\tutils.Logger().Warn().Err(err).Msg(\"cannot JSON-encode addresses\")\n\t\t}\n\t}()\n\n\tsize, err := strconv.Atoi(sizeStr)\n\tif err != nil || size > maxAddresses 
{\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tdata.Addresses, err = s.Storage.GetAddresses(size, prefix)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tutils.Logger().Warn().Err(err).Msg(\"wasn't able to fetch addresses from storage\")\n\t\treturn\n\t}\n}", "func (l *localLinker) Save(ctx context.Context, req *pbd.SaveRequest) (*pbd.Empty, error) {\n\tl.store[req.Key] = req.Value\n\treturn &pbd.Empty{}, nil\n}", "func (u *Unmarshal) AssetsExportGet(uuid string, chunk string) ([]byte, error) {\n\ts := u.NewService()\n\traw, err := s.AssetsExportGet(uuid, chunk)\n\treturn raw, err\n}", "func (mp *Map) Save() ([]datastore.Property, error) {\n\tvar d []datastore.Property\n\tfor k, v := range *mp {\n\t\td = append(d, datastore.Property{\n\t\t\tName: k,\n\t\t\tValue: v,\n\t\t\t// Force property to not be indexed. This allows more freedom of the\n\t\t\t// property name, i.e. may contain \".\".\n\t\t\tNoIndex: true,\n\t\t})\n\t}\n\treturn d, nil\n}", "func (m *Drive) GetBundles()([]DriveItemable) {\n return m.bundles\n}", "func (fs *FileAddrStorage) GetAddresses() ([]NetAddr, error) {\n\tbytes, err := ioutil.ReadFile(fs.filePath)\n\tif err != nil {\n\t\treturn nil, trace.ConvertSystemError(err)\n\t}\n\tvar addrs []NetAddr\n\tif len(bytes) > 0 {\n\t\terr = json.Unmarshal(bytes, &addrs)\n\t\tif err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t}\n\treturn addrs, nil\n}", "func FindSavedDesks(ctx context.Context, ac *uiauto.Context) ([]uiauto.NodeInfo, error) {\n\tsavedDeskItemView := nodewith.ClassName(\"SavedDeskItemView\")\n\tsavedDeskItemViewInfo, err := ac.NodesInfo(ctx, savedDeskItemView)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to find SavedDeskItemView\")\n\t}\n\treturn savedDeskItemViewInfo, nil\n}", "func (ws *WalletStore) Save() {\n\tvar buffer bytes.Buffer\n\tgob.Register(elliptic.P256())\n\tencoder := gob.NewEncoder(&buffer)\n\terr := encoder.Encode(ws.Wallets)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfile := ws.Config.GetWalletStoreFile(ws.NodeID)\n\terr = ioutil.WriteFile(file, buffer.Bytes(), 0644)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func (serv *TransactionService) SaveAll(dirPath string, trs []trs.Transaction) ([]stored_transactions.Transaction, error) {\n\tout := []stored_transactions.Transaction{}\n\tfor _, oneTrs := range trs {\n\t\toneObjDirPath := filepath.Join(dirPath, oneTrs.GetMetaData().GetID().String())\n\t\toneObj, oneObjErr := serv.Save(oneObjDirPath, oneTrs)\n\t\tif oneObjErr != nil {\n\t\t\treturn nil, oneObjErr\n\t\t}\n\n\t\tout = append(out, oneObj)\n\t}\n\n\treturn out, nil\n}", "func (cli *OpsGenieAlertV2Client) ListSavedSearches(req alertsv2.LisSavedSearchRequest) (*savedsearches.ListSavedSearchResponse, error) {\n\tvar response savedsearches.ListSavedSearchResponse\n\terr := cli.sendGetRequest(&req, &response)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &response, nil\n}", "func (m *MemoryRewardStorage) GetAll() []rewards.Reward {\n\treturn m.rewards\n}", "func (wlt *Wallet) GetAddresses() []cipher.Address {\n\taddrs := make([]cipher.Address, len(wlt.Entries))\n\tfor i, e := range wlt.Entries {\n\t\taddrs[i] = e.Address\n\t}\n\treturn addrs\n}", "func (r *Reserve) Save(s *Store) error {\n\tdata, _ := json.Marshal(s)\n\tif err := ioutil.WriteFile(r.path, data, 0644); err != nil {\n\t\treturn fmt.Errorf(\"Failed to set %s: %s\", r.name, err)\n\t}\n\treturn nil\n}", "func (mapping *ApplicationAccountStoreMapping) Save() error {\n\turl := 
buildRelativeURL(\"accountStoreMappings\")\n\tif mapping.Href != \"\" {\n\t\turl = mapping.Href\n\t}\n\n\treturn client.post(url, mapping, mapping)\n}", "func (fcb *FileCacheBackend) Save(ob types.OutgoingBatch) error {\n\tfilename := fcb.getFilename(fcb.getCacheFilename(ob))\n\tlog.Printf(\"Saving to %s\", filename)\n\tfile, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0600)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to save to %s - %s\", filename, err)\n\t}\n\tdefer file.Close()\n\tfor _, item := range ob.Values {\n\t\tfile.WriteString(item + \"\\n\")\n\t}\n\treturn nil\n}", "func (a *addrBook) getAddresses() []*addrInfo {\n\ta.mu.Lock()\n\tdefer a.mu.Unlock()\n\n\taddrIndexLen := len(a.addrIndex)\n\tif addrIndexLen == 0 {\n\t\treturn nil\n\t}\n\n\taddrs := make([]*addrInfo, 0, addrIndexLen)\n\tfor _, v := range a.addrIndex {\n\t\taddrs = append(addrs, v.Addr)\n\t}\n\n\treturn addrs\n}", "func GetAllAddresses() []*AddressDAL {\n\taddresses := []*AddressDAL{}\n\tdb.DB().Find(&addresses)\n\treturn addresses\n}", "func (b *Backend) Save(root internal.Root) ([]internal.Game, error) {\n\tvar gameJSON []byte\n\tvar games []internal.Game\n\terr := b.DB.Update(func(txn *badger.Txn) error {\n\t\tvar err error\n\t\tfor _, date := range root.Dates {\n\t\t\tdateString := date.DateString\n\t\t\tfor _, game := range date.Games {\n\t\t\t\tkey := fmt.Sprintf(\"%s:%d:%d\", dateString, game.Teams[\"away\"].Team.ID, game.Teams[\"home\"].Team.ID)\n\t\t\t\tgameJSON, err = json.Marshal(game)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\ttxn.Set([]byte(key), gameJSON)\n\t\t\t\tgames = append(games, game)\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\n\treturn games, err\n}", "func (m *Printer) GetShares()([]PrinterShareable) {\n val, err := m.GetBackingStore().Get(\"shares\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.([]PrinterShareable)\n }\n return nil\n}", "func (o *Service) GetAddresses() []string {\n\tif o == nil || o.Addresses == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn *o.Addresses\n}", "func GetAddressList() []common.Address {\n\treturn allSkrAddress\n}", "func (d *DiskStorage) GetAll() (map[string]Entry, error) {\n\treturn d.memStorage.GetAll()\n}", "func All(w http.ResponseWriter, r *http.Request) {\n\tvar result []Location\n\terr := store.Find(&result, bolthold.Where(\"Serial\").Eq(\"ce011711bd1668d80c\").Index(\"Serial\"))\n\tif err != nil {\n\t\tfmt.Println(\"Err\")\n\t\tfmt.Println(err)\n\t}\n\n\tjson.NewEncoder(w).Encode(result)\n\n}", "func (e Endpoints) GetAddresses(ctx context.Context, profileID string) ([]Address, error) {\n\n\t// TODO: Create detailed ref spec\n\trequest := getAddressesRequest{ProfileID: profileID}\n\n\tresponse, err := e.GetAddressesEndpoint(ctx, request)\n\n\tif err != nil {\n\t\treturn []Address{}, nil\n\t}\n\n\tresp := response.(getAddressesResponse)\n\n\treturn resp.Addresses, resp.Err\n\n}", "func (w *Wallet) AllAddress() []string {\n\tadrs := make([]string, 0, len(w.AddressChange)+len(w.AddressPublic))\n\tfor adr := range w.AddressChange {\n\t\tadrs = append(adrs, adr)\n\t}\n\tfor adr := range w.AddressPublic {\n\t\tadrs = append(adrs, adr)\n\t}\n\treturn adrs\n}", "func Save(data []*JSON) error {\n\treturn save(data, \"./extracted_jsons.json\")\n}", "func (epcb *EntryPointCreateBulk) SaveX(ctx context.Context) []*EntryPoint {\n\tv, err := epcb.Save(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn v\n}", "func (s *Storage) GetAll() (uuids [][]byte, entries []Entry, err error) 
{\n\treturn s.innerGet(time.Now(), true)\n}", "func (sq *allSavedQueriesCached) get() map[string]api.SavedQuerySpecAndConfig {\n\tsq.mu.Lock()\n\tdefer sq.mu.Unlock()\n\n\tcpy := make(map[string]api.SavedQuerySpecAndConfig, len(sq.allSavedQueries))\n\tfor k, v := range sq.allSavedQueries {\n\t\tcpy[k] = v\n\t}\n\treturn cpy\n}", "func (a *Aliases) Save() error {\n\tlog.Debug().Msg(\"[Config] Saving Aliases...\")\n\treturn a.SaveAliases(K9sAlias)\n}", "func (m *IGApiManager) GetSavedPosts() (items []IGItem, err error) {\n\tb, err := getHTTPResponse(urlSaved, m.dsUserId, m.sessionid, m.csrftoken)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tspp := savedPostsResp{}\n\terr = json.Unmarshal(b, &spp)\n\tif err != nil {\n\t\treturn\n\t}\n\tfor _, item := range spp.Items {\n\t\titems = append(items, item.Item)\n\t}\n\n\tfor spp.MoreAvailable {\n\t\turl := urlSaved + \"?max_id=\" + spp.NextMaxId\n\t\tb, err = getHTTPResponse(url, m.dsUserId, m.sessionid, m.csrftoken)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tspp = savedPostsResp{}\n\t\terr = json.Unmarshal(b, &spp)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tfor _, item := range spp.Items {\n\t\t\titems = append(items, item.Item)\n\t\t}\n\t\tlog.Println(\"fetched\", len(items), \"items\")\n\t\t// sleep 500ms to prevent http 429\n\t\ttime.Sleep(500 * time.Millisecond)\n\t}\n\n\treturn\n}", "func (r *Repository) Save(url string, toAddr, ccAddr []string, content string, status int) error {\n\treturn nil\n}", "func GetAllAddress(s *aklib.DBConfig) (map[string]*Address, error) {\n\tadrs := make(map[string]*Address)\n\terr := s.DB.View(func(txn *badger.Txn) error {\n\t\tit := txn.NewIterator(badger.DefaultIteratorOptions)\n\t\tdefer it.Close()\n\t\tprefix := []byte{byte(db.HeaderWalletAddress)}\n\t\tfor it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {\n\t\t\titem := it.Item()\n\t\t\tv, err := item.ValueCopy(nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tvar adr Address\n\t\t\tif err := arypack.Unmarshal(v, &adr); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tadrs[string(item.Key()[1:])] = &adr\n\t\t}\n\t\treturn nil\n\t})\n\treturn adrs, err\n}", "func (a Accounts) Get(asset string) *Account {\n\tfor i := range a.Datas {\n\t\tif a.Datas[i].Balance.Currency == asset {\n\t\t\treturn &a.Datas[i]\n\t\t}\n\t}\n\treturn nil\n}", "func (_obj *Apipayments) Payments_getSavedInfo(params *TLpayments_getSavedInfo, _opt ...map[string]string) (ret Payments_SavedInfo, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = params.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\ttarsCtx := context.Background()\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"payments_getSavedInfo\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = ret.ReadBlock(_is, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range 
_status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func (s *Client) GetUserSavedTracks(ctx context.Context, token *oauth2.Token, q *QParams) (*models.UserSavedTracks, error) {\n\tvar endpoint *url.URL = s.API.UserSavedTracksURL\n\t//in go structs containing primitive types are copied by value\n\t//https://stackoverflow.com/questions/51635766/how-do-i-copy-a-struct-in-golang\n\tlog.Println(\"SAME ADDRESS ?????\")\n\tlog.Println(&endpoint == &s.API.UserSavedTracksURL)\n\tif q != nil {\n\t\tparams := url.Values{}\n\t\tif q.Limit != nil {\n\t\t\tvar l int = *(q).Limit\n\t\t\tvalid := (l >= 1) && (l <= 50)\n\t\t\tif valid {\n\t\t\t\tparams.Set(\"limit\", strconv.Itoa(l))\n\t\t\t}\n\t\t}\n\t\tif q.Offset != nil {\n\t\t\tvar offset int = *(q).Offset\n\t\t\tif offset > 0 {\n\t\t\t\tparams.Set(\"offset\", strconv.Itoa(offset))\n\t\t\t}\n\t\t}\n\t\tif q.Market != nil {\n\t\t\tvar m string = *(q).Market\n\t\t\tif validMarketOpt(m) {\n\t\t\t\tparams.Set(\"market\", *(q).Market)\n\t\t\t}\n\t\t}\n\n\t\tendpoint.RawQuery = params.Encode()\n\t}\n\turl := endpoint.String()\n\tlog.Println(url)\n\tlog.Printf(\"User saved tracks url: %v\\n\", url)\n\n\ttracks := &models.UserSavedTracks{}\n\n\thttpClient := s.Config.Client(ctx, token)\n\tresp, err := httpClient.Get(url)\n\tif resp.StatusCode >= http.StatusBadRequest {\n\t\tlog.Println(\"status code todo:return err\")\n\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tif err := json.NewDecoder(resp.Body).Decode(tracks); err != nil {\n\t\tlog.Printf(\"Could not decode body: %v\\n\", err)\n\t\treturn nil, err\n\t}\n\n\treturn tracks, nil\n\n}", "func (g *Group) Save() [][]string {\n\tres := make([][]string, len(g.PriorityStudents)+len(g.Students))\n\tvar i int\n\tsaveStudents(g.PriorityStudents, res, &i)\n\tsaveStudents(g.Students, res, &i)\n\treturn res\n}", "func (app *service) Save(state State) error {\n\tjs, err := app.adapter.ToJSON(state)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tchainHash := state.Chain()\n\tindex := state.Height()\n\tpath := filePath(chainHash, index)\n\treturn app.fileService.Save(path, js)\n}", "func (c *CoordinatorHelper) AllAddresses(\n\tctx context.Context,\n\tdbTx storage.DatabaseTransaction,\n) ([]string, error) {\n\treturn c.keyStorage.GetAllAddressesTransactional(ctx, dbTx)\n}", "func (db *STXOsDB) GetAddrAll(hash *Uint168) ([]*STXO, error) {\n\tdb.RLock()\n\tdefer db.RUnlock()\n\n\tsql := \"SELECT OutPoint, Value, LockTime, AtHeight, SpendHash, SpendHeight FROM STXOs WHERE ScriptHash=?\"\n\trows, err := db.Query(sql, hash.Bytes())\n\tif err != nil {\n\t\treturn []*STXO{}, err\n\t}\n\tdefer rows.Close()\n\n\treturn db.getSTXOs(rows)\n}", "func (ipset *IPSet) Save() error {\n\tstdout, err := ipset.run(\"save\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tipset.Sets = parseIPSetSave(ipset, stdout)\n\treturn nil\n}", "func (m *User) GetImAddresses()([]string) {\n return m.imAddresses\n}", "func (mock *CacheRepositoryMock) SaveCalls() []struct {\n\tKey string\n\tValue interface{}\n} {\n\tvar calls []struct {\n\t\tKey string\n\t\tValue interface{}\n\t}\n\tmock.lockSave.RLock()\n\tcalls = mock.calls.Save\n\tmock.lockSave.RUnlock()\n\treturn calls\n}", "func (s *IdeaStorage) GetAll() ([]*models.Idea, error) {\n\treturn s.ideas, nil\n}", "func (m *MemoryStorage) GetAll() (map[string]Entry, error) {\n\treturn m.entries, nil\n}", "func (mock *RepositoryMock) SaveCalls() []struct 
{\n\tCtx context.Context\n\tID string\n\tURL string\n} {\n\tvar calls []struct {\n\t\tCtx context.Context\n\t\tID string\n\t\tURL string\n\t}\n\tmock.lockSave.RLock()\n\tcalls = mock.calls.Save\n\tmock.lockSave.RUnlock()\n\treturn calls\n}", "func getAllCompanySymbols() ([]string){\r\n return []string{}\r\n}", "func (wcb *WalletCreateBulk) SaveX(ctx context.Context) []*Wallet {\n\tv, err := wcb.Save(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn v\n}", "func (p *ProviderParams) Save() ([]datastore.Property, error) {\n\tvar result []datastore.Property\n\tfor k, v := range *p {\n\t\tresult = append(result, datastore.Property{\n\t\t\tName: k,\n\t\t\tValue: v,\n\t\t})\n\t}\n\treturn result, nil\n}", "func (m *BookingBusiness) GetAddress()(PhysicalAddressable) {\n val, err := m.GetBackingStore().Get(\"address\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(PhysicalAddressable)\n }\n return nil\n}", "func (app *service) Save(genesis Genesis) error {\n\t_, err := app.repository.Retrieve()\n\tif err == nil {\n\t\treturn errors.New(\"there is already a Genesis instance\")\n\t}\n\n\tbill := genesis.Bill()\n\terr = app.billService.Save(bill)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttrGenesis, err := app.adapter.ToTransfer(genesis)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn app.trService.Save(trGenesis)\n}", "func (file *File) SnapshotSaved() {\n\tif file.buffHist != nil {\n\t\tfile.buffHist.SnapshotSaved()\n\t}\n}", "func (b Banai) Save(fileName string) (string, error) {\n\tabs, e := filepath.Abs(fileName)\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\tstashID := uuid.NewString()\n\n\te = fsutils.CopyfsItem(abs, stashID)\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\treturn stashID, nil\n}", "func (msg MsgSellAsset) GetSigners() []sdk.AccAddress {\n\taddr, err := sdk.AccAddressFromBech32(msg.Seller)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn []sdk.AccAddress{addr}\n}", "func (model *GrogModel) AllAssets() ([]*Asset, error) {\n\tvar foundAssets []*Asset\n\n\trows, rowsErr := model.db.DB.Query(`select name, mimeType, content, serve_external, rendered,\n\t\tadded, modified from Assets`)\n\tif rowsErr != nil {\n\t\treturn nil, fmt.Errorf(\"error loading all assets: %v\", rowsErr)\n\t}\n\n\tdefer rows.Close()\n\n\tvar (\n\t\tname string\n\t\tmimeType string\n\t\tcontent = make([]byte, 0)\n\t\tserveExternal int64\n\t\trendered int64\n\t\tadded int64\n\t\tmodified int64\n\t)\n\n\tfor rows.Next() {\n\t\tif rows.Scan(&name, &mimeType, &content, &serveExternal, &rendered, &added, &modified) != sql.ErrNoRows {\n\t\t\tfoundAsset := model.NewAsset(name, mimeType)\n\t\t\tfoundAsset.Content = content\n\t\t\tif serveExternal == 1 {\n\t\t\t\tfoundAsset.ServeExternal = true\n\t\t\t} else {\n\t\t\t\tfoundAsset.ServeExternal = false\n\t\t\t}\n\n\t\t\tif rendered == 1 {\n\t\t\t\tfoundAsset.Rendered = true\n\t\t\t} else {\n\t\t\t\tfoundAsset.Rendered = false\n\t\t\t}\n\n\t\t\tfoundAsset.Added.Set(time.Unix(added, 0))\n\t\t\tfoundAsset.Modified.Set(time.Unix(modified, 0))\n\n\t\t\tif foundAssets == nil {\n\t\t\t\tfoundAssets = make([]*Asset, 0)\n\t\t\t}\n\t\t\tfoundAssets = append(foundAssets, foundAsset)\n\t\t}\n\t}\n\n\treturn foundAssets, nil\n}", "func (ref *DidAnchor) GetBulk(suffixes []string) ([]string, error) {\n\tref.RLock()\n\tdefer ref.RUnlock()\n\n\tanchors := make([]string, len(suffixes))\n\n\tfor i, suffix := range suffixes {\n\t\tanchor, ok := ref.m[suffix]\n\t\tif !ok {\n\t\t\tanchors[i] = \"\"\n\t\t} else {\n\t\t\tanchors[i] = anchor\n\t\t}\n\t}\n\n\treturn 
anchors, nil\n}", "func (kvStore *KVStore) DumpStore() []KVPair {\n els := make([]KVPair, len(kvStore.mapping))\n\n i := 0\n for k, v := range kvStore.mapping {\n els[i] = KVPair{k, *v}\n i++\n }\n\n return els\n}", "func (s Storage) GetAll() Storage {\n\treturn s\n}", "func (m *BookingBusiness) GetServices()([]BookingServiceable) {\n val, err := m.GetBackingStore().Get(\"services\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.([]BookingServiceable)\n }\n return nil\n}", "func (m *VirtualEndpoint) GetSnapshots()([]CloudPcSnapshotable) {\n val, err := m.GetBackingStore().Get(\"snapshots\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.([]CloudPcSnapshotable)\n }\n return nil\n}", "func (t *Table) Save(key string, s string) ([]byte, error) {\n\tp := fmt.Sprintf(\"&object=%s&key=%s&%s\", t.Name, key, s)\n\treturn t.SaveBulk(p)\n}", "func (s *Store) GetAll() Dict {\n\treturn *s.data\n}", "func (b *Bookmarks) Save() error {\n\treturn b.WriteToFile(b.Filename)\n}", "func (wallet *Wallet) ExportState() []byte {\n\tmarshalUtil := marshalutil.New()\n\tmarshalUtil.WriteBytes(wallet.Seed().Bytes())\n\tmarshalUtil.WriteUint64(wallet.AddressManager().lastAddressIndex)\n\tmarshalUtil.WriteBytes(wallet.assetRegistry.Bytes())\n\tmarshalUtil.WriteBytes(*(*[]byte)(unsafe.Pointer(&wallet.addressManager.spentAddresses)))\n\n\treturn marshalUtil.Bytes()\n}", "func (posts Posts) Save() (err error) {\n\tpaths := pathsInLocal()\n\tfor _, post := range posts {\n\t\terr = post.Save(paths)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}", "func (m *CountryNamedLocation) GetCountriesAndRegions()([]string) {\n val, err := m.GetBackingStore().Get(\"countriesAndRegions\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.([]string)\n }\n return nil\n}", "func (i *Item) Save() (map[string]bigquery.Value, string, error) {\n\treturn map[string]bigquery.Value{\n\t\t\"value\": i.value,\n\t\t\"metricname\": i.metricname,\n\t\t\"timestamp\": i.timestamp,\n\t\t\"tags\": i.tags,\n\t}, \"\", nil\n}", "func GetKnownAddresses() (data []network.NetAddress, err error) {\n\terr = db.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket(knownAddresses)\n\t\tc := b.Cursor()\n\t\tfor k, v := c.First(); k != nil; k, v = c.Next() {\n\t\t\taddr := network.NetAddress{}\n\t\t\tterr := json.Unmarshal(v, &addr.Lastseen)\n\t\t\tif terr != nil {\n\t\t\t\treturn fmt.Errorf(\"get known adrresses unmarshal: %s\", terr)\n\t\t\t}\n\t\t\ts := strings.Split(string(k[:]), \":\")\n\t\t\taddr.Ip = s[0]\n\t\t\taddr.Port = s[1]\n\t\t\tdata = append(data, addr)\n\t\t}\n\t\treturn nil\n\t})\n\treturn\n}", "func Save() {\n\tdata := Savedata{\n\t\tName: GS.current.name,\n\t\tGamestate: GS.current,\n\t}\n\n\tf, err := json.MarshalIndent(data, \"\", \" \")\n\tcheck(err)\n\tioutil.WriteFile(\"data/savegame.json\", f, 0644)\n}", "func (s store) Save() {\n\ts.writeToDisk()\n}", "func (tx *ReleaseFromEndowment) GetAccountAddresses(app *App) ([]string, error) {\n\trfea, err := tx.GetSource(app)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"getting RFE SV\")\n\t}\n\treturn []string{rfea.String(), tx.Destination.String()}, nil\n}", "func (s *Service) GetAll(ctx context.Context) ([]types.Visit, error) {\n\treturn s.repo.FindAll(ctx)\n}", "func (m *PasswordResetModel) SaveAll(ctx context.Context, passwordResets []PasswordResetN) ([]int64, error) {\n\tids := make([]int64, 0)\n\tfor _, passwordReset := range passwordResets {\n\t\tid, err := m.Save(ctx, passwordReset)\n\t\tif err != nil 
{\n\t\t\treturn ids, err\n\t\t}\n\n\t\tids = append(ids, id)\n\t}\n\n\treturn ids, nil\n}", "func (s *Identity) AccountsGET(w http.ResponseWriter, r *http.Request) {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\twriteResponse(s.addresses, w, r)\n}", "func (t *Table) SaveBulk(s string) ([]byte, error) {\n\tu := \"https://%s/save\"\n\tx := fmt.Sprintf(u, t.Host)\n\n\tw := bytes.NewBufferString(\"?json\")\n\t_, _ = w.WriteString(s)\n\tb := bytes.NewReader(w.Bytes())\n\t//log.Printf(\"SaveBulk: writing %s\\n\", w.String())\n\treq, err := http.NewRequest(\"POST\", x, b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Salsa's API needs these cookies to verify authentication.\n\tfor _, c := range t.Cookies {\n\t\treq.AddCookie(c)\n\t}\n\t// TODO: figure out what to do with the an error response from /save.\n\tif t.API.Verbose {\n\t\tfmt.Printf(\"SaveBulk: %v%v\\n\", x, w.String())\n\t}\n\tresp, err := t.Client.Do(req)\n\tvar body []byte\n\tif err == nil {\n\t\tdefer resp.Body.Close()\n\t\tbody, err = ioutil.ReadAll(resp.Body)\n\t}\n\n\treturn body, err\n}", "func (ids identities) getAddresses() []common.Address {\n\taddresses := make([]common.Address, len(ids))\n\tfor i := 0; i < len(ids); i++ {\n\t\taddresses[i] = ids[i].addr\n\t}\n\treturn addresses\n}", "func (acb *AreahistoryCreateBulk) SaveX(ctx context.Context) []*Areahistory {\n\tv, err := acb.Save(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn v\n}", "func (s *Store) GetAddress() string {\n\tgeoAddresses := []string{}\n\tif s.Address.Valid {\n\t\tgeoAddresses = append(geoAddresses, s.Address.String)\n\t}\n\tif s.City.Valid {\n\t\tgeoAddresses = append(geoAddresses, s.City.String)\n\t}\n\tif s.State.Valid {\n\t\tgeoAddresses = append(geoAddresses, s.State.String)\n\t}\n\tgeoAddress := strings.Join(geoAddresses, \", \")\n\treturn geoAddress\n}", "func (hs *HistoryService) All(actionType string) ([]*TransactionHistory, error) {\n\tvar transHist []*TransactionHistory\n\tif err := hs.client.Get(buildString(\"history/\", actionType, \"/\", strconv.Itoa(hs.assetId)),\n\t\t&transHist); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn transHist, nil\n}", "func (e *Entity) Export() map[string]interface{} {\n\ta := make(map[string]interface{})\n\te.lock.Lock()\n\tdefer e.lock.Unlock()\n\n\tfor label, attribute := range e.Attributes {\n\t\ta[label] = attribute.Get()\n\t}\n\n\treturn a\n}", "func SaveAll(args ...interface{}) error {\n\treturn doAll(Save, args...)\n}", "func (ws *WebServer) LoadSavedAcct(dbPath, network string) (string, error) {\n\tseed, err := ws.GetSeedFromDB(network)\n\tif err != nil {\n\t\tws.log.Errorf(\"LoadSavedAcct:%s\", ErrCombind(ErrorBoltDBGetSeed, err))\n\t\treturn \"\", ErrCombind(ErrorBoltDBGetSeed, err)\n\t}\n\tif seed == \"\" {\n\t\tws.log.Errorf(\"LoadSavedAcct:%s\", ErrorGetEmptySeed)\n\t\treturn \"\", ErrorGetEmptySeed\n\t}\n\ta, err := sdk.AccountFromSeed(seed)\n\tif err != nil {\n\t\tws.log.Errorf(\"LoadSavedAcct:%s\", ErrCombind(ErrorGetAccountFromSeed, err))\n\t\treturn \"\", ErrorGetAccountFromSeed\n\t}\n\terr = ws.SetAccount(a.AccountNumber(), seed, network)\n\n\tif err != nil {\n\t\tws.log.Errorf(\"LoadSavedAcct:%s\", ErrorSetAccount)\n\t\treturn \"\", ErrorSetAccount\n\t}\n\n\t//Save to file and load to memory\n\tseedFile := filepath.Join(ws.rootPath, \"bitmarkd\", network, \"proof.sign\")\n\tf, err := os.OpenFile(seedFile, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0600)\n\tdefer f.Close()\n\tif err != nil {\n\t\tws.log.Errorf(\"LoadSavedAcct:%s\", ErrCombind(ErrorOpenSeedFile, err))\n\t\treturn \"\", 
ErrCombind(ErrorOpenSeedFile, err)\n\t}\n\t_, err = f.WriteString(fmt.Sprintf(\"SEED:%s\", seed))\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn a.AccountNumber(), nil\n}", "func (e *Account) Save() error { return ent.SaveEnt(e) }", "func (e *Account) Save() error { return ent.SaveEnt(e) }", "func (x SyntheticMonitorEntity) GetAssets() []SyntheticsSyntheticMonitorAsset {\n\treturn x.Assets\n}", "func (m *Group) GetSites()([]Siteable) {\n return m.sites\n}", "func (app *application) getAssets(w http.ResponseWriter, r *http.Request) {\n\tdata, err := app.assets.GetAssets()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\tj, err := json.Marshal(data)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write(j)\n}", "func (b *Bucket) Save(key string, data []byte) (*brazier.Item, error) {\n\terr := b.node.Set(\"items\", key, data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &brazier.Item{\n\t\tKey: key,\n\t\tData: data,\n\t}, nil\n}", "func (f *fetcher) Save(data []byte) {\n\t// Implementation can be application dependent\n\t// eg. you may implement connect to a redis server here\n\tfmt.Println(string(data))\n}", "func (s *SmartContract) GetAllAssets(ctx contractapi.TransactionContextInterface) ([]*Asset, error) {\n\t// range query with empty string for startKey and endKey does an\n\t// open-ended query of all assets in the chaincode namespace.\n\tresultsIterator, err := ctx.GetStub().GetStateByRange(\"\", \"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resultsIterator.Close()\n\n\tvar assets []*Asset\n\tfor resultsIterator.HasNext() {\n\t\tqueryResponse, err := resultsIterator.Next()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvar asset Asset\n\t\terr = json.Unmarshal(queryResponse.Value, &asset)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tassets = append(assets, &asset)\n\t}\n\n\treturn assets, nil\n}" ]
[ "0.58940905", "0.5235095", "0.5105615", "0.5036253", "0.49775848", "0.49759227", "0.49557313", "0.4901856", "0.486662", "0.4844012", "0.4823038", "0.48178643", "0.4812065", "0.48031896", "0.4785494", "0.4763713", "0.47625273", "0.4760581", "0.47561753", "0.47330952", "0.469473", "0.46820766", "0.4680319", "0.46653792", "0.4661126", "0.46358964", "0.4624875", "0.46125457", "0.4601281", "0.4592749", "0.45822728", "0.45730448", "0.4572989", "0.4559922", "0.45395473", "0.45362678", "0.45261464", "0.45226303", "0.4514699", "0.451352", "0.45085317", "0.45025754", "0.44843957", "0.44665658", "0.44637108", "0.4463597", "0.44613796", "0.44544", "0.44433022", "0.44391564", "0.4438538", "0.44370687", "0.44354263", "0.44265434", "0.44195443", "0.4415129", "0.44106522", "0.4403775", "0.43972513", "0.4395367", "0.43935835", "0.43915075", "0.4389519", "0.43685636", "0.43655357", "0.43645906", "0.43586996", "0.4354265", "0.4343437", "0.4342848", "0.43427768", "0.43382692", "0.4327075", "0.43202302", "0.4318073", "0.4318044", "0.43159783", "0.431219", "0.43107656", "0.4308178", "0.43056905", "0.43051666", "0.42932996", "0.42903912", "0.42864853", "0.4285549", "0.4282645", "0.42807963", "0.42783225", "0.42767692", "0.4276436", "0.42760253", "0.4274359", "0.4274359", "0.42704517", "0.42698768", "0.42671508", "0.42669052", "0.42666888", "0.42648873" ]
0.7589577
0
Remove will remove a withdrawal address given the id of the address
func (as *AddressService) Remove(addressID int) error { if err := as.client.Delete(buildString("address/withdraw/", strconv.Itoa(addressID))); err != nil { return err } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (f *wsClientFilter) removeAddress(a btcutil.Address) {\n\tswitch a := a.(type) {\n\tcase *btcutil.AddressPubKeyHash:\n\t\tdelete(f.pubKeyHashes, *a.Hash160())\n\t\treturn\n\tcase *btcutil.AddressScriptHash:\n\t\tdelete(f.scriptHashes, *a.Hash160())\n\t\treturn\n\tcase *btcutil.AddressPubKey:\n\t\tserializedPubKey := a.ScriptAddress()\n\t\tswitch len(serializedPubKey) {\n\t\tcase 33: // compressed\n\t\t\tvar compressedPubKey [33]byte\n\t\t\tcopy(compressedPubKey[:], serializedPubKey)\n\t\t\tdelete(f.compressedPubKeys, compressedPubKey)\n\t\t\treturn\n\t\tcase 65: // uncompressed\n\t\t\tvar uncompressedPubKey [65]byte\n\t\t\tcopy(uncompressedPubKey[:], serializedPubKey)\n\t\t\tdelete(f.uncompressedPubKeys, uncompressedPubKey)\n\t\t\treturn\n\t\t}\n\t}\n\n\tdelete(f.otherAddresses, a.EncodeAddress())\n}", "func DeleteAddressByID(ID int)error{\n\tsql := \"delete from address where id = ?\"\n\t_,err := utils.Db.Exec(sql,ID)\n\tif err != nil{\n\t\treturn err\n\t}\n\treturn nil\n}", "func (ua *UserAddress) Delete(ctx context.Context, key ...interface{}) error {\n\tvar err error\n\tvar dbConn *sql.DB\n\n\t// if deleted, bail\n\tif ua._deleted {\n\t\treturn nil\n\t}\n\n\ttx, err := components.M.GetConnFromCtx(ctx)\n\tif err != nil {\n\t\tdbConn, err = components.M.GetMasterConn()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ttableName, err := GetUserAddressTableName(key...)\n\tif err != nil {\n\t\treturn err\n\t}\n\t//1\n\n\t// sql query with composite primary key\n\tsqlstr := `UPDATE ` + tableName + ` SET is_del = 1 WHERE uaid = ?`\n\n\t// run query\n\tutils.GetTraceLog(ctx).Debug(\"DB\", zap.String(\"SQL\", fmt.Sprint(sqlstr, ua.Uaid)))\n\tif tx != nil {\n\t\t_, err = tx.Exec(sqlstr, ua.Uaid)\n\t} else {\n\t\t_, err = dbConn.Exec(sqlstr, ua.Uaid)\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// set deleted\n\tua._deleted = true\n\n\treturn nil\n}", "func DeleteDesainByID(e echo.Context) error {\n\tvar desain db.Desains\n\tid, _ := strconv.Atoi(e.Param(\"id\"))\n\tconfig.DB.Where(\"id = ?\", id).Delete(&desain)\n\treturn e.JSON(http.StatusOK, map[string]interface{}{\n\t\t\"desain\": desain,\n\t\t\"message\": \"Data Berhasil Dihapus\",\n\t})\n}", "func (f *wsClientFilter) removeAddressStr(s string, params *chaincfg.Params) {\n\ta, err := btcutil.DecodeAddress(s, params)\n\tif err == nil {\n\t\tf.removeAddress(a)\n\t} else {\n\t\tdelete(f.otherAddresses, s)\n\t}\n}", "func (twd *TCPWaveDriver) ReleaseAddress(conf NetConfig, ip string, mac string) (string,error){\n glog.Infof(\"Ip delete request with ip = %s\", ip)\n err := twd.ObjMgr.DeleteIPAddress(ip, \"\", conf.IPAM.Org)\n if err!=nil{\n glog.Error(err)\n return \"\", err\n }\n return ip,nil\n}", "func (s *svcBook) DelByID(pId *uint) (uint, error) {\n\treturn (*s.pRepo).DelByID(pId)\n}", "func DeleteAddress(id int32) error {\n\ta, err := GetAddress(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresult := db.DB().Delete(a)\n\treturn result.Error\n}", "func (o *Address) Delete(exec boil.Executor) error {\n\tif o == nil {\n\t\treturn errors.New(\"sqlboiler: no Address provided for delete\")\n\t}\n\n\tif err := o.doBeforeDeleteHooks(exec); err != nil {\n\t\treturn err\n\t}\n\n\targs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), addressPrimaryKeyMapping)\n\tsql := \"DELETE FROM `address` WHERE `address_id`=?\"\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, sql)\n\t\tfmt.Fprintln(boil.DebugWriter, args...)\n\t}\n\n\t_, err := exec.Exec(sql, args...)\n\tif err != nil {\n\t\treturn 
errors.Wrap(err, \"sqlboiler: unable to delete from address\")\n\t}\n\n\tif err := o.doAfterDeleteHooks(exec); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (t *ToDoList) Remove(id string) {\n\tidx := findDealIndex(t.Deals, id)\n\tif idx == -1 {\n\t\tpanic(\"Deal is not found\")\n\t}\n\tt.Deals[idx] = t.Deals[len(t.Deals)-1]\n\tt.Deals[len(t.Deals)-1] = nil\n\tt.Deals = t.Deals[:len(t.Deals)-1]\n}", "func (t *Transaction) remove(db meddler.DB) error {\n lender, err := GetUserById(db, t.LenderId)\n if err != nil {\n return err\n }\n debtor, err := GetUserById(db, t.DebtorId)\n if err != nil {\n return err\n }\n\n // reverse the balance updates due to this transaction\n lender.UpdateBalance(db, -(t.Amount))\n debtor.UpdateBalance(db, t.Amount)\n\n // remove the transaction from the db\n _, err = db.Exec(\"DELETE FROM transactions WHERE id = ?\", t.Id)\n if err != nil {\n return err\n }\n t = nil\n\n return nil\n}", "func RemoveByID(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tfmt.Fprint(w, \"Welcome!\\n\")\n}", "func (oiuo *OrderInfoUpdateOne) RemoveOrderAddress(o ...*OrderAddress) *OrderInfoUpdateOne {\n\tids := make([]int, len(o))\n\tfor i := range o {\n\t\tids[i] = o[i].ID\n\t}\n\treturn oiuo.RemoveOrderAddresIDs(ids...)\n}", "func (t *Tenants) Del(id string) error {\n\treturn t.store.Del(id)\n}", "func (oiu *OrderInfoUpdate) RemoveOrderAddress(o ...*OrderAddress) *OrderInfoUpdate {\n\tids := make([]int, len(o))\n\tfor i := range o {\n\t\tids[i] = o[i].ID\n\t}\n\treturn oiu.RemoveOrderAddresIDs(ids...)\n}", "func RemoveAddress(addrs []Address, a Address) []Address {\n\ti := IndexOfAddress(addrs, a)\n\tif i == -1 {\n\t\treturn addrs\n\t}\n\n\treturn append(addrs[:i], addrs[i+1:]...)\n}", "func (uuo *UserUpdateOne) RemoveUserid(c ...*Carservice) *UserUpdateOne {\n\tids := make([]int, len(c))\n\tfor i := range c {\n\t\tids[i] = c[i].ID\n\t}\n\treturn uuo.RemoveUseridIDs(ids...)\n}", "func (s *service) RemoveByID(ctx context.Context, id string) error {\n\tlogger := log.With(s.logger, \"method\", \"RemovebyID\")\n\tif Orders.IsEmpty() {\n\t\tlevel.Error(logger).Log(\"err\", ErrOrderBookIsEmpty)\n\t\treturn ErrOrderBookIsEmpty\n\t}\n\n\tif _, ok := Orders.Get(id); ok {\n\t\tif !ok {\n\t\t\tlevel.Error(logger).Log(\"err\", ok)\n\t\t\treturn ErrOrderNotFound\n\t\t}\n\t}\n\tOrders.Remove(id)\n\tspread.setPrices()\n\treturn nil\n}", "func (*SeatDataAccessObject) DeleteBySeatID(seatID int) {\n\tvar seat Seat\n\t_, err := orm.Table(seat).ID(seatID).Delete(&seat)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func (d *DogNZB) Remove(t Type, id string) (*AddRemoveQuery, error) {\n\tb, err := d.get(d.buildURL(\"remove\", t, id))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar q AddRemoveQuery\n\tif err := xml.Unmarshal(b, &q); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// if dognzb sent an error back, we should also error\n\tif q.ErrorCode != \"\" {\n\t\treturn nil, fmt.Errorf(\"%v\", q.ErrorDesc)\n\t}\n\treturn &q, nil\n}", "func (b *BlockQueue) delAddress(address string, balance int64, tx *Tx) {\n\n\t// Remove tx from address index\n\tq := b.addrTx[address]\n\tq.PopFront()\n\t\n\tif q.Len() == 0 {\n\t\tdelete(b.addrTx, address)\n\t}\n\n\t// Update accumulated address balance\n\tnew_balance := b.addrBalance[address] - balance\n\tif new_balance == 0 {\n\t\tdelete(b.addrBalance, address)\n\t} else {\n\t\tb.addrBalance[address] = new_balance\n\t}\n}", "func (uu *UserUpdate) RemoveUserid(c ...*Carservice) *UserUpdate {\n\tids := make([]int, len(c))\n\tfor i := 
range c {\n\t\tids[i] = c[i].ID\n\t}\n\treturn uu.RemoveUseridIDs(ids...)\n}", "func Del(id string) error {\n\treturn getC().Del(id)\n}", "func Del(id string) error {\n\treturn getC().Del(id)\n}", "func Del(id string) error {\n\treturn getC().Del(id)\n}", "func DeleteAddressPoolInfo(ctx iris.Context) {\n\turi := ctx.Request().RequestURI\n\tfabricID := ctx.Params().Get(\"id\")\n\n\tif _, err := capmodel.GetFabric(fabricID); err != nil {\n\t\terrMsg := fmt.Sprintf(\"failed to fetch fabric data for uri %s: %s\", uri, err.Error())\n\t\tcreateDbErrResp(ctx, err, errMsg, []interface{}{\"Fabric\", fabricID})\n\t\treturn\n\t}\n\n\taddresspoolData, err := capmodel.GetAddressPool(fabricID, uri)\n\tif err != nil {\n\t\terrMsg := fmt.Sprintf(\"failed to fetch AddressPool data for uri %s: %s\", uri, err.Error())\n\t\tcreateDbErrResp(ctx, err, errMsg, []interface{}{\"AddressPool\", fabricID})\n\t\treturn\n\t}\n\tif addresspoolData.Links != nil && len(addresspoolData.Links.Zones) > 0 {\n\t\terrMsg := fmt.Sprintf(\"AddressPool cannot be deleted as there are dependent Zone still tied to it\")\n\t\tlog.Error(errMsg)\n\t\tresp := updateErrorResponse(response.ResourceCannotBeDeleted, errMsg, []interface{}{uri, \"AddressPool\"})\n\t\tctx.StatusCode(http.StatusNotAcceptable)\n\t\tctx.JSON(resp)\n\t\treturn\n\t}\n\t// Todo:Add the validation to verify the links\n\tif err = capmodel.DeleteAddressPool(fabricID, uri); err != nil {\n\t\terrMsg := fmt.Sprintf(\"failed to delete fabric data in DB for uri %s: %s\", uri, err.Error())\n\t\tcreateDbErrResp(ctx, err, errMsg, []interface{}{\"Fabric\", fabricID})\n\t\treturn\n\t}\n\tctx.StatusCode(http.StatusNoContent)\n}", "func (r *LBServiceReconciler) withdrawService(ctx context.Context, sc *ServerWithConfig, key resource.Key) error {\n\tadvertisements := sc.ServiceAnnouncements[key]\n\t// Loop in reverse order so we can delete without effect to the iteration.\n\tfor i := len(advertisements) - 1; i >= 0; i-- {\n\t\tadvertisement := advertisements[i]\n\t\tif err := sc.Server.WithdrawPath(ctx, types.PathRequest{Path: advertisement}); err != nil {\n\t\t\t// Persist remaining advertisements\n\t\t\tsc.ServiceAnnouncements[key] = advertisements\n\t\t\treturn fmt.Errorf(\"failed to withdraw deleted service route: %v: %w\", advertisement.NLRI, err)\n\t\t}\n\n\t\t// Delete the advertisement after each withdraw in case we error half way through\n\t\tadvertisements = slices.Delete(advertisements, i, i+1)\n\t}\n\n\t// If all were withdrawn without error, we can delete the whole svc from the map\n\tdelete(sc.ServiceAnnouncements, key)\n\n\treturn nil\n}", "func DelDPSDetailById(id int64) (err error) {\n\to := orm.NewOrm()\n\tv := DPSDetail{Id: id}\n\t// ascertain id exists in the database\n\tif err = o.Read(&v); err == nil {\n\t\tvar num int64\n\t\tif num, err = o.Delete(&DPSDetail{Id: id}); err == nil {\n\t\t\tfmt.Println(\"Number of records deleted in database:\", num)\n\t\t}\n\t}\n\treturn\n}", "func deleteById(l []ClientType, id string) (list []ClientType) {\n\t/*\n\t * Delete an element of the list by its Id given by 'id'\n\t */\n\t\n\tlist = l\n\tfor index, value := range l {\n\t\tif (value.client_id == id) {\n\t\t\tlist = removeIndex(l, index)\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn list\n}", "func (p *Store) Del(ctx context.Context, round uint64) error {\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\tdefault:\n\t}\n\n\tconst query = `\n\tDELETE FROM\n\t\tbeacon_details\n\tWHERE\n\t\tbeacon_id = :id AND\n\t\tround = :round`\n\n\tdata := struct {\n\t\tID int 
`db:\"id\"`\n\t\tRound uint64 `db:\"round\"`\n\t}{\n\t\tID: p.beaconID,\n\t\tRound: round,\n\t}\n\n\t_, err := p.db.NamedExecContext(ctx, query, data)\n\treturn err\n}", "func (m *MemberService) DeleteAddress(ctx context.Context, addressID string) error {\n\tdata, err := m.DELETE(\"/address/\" + addressID).\n\t\tAuth(m.Presign(time.Minute)).\n\t\tDo(ctx).Bytes()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar resp Err\n\tif err := jsoniter.Unmarshal(data, &resp); err != nil {\n\t\treturn err\n\t}\n\n\tif resp.Code > 0 {\n\t\treturn resp\n\t}\n\n\treturn nil\n}", "func (t *Tkeyid) DelFromId(id uint) (key string, ok bool) {\n\tkey, ok = t.idtokey[id]\n\tif ok {\n\t\tdelete(t.idtokey, id)\n\t\tdelete(t.keytoid, key)\n\t}\n\treturn\n}", "func (as *Service) Delete(id string) error {\n\treq, err := as.httpClient.NewRequest(http.MethodDelete, fmt.Sprintf(\"/setup/account/%s\", id), nil)\n\tv := make(map[string]interface{})\n\t_, err = as.httpClient.Do(req, &v)\n\treturn err\n}", "func (dao *DAO) RemoveExpenseByID(id string) error {\r\n\terr := db.C(dao.ExpenseCollection).RemoveId(bson.ObjectIdHex(id))\r\n\r\n\treturn err\r\n}", "func DeleteByID() {\n\n}", "func (gt GtwyMgr) Delete(ctx context.Context, appcontext, remoteAddress string) error {\n\tif EnvDebugOn {\n\t\tlblog.LogEvent(\"GtwyMgr\", \"Delete\", \"info\", \"start\")\n\t}\n\n\t//check the approval list\n\tq := datastore.NewQuery(gt.bc.GetConfigValue(ctx, \"EnvGtwayDsKind\")).\n\t\tNamespace(gt.bc.GetConfigValue(ctx, \"EnvGtwayDsNamespace\")).\n\t\tFilter(\"appcontext =\", appcontext).\n\t\tFilter(\"remoteaddress =\", remoteAddress).\n\t\tKeysOnly()\n\n\tvar arr []Gateway\n\tkeys, err := gt.ds.GetAll(ctx, q, &arr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttx, err := gt.ds.NewTransaction(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := tx.DeleteMulti(keys); err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\n\tif _, err = tx.Commit(); err != nil {\n\t\treturn err\n\t}\n\n\tif EnvDebugOn {\n\t\tlblog.LogEvent(\"GtwyMgr\", \"Delete\", \"info\", \"end\")\n\t}\n\treturn nil\n}", "func (data *DNSData) Remove(name string) error {\n\tfqdn := dns.Fqdn(name)\n\n\taRecord, ok := data.v4Addresses[fqdn]\n\tif ok {\n\t\tarpa, err := dns.ReverseAddr(aRecord.A.String())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdelete(data.reverseLookups, arpa)\n\t}\n\tdelete(data.v4Addresses, fqdn)\n\n\taaaaRecord, ok := data.v6Addresses[fqdn]\n\tif ok {\n\t\tarpa, err := dns.ReverseAddr(aaaaRecord.AAAA.String())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdelete(data.reverseLookups, arpa)\n\t}\n\tdelete(data.v6Addresses, fqdn)\n\n\treturn nil\n}", "func (al *AddrList) RemoveAddress(address string) {\n\n\tal.slice = removeStringFromSlice(address, al.slice)\n\n\t//re-build the comma-seperated string of addresses\n\tal.csv = toAddrString(al.slice)\n\t\n}", "func (k Keeper) RemoveDeposit(ctx sdk.Context, id uint64) {\n\tstore := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.DepositKey))\n\tstore.Delete(GetDepositIDBytes(id))\n}", "func (*wsNotificationManager) removeAddrRequest(addrs map[string]map[chan struct{}]*wsClient,\n\twsc *wsClient, addr string) {\n\n\t// Remove the request tracking from the client.\n\tdelete(wsc.addrRequests, addr)\n\n\t// Remove the client from the list to notify.\n\tcmap, ok := addrs[addr]\n\tif !ok {\n\t\trpcsLog.Warnf(\"Attempt to remove nonexistent addr request \"+\n\t\t\t\"<%s> for websocket client %s\", addr, wsc.addr)\n\t\treturn\n\t}\n\tdelete(cmap, wsc.quit)\n\n\t// Remove the map 
entry altogether if there are no more clients\n\t// interested in it.\n\tif len(cmap) == 0 {\n\t\tdelete(addrs, addr)\n\t}\n}", "func (i *ImmediateCron) Remove(id cron.EntryID) {}", "func removeBobba(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid, err := getRequestID(vars, \"id\")\n\n\t// Set JSON header\n\tw.Header().Set(\"Content-type\", \"application/json\")\n\tif err != nil {\n\t\terrPayload := buildErrorPayload(idEmptyRemove, 200)\n\n\t\tw.Write(errPayload)\n\t\treturn\n\t}\n\n\terr = runRemoveRoutine(id)\n\tif err != nil {\n\t\terrPayload := buildErrorPayload(err.Error(), 200)\n\t\tw.Write(errPayload)\n\t}\n\n\tpayload := buildSuccessPayload(\"success\", 200)\n\tw.Write(payload)\n}", "func RemoveWalletFederationAddress(uc *mw.IcopContext, c *gin.Context) {\n\tvar l RemoveWalletFederationAddressRequest\n\tif err := c.Bind(&l); err != nil {\n\t\tc.JSON(http.StatusBadRequest, cerr.LogAndReturnError(uc.Log, err, cerr.ValidBadInputData, cerr.BindError))\n\t\treturn\n\t}\n\n\tif valid, validErrors := cerr.ValidateStruct(uc.Log, l); !valid {\n\t\tc.JSON(http.StatusBadRequest, validErrors)\n\t\treturn\n\t}\n\n\tuserID := mw.GetAuthUser(c).UserID\n\n\t//remove the wallet\n\treq := &pb.WalletChangeFederationAddressRequest{\n\t\tBase: NewBaseRequest(uc),\n\t\tId: l.ID,\n\t\tUserId: userID,\n\t\tFriendlyId: \"\",\n\t\tDomain: \"\",\n\t}\n\t_, err := dbClient.WalletChangeFederationAddress(c, req)\n\tif err != nil {\n\t\tc.JSON(http.StatusInternalServerError, cerr.LogAndReturnError(uc.Log, err, \"Error removing wallet federation address\", cerr.GeneralError))\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, \"{}\")\n}", "func (tr *TestRunner) Del(addr *net.UDPAddr) {\n\ttr.mutex.Lock()\n\tdefer tr.mutex.Unlock()\n\t// Find the element\n\tfor i, v := range tr.targets {\n\t\tif v == addr {\n\t\t\t// Delete the element\n\t\t\t// This doesn't preserve order because it shouldn't matter.\n\t\t\t// Also it's WAY more efficient, especially at scale.\n\t\t\ttr.targets[i] = tr.targets[len(tr.targets)-1]\n\t\t\ttr.targets[len(tr.targets)-1] = nil\n\t\t\ttr.targets = tr.targets[:len(tr.targets)-1]\n\t\t}\n\t}\n}", "func deleteBook(id int) {\n\tfmt.Println(\"func: getBook\")\n\tfor index, book := range books {\n\t\tif book.ID == id {\n\t\t\tbooks = append(books[:index], books[index + 1:]...)\n\t\t\treturn\n\t\t}\n\t}\n}", "func UPSDelete(id string) error {\n\tfor index, item := range ups {\n\t\tif item.ID == id {\n\t\t\tups = append(ups[:index], ups[index+1:]...)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"UPS %s Not Found\", id)\n}", "func deleteItem(id string) error {\n\tdeathrow := findItem(id)\n\tfor i, _ := range items {\n\t\tfound := getXidString(deathrow)\n\t\tif id == found {\n\t\t\titems = append(items[:i], items[i+1:]...)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"Could not find item with id of %v to delete\", id)\n}", "func addIDToDeleteRequest(r *resource,\n\tinput *svcsdk.DeleteVpcEndpointsInput) error {\n\tif r.ko.Status.VPCEndpointID == nil {\n\t\treturn errors.New(\"unable to extract VPCEndpointID from resource\")\n\t}\n\tinput.VpcEndpointIds = []*string{r.ko.Status.VPCEndpointID}\n\treturn nil\n}", "func (r *PoolNAPTRResource) Delete(id string) error {\n\tif err := r.c.ModQuery(\"DELETE\", BasePath+PoolNAPTREndpoint+\"/\"+id, nil); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func DeletePoint(db *gorm.DB, id string) error {\n\tpoint := new(model.Point)\n\tif err := db.Where(\"id = ? 
\", id).Delete(&point).Error; err != nil {\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\n\t//tag := new(model.Tag)\n\t//if err := db.Where(\"post_id = ? \", id).Delete(&tag).Error; err != nil {\n\t//\tlog.Println(err)\n\t//}\n\n\treturn nil\n}", "func (r *BillingRepository) DeleteBillingById(id string) error {\n\terr := r.C.Remove(bson.M{\"_id\": bson.ObjectIdHex(id)})\n\treturn err\n}", "func (m *UserMutation) RemoveAddresIDs(ids ...int64) {\n\tif m.removedaddress == nil {\n\t\tm.removedaddress = make(map[int64]struct{})\n\t}\n\tfor i := range ids {\n\t\tm.removedaddress[ids[i]] = struct{}{}\n\t}\n}", "func (s ids) remove(id int) ids {\n\tindex := sort.SearchInts([]int(s), id)\n\tif index < len(s) && s[index] == id {\n\t\ts = append(s[:index], s[index+1:]...)\n\t}\n\treturn s\n}", "func (e Endpoints) DeleteAddress(ctx context.Context, profileID string, addressID string) error {\n\trequest := deleteAddressRequest{ProfileID: profileID, AddressID: addressID}\n\n\tresponse, err := e.DeleteAddressEndpoint(ctx, request)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresp := response.(deleteAddressResponse)\n\n\treturn resp.Err\n}", "func (s *SweepPruneSet[T]) Remove(id SweepPruneItemID) {\n\titemIndex := uint32(id)\n\titem := &s.items[itemIndex]\n\titem.Position = dprec.Vec3{\n\t\tX: math.Inf(+1),\n\t\tY: math.Inf(+1),\n\t\tZ: math.Inf(+1),\n\t}\n\titem.Radius = 1.0\n\tvar zeroV T\n\titem.Value = zeroV\n\ts.dirtyItemIDs[itemIndex] = struct{}{}\n\ts.freeItemIDs.Push(itemIndex)\n}", "func Remove(w http.ResponseWriter, r *http.Request) {\n\tparams := mux.Vars(r)\n\n\tid := params[\"id\"]\n\n\tif id == \"\" {\n\t\tutils.HTTPResponse(w, http.StatusInternalServerError, \"Bad Request, id is missing.\", false)\n\t\treturn\n\t}\n\n\tresult, err := db.Connection.Exec(utils.BuildString(\"DELETE FROM moedas WHERE id = \", id))\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\tutils.HTTPResponse(w, http.StatusInternalServerError, \"Error ocurred while removing data, please try again.\", false)\n\t\treturn\n\t}\n\n\tif rows, _ := result.RowsAffected(); rows == 0 {\n\t\tlog.Println(err.Error())\n\t\tutils.HTTPResponse(w, http.StatusInternalServerError, \"Error ocurred while removing data, please try again.\", false)\n\t\treturn\n\t}\n\n\tutils.HTTPResponse(w, http.StatusOK, \"Currency removed successfully!\", false)\n}", "func (m MariaDB) Remove(ctx context.Context, id string) (int64, error) {\n\tsqlQuery := \"DELETE FROM person WHERE id = ?\"\n\trslt, err := m.Person.ExecContext(ctx, sqlQuery, id)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"could not remove data\")\n\t}\n\tcount, err := rslt.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"rows are not affected\")\n\t}\n\treturn count, nil\n}", "func Remove(name string) error", "func (db *Tool) Delete(id int) error {\n\ttools, err := db.fetchTools()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i, v := range tools {\n\t\tif id == v.ID {\n\t\t\ttools = append(tools[:i], tools[i+1:]...)\n\t\t}\n\t}\n\n\tif err = Save(toolData, tools); err != nil {\n\t\treturn errors.New(\"problem making updates, please try again\")\n\t}\n\n\t// if tool has an associated rental, delete rental\n\tr := Rental{}\n\tif err = r.cascade(id, 0); err != nil {\n\t\treturn errors.New(\"rental not found\")\n\t}\n\n\treturn nil\n}", "func deletePerson(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tpersonID := ps.ByName(\"id\")\n\tfor index, item := range people {\n\t\tif item.ID == personID {\n\t\t\tpeople = append(people[:index], 
people[index+1:]...)\n\t\t\tbreak\n\t\t}\n\t}\n\tjson.NewEncoder(w).Encode(people)\n}", "func DeleteWorkoutByRoutineID(id string, db *sql.DB) {\n\tquery := fmt.Sprintf(`DELETE FROM Workouts WHERE routine_id = \"%s\"`, id)\n\tdb.Exec(query)\n}", "func (b *Bus) Remove(p Passenger) {\n\tdelete(b.passengers, p.SSN)\n\tfmt.Printf(\"%s: unboarded passenger with SSN %q\\n\", b.name, p.SSN)\n}", "func DeleteRequest(w http.ResponseWriter, r *http.Request, p httprouter.Params) {\n\t a:= p.ByName(\"id\")\n\t ac,_ := strconv.Atoi(a)\n\t sess:=getSession();\n er := sess.DB(\"trip-planner\").C(\"locations\").Remove(bson.M{\"id\": ac})\n if er!=nil {\n \tpanic(er)\n }\n\tw.WriteHeader(200)\n}", "func (p *pool) deleteFromID(id int) error {\n\tconn := p.Get()\n\tdefer conn.Close()\n\n\text, err := redis.String(conn.Do(\"HGET\", key(\"entry\", strconv.Itoa(id)), \"fileext\"))\n\tif err != nil {\n\t\treturn xerrors.Errorf(\"can not get image extension %d: %w\", id, err)\n\t}\n\n\t// Delete from list\n\tif _, err := conn.Do(\"SREM\", key(\"entries\"), id); err != nil {\n\t\treturn xerrors.Errorf(\"can not delete entry id: %w\", err)\n\t}\n\n\t// Delete from redis\n\tif _, err := conn.Do(\"DEL\", key(\"entry\", strconv.Itoa(id))); err != nil {\n\t\treturn xerrors.Errorf(\"can not delete entry: %w\", err)\n\t}\n\n\t// Delete image from disk\n\tfilePath := path.Join(mailimagePath(), \"images\", fmt.Sprintf(\"%d%s\", id, ext))\n\tif err := os.Remove(filePath); err != nil {\n\t\treturn xerrors.Errorf(\"can not delete image from disk: %w\", err)\n\t}\n\n\t// Delete mail fom disk\n\tfilePath = path.Join(mailimagePath(), \"success\", strconv.Itoa(id))\n\tif err := os.Remove(filePath); err != nil {\n\t\treturn xerrors.Errorf(\"can not delete file from disk: %w\", err)\n\t}\n\treturn nil\n}", "func (cs *ClientStore) RemoveByID(id string) (err error) {\n\tcs.cHandler(cs.ccfg.ClientsCName, func(c *mongo.Collection) {\n\t\tif _, cerr := c.DeleteOne(context.TODO(), db.Map{\"id\": id}); cerr != nil {\n\t\t\terr = cerr\n\t\t\treturn\n\t\t}\n\t})\n\n\treturn\n}", "func (b *RaftBalloon) remove(id string) error {\n\tif b.raft.api.State() != raft.Leader {\n\t\treturn ErrNotLeader\n\t}\n\n\tf := b.raft.api.RemoveServer(raft.ServerID(id), 0, 0)\n\tif f.Error() != nil {\n\t\tif f.Error() == raft.ErrNotLeader {\n\t\t\treturn ErrNotLeader\n\t\t}\n\t\treturn f.Error()\n\t}\n\n\tcmd := &commands.MetadataDeleteCommand{Id: id}\n\t_, err := b.raftApply(commands.MetadataDeleteCommandType, cmd)\n\n\treturn err\n}", "func (n *NIC) RemoveAddress(addr tcpip.Address) error {\n\tn.mu.Lock()\n\tr := n.endpoints[NetworkEndpointID{addr}]\n\tif r == nil || !r.holdsInsertRef {\n\t\tn.mu.Unlock()\n\t\treturn tcpip.ErrBadLocalAddress\n\t}\n\n\tr.holdsInsertRef = false\n\tn.mu.Unlock()\n\n\tr.decRef()\n\n\treturn nil\n}", "func (uuo *UserUpdateOne) RemoveUserof(a ...*Ambulance) *UserUpdateOne {\n\tids := make([]int, len(a))\n\tfor i := range a {\n\t\tids[i] = a[i].ID\n\t}\n\treturn uuo.RemoveUserofIDs(ids...)\n}", "func (p *UserStoreClient) RemoveFromBusiness(ctx context.Context, authenticationToken string, emailAddress string) (err error) {\n var _args21 UserStoreRemoveFromBusinessArgs\n _args21.AuthenticationToken = authenticationToken\n _args21.EmailAddress = emailAddress\n var _result22 UserStoreRemoveFromBusinessResult\n if err = p.Client_().Call(ctx, \"removeFromBusiness\", &_args21, &_result22); err != nil {\n return\n }\n switch {\n case _result22.UserException!= nil:\n return _result22.UserException\n case _result22.SystemException!= nil:\n 
return _result22.SystemException\n case _result22.NotFoundException!= nil:\n return _result22.NotFoundException\n }\n\n return nil\n}", "func (d *DirectAddress) Delete(key int) {\n\tif err := d.validateKey(key); err != nil {\n\t\treturn\n\t}\n\td.array[key-d.uMin] = nil\n}", "func RemovePayee(id bson.ObjectId, payeeID bson.ObjectId) User {\n\tsession, _ := mgo.Dial(\"127.0.0.1\")\n\tdefer session.Close()\n\tsession.SetMode(mgo.Monotonic, true)\n\tdb := session.DB(\"reimburse-me\").C(\"user\")\n\tuserID := bson.M{\"_id\": id}\n\tchange := bson.M{\"$pull\": bson.M{\n\t\t\"payees\": payeeID,\n\t}}\n\tdb.Update(userID, change)\n\tvar user User\n\tdb.Find(bson.M{\"_id\": id}).One(&user)\n\treturn user\n}", "func removeBook(id uint64){\n\t_onlyCurator()\n\tstate.WriteUint64(REMOVED_KEY, _getRemoved() + 1)\n\tstate.Clear(_bookId(id))\n}", "func (r MockRateRepository) RemoveByPairID(id int64) error {\n\tif id == 2 {\n\t\treturn errors.New(\"id not found\")\n\t}\n\treturn nil\n}", "func RemoveFromArea(key, set string) error {\n\tconn := db.Pool.Get()\n\tdefer conn.Close()\n\n\treturn db.Srem(conn, set, key)\n}", "func DeleteAddrEntry(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t// get query parameters\n\tqueryValues := r.URL.Query()\n\tlastName := queryValues.Get(\"lastName\")\n\tfirstName := queryValues.Get(\"firstName\")\n\taddress, ok := GetEntry(firstName, lastName)\n\tif !ok {\n\t\thttp.Error(w, fmt.Sprintf(\"Entry not found for firstName: %s, lastName: %s\", firstName, lastName), 404)\n\t\treturn\n\t}\n\tDeleteEntry(address)\n\n}", "func (h *Handler) DeleteSavingByUserID(w http.ResponseWriter, r *http.Request, param httprouter.Params) {\n\tuserID, err := strconv.ParseInt(param.ByName(\"userID\"), 10, 64)\n\tif err != nil {\n\t\tlog.Printf(\"[internal][GetUserByID] invalid user_id :%+v\\n\", err)\n\t\treturn\n\t}\n\n\tquery := fmt.Sprintf(\"DELETE FROM savings WHERE user_id = %d\", userID)\n\t_, err = h.DB.Exec(query)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\trenderJSON(w, []byte(`\n\t{\n\t\tstatus: \"success\",\n\t\tmessage: \"Delete book success!\"\n\t}\n\t`), http.StatusOK)\n}", "func (uu *UserUpdate) RemoveUserof(a ...*Ambulance) *UserUpdate {\n\tids := make([]int, len(a))\n\tfor i := range a {\n\t\tids[i] = a[i].ID\n\t}\n\treturn uu.RemoveUserofIDs(ids...)\n}", "func (db DB) RemoveSpend(ctx context.Context, id uint) error {\n\tif !db.checkSpend(ctx, id) {\n\t\treturn common.ErrSpendNotExist\n\t}\n\n\treturn db.db.RunInTransaction(ctx, func(tx *pg.Tx) error {\n\t\tdayID, err := db.selectSpendDayID(ctx, tx, id)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = tx.ModelContext(ctx, (*Spend)(nil)).Where(\"id = ?\", id).Delete()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tmonthID, err := db.selectMonthIDByDayID(ctx, tx, dayID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn db.recomputeAndUpdateMonth(tx, monthID)\n\t})\n}", "func (d *Driver) Remove(id string) error {\n\tlogrus.Debugf(\"Remove - id %s\", id)\n\tif strings.HasSuffix(id, \"-init\") {\n\t\treturn nil\n\t}\n\treturn d.ioctl(LayerRemove, \"\", id)\n}", "func (b *Bookmarks) RemoveAt(idx int) {\n\tif idx < 0 || idx >= len(b.Bookmark) {\n\t\treturn\n\t}\n\tb.Bookmark = append(b.Bookmark[:idx], b.Bookmark[idx+1:]...)\n}", "func (oauthClient *OauthClient) Delete(db *gorm.DB, id uuid.UUID) error {\n\tif err := db.Where(\"id = ?\", id).Delete(&oauthClient).Error; err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (bl *blackList) delete(ip string, port int) 
{\n\tbl.list.Delete(bl.genKey(ip, port))\n}", "func (c *Client) Remove(ctx context.Context, id uint64) error {\n\trequest := protocol.Message{}\n\trequest.Init(4096)\n\tresponse := protocol.Message{}\n\tresponse.Init(4096)\n\n\tprotocol.EncodeRemove(&request, id)\n\n\tif err := c.protocol.Call(ctx, &request, &response); err != nil {\n\t\treturn err\n\t}\n\n\tif err := protocol.DecodeEmpty(&response); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (b *books) Remove(id string) bool {\n\n\tif !b.isOpen() {\n\t\tlog.Panic(\"Repositorio não aberto\")\n\t}\n\n\t_, err := b.db.Exec(\"DELETE FROM CAD_BOOKS WHERE COD_BOOK = ?\", id)\n\n\treturn err == nil\n}", "func (s *PSlice) Remove(addr swarm.Address, po uint8) {\n\ts.Lock()\n\tdefer s.Unlock()\n\n\te, i := s.exists(addr)\n\tif !e {\n\t\treturn\n\t}\n\n\ts.peers = append(s.peers[:i], s.peers[i+1:]...)\n\ts.decDeeper(po)\n}", "func (s *AdoptersService) DeleteAdopterByID(ctx context.Context, adopterID int64) (*Response, error) {\n\tu := fmt.Sprintf(\"/adopter/%v\", adopterID)\n\treq, err := s.client.NewRequest(\"DELETE\", u, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s.client.Do(ctx, req, nil)\n}", "func (store *ProviderBackedStore) Del(id Identifier) {\n\tkey := id.Value()\n\tif _, existing := store.retriever[key]; existing {\n\t\tdelete(store.retriever, key)\n\t\tnewIDs := make([]Identifier, 0, len(store.ids)-1)\n\t\tfor _, oldID := range store.ids {\n\t\t\tif oldID.Value() != key {\n\t\t\t\tnewIDs = append(newIDs, oldID)\n\t\t\t}\n\t\t}\n\t\tstore.ids = newIDs\n\t}\n}", "func (m *MemberService) UpsertAddress(ctx context.Context, op *WithdrawAddressView) (*WithdrawAddressView, error) {\n\tdata, err := m.POST(\"/address\").\n\t\tBody(op).\n\t\tAuth(m.Presign(time.Minute)).\n\t\tDo(ctx).Bytes()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar resp struct {\n\t\tErr\n\t\tAddress *WithdrawAddressView `json:\"address\"`\n\t}\n\n\tif err := jsoniter.Unmarshal(data, &resp); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.Code > 0 {\n\t\treturn nil, resp.Err\n\t}\n\n\treturn resp.Address, nil\n}", "func (oauthClient *OauthClient) UnscoppedGetByID(db *gorm.DB, id uuid.UUID) error {\n\tif err := db.Unscoped().Where(\"id = ?\", id).Find(&oauthClient).Error; err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func DeletePerson(w http.ResponseWriter, req *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tparams := mux.Vars(req)\n\tid, _ := strconv.Atoi(params[\"id\"])\n\tfor index, person := range models.People {\n\t\tif person.ID == id {\n\t\t\tmodels.People = append(models.People[:index], models.People[index+1:]...)\n\t\t}\n\t}\n}", "func (self PostgresDatabase) UnbanAddr(addr string) (err error) {\n _, err = self.conn.Exec(\"DELETE FROM IPBans WHERE addr >>= $1\", addr)\n return\n}", "func (u *Util) RemoveAndRefund(username, hash string) error {\n\tif hash == \"\" {\n\t\treturn nil\n\t}\n\treturn u.UP.RemovePin(username, hash, \"public\")\n}", "func deletePeople(id int) {\n\tsqlStatement := `\nDELETE FROM people\nWHERE id = $1;`\n\t_, err := Db.Exec(sqlStatement, 1)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(\"People Deleted\", id)\n\n}", "func (as *AllState) DeleteBreakByID(id int) bool {\n\tfor i, br := range as.Breaks {\n\t\tif br.ID == id {\n\t\t\tas.Breaks = append(as.Breaks[:i], as.Breaks[i+1:]...)\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (b *BizDAO) DeleteUserByID(id string) error {\n\tquery := bson.M{\"_id\": bson.ObjectIdHex(id)}\n\terr := 
db.C(UCOLLECTION).Remove(query)\n\treturn err\n}", "func removeFromSorted(l *list.List, id ID) {\n\tfor e := l.Front(); e != nil; e = e.Next() {\n\t\tc := e.Value.(Contact)\n\t\tif c.NodeID.Equals(id) {\n\t\t\tl.Remove(e)\n\t\t\treturn\n\t\t}\n\t}\n}", "func RemoveUser(userID string) (err error) {\n\n err = checkInit()\n if err != nil {\n return\n }\n\n err = createError(032)\n\n if _, ok := data[\"users\"].(map[string]interface{})[userID]; ok {\n\n delete(data[\"users\"].(map[string]interface{}), userID)\n err = saveDatabase(data)\n\n return\n }\n\n return\n}", "func (a *Client) PostReturnAddressesByReturnAddressIDDelete(params *PostReturnAddressesByReturnAddressIDDeleteParams, authInfo runtime.ClientAuthInfoWriter) (*PostReturnAddressesByReturnAddressIDDeleteOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewPostReturnAddressesByReturnAddressIDDeleteParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"PostReturnAddressesByReturnAddressIdDelete\",\n\t\tMethod: \"DELETE\",\n\t\tPathPattern: \"/post/return-addresses/{return_address_id}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &PostReturnAddressesByReturnAddressIDDeleteReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*PostReturnAddressesByReturnAddressIDDeleteOK), nil\n\n}", "func (i *ProjectIPServiceOp) Remove(ipReservationID string) (*Response, error) {\n\treturn i.Delete(ipReservationID)\n}", "func RemoveIPReservation(id string) error {\n\tclient, err := NewExtPacketClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, e := client.IPReservations.Remove(id)\n\treturn e\n}" ]
[ "0.55589956", "0.5514438", "0.54790044", "0.54399794", "0.5416836", "0.5332153", "0.5321886", "0.53084344", "0.5301175", "0.52927023", "0.52401966", "0.52376306", "0.52170765", "0.5206461", "0.5174361", "0.5145053", "0.5140159", "0.51046467", "0.50969785", "0.5090812", "0.50850946", "0.50722307", "0.505586", "0.505586", "0.505586", "0.502539", "0.5018256", "0.50030375", "0.4993085", "0.4985592", "0.4978683", "0.49722743", "0.49658704", "0.49619463", "0.4945906", "0.4940191", "0.49373347", "0.49347705", "0.49346188", "0.49238172", "0.49147022", "0.49103484", "0.49024457", "0.48505592", "0.4836375", "0.48356935", "0.48217854", "0.4816126", "0.48148903", "0.48118392", "0.48068374", "0.4797477", "0.4782661", "0.47767282", "0.4776184", "0.47715375", "0.47634056", "0.47628054", "0.47620758", "0.47592083", "0.4751241", "0.47509086", "0.4747385", "0.47379392", "0.4737361", "0.47326583", "0.47323605", "0.47231296", "0.47182882", "0.4716061", "0.4709825", "0.47064674", "0.47013164", "0.46998435", "0.46924448", "0.46803713", "0.46778017", "0.4668058", "0.46652818", "0.46628913", "0.4662191", "0.46503863", "0.46426597", "0.464233", "0.4642163", "0.46413487", "0.46344665", "0.46225837", "0.46142694", "0.4612585", "0.4605176", "0.46049637", "0.46024123", "0.4596137", "0.45915803", "0.45910862", "0.45822528", "0.4574587", "0.45691624", "0.45683935" ]
0.7032371
0
VerifyWithdrawal will verify a withdrawal given the verification token
func (as *AddressService) VerifyWithdrawal(token string) error { if err := as.client.Get(buildString("address/withdraw/verify/", token), nil); err != nil { return err } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (_Token *TokenSession) ExecuteWithdrawal() (*types.Transaction, error) {\n\treturn _Token.Contract.ExecuteWithdrawal(&_Token.TransactOpts)\n}", "func (_Token *TokenTransactorSession) ExecuteWithdrawal() (*types.Transaction, error) {\n\treturn _Token.Contract.ExecuteWithdrawal(&_Token.TransactOpts)\n}", "func (_Token *TokenTransactor) ExecuteWithdrawal(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _Token.contract.Transact(opts, \"executeWithdrawal\")\n}", "func (broadcast *Broadcast) ValidatorWithdraw(ctx context.Context, username, amount,\n\tprivKeyHex string, seq int64) (*model.BroadcastResponse, error) {\n\tmsg := model.ValidatorWithdrawMsg{\n\t\tUsername: username,\n\t\tAmount: amount,\n\t}\n\treturn broadcast.broadcastTransaction(ctx, msg, privKeyHex, seq, \"\", false)\n}", "func (_Lmc *LmcTransactor) Withdraw(opts *bind.TransactOpts, _tokenAmount *big.Int) (*types.Transaction, error) {\n\treturn _Lmc.contract.Transact(opts, \"withdraw\", _tokenAmount)\n}", "func VerifyTransaction(contract BasicFluenceContract, tx Transaction, minDeposit int64) {\n // checking that the client actually exists in the contract\n var deposit, ok = contract.ClientDeposits[tx.Seal.PublicKey]\n assertTrue(ok)\n\n // checking that the client has enough funds\n assertTrue(deposit >= minDeposit)\n\n // checking that the transaction was signed by this client\n assertTrue(Verify(tx.Seal, Hash(tx.Invoke)))\n}", "func withdraw(ctx iscp.Sandbox) (dict.Dict, error) {\n\tstate := ctx.State()\n\tmustCheckLedger(state, \"accounts.withdraw.begin\")\n\n\tif ctx.Caller().Address().Equals(ctx.ChainID().AsAddress()) {\n\t\t// if the caller is on the same chain, do nothing\n\t\treturn nil, nil\n\t}\n\ttokensToWithdraw, ok := GetAccountBalances(state, ctx.Caller())\n\tif !ok {\n\t\t// empty balance, nothing to withdraw\n\t\treturn nil, nil\n\t}\n\t// will be sending back to default entry point\n\ta := assert.NewAssert(ctx.Log())\n\t// bring balances to the current account (owner's account). It is needed for subsequent Send call\n\ta.Require(MoveBetweenAccounts(state, ctx.Caller(), commonaccount.Get(ctx.ChainID()), tokensToWithdraw),\n\t\t\"accounts.withdraw.inconsistency. failed to move tokens to owner's account\")\n\n\t// add incoming tokens (after fees) to the balances to be withdrawn. Otherwise they would end up in the common account\n\ttokensToWithdraw.AddAll(ctx.IncomingTransfer())\n\t// Send call assumes tokens are in the current account\n\ta.Require(ctx.Send(ctx.Caller().Address(), tokensToWithdraw, &iscp.SendMetadata{\n\t\tTargetContract: ctx.Caller().Hname(),\n\t}), \"accounts.withdraw.inconsistency: failed sending tokens \")\n\n\tctx.Log().Debugf(\"accounts.withdraw.success. 
Sent to address %s\", tokensToWithdraw.String())\n\n\tmustCheckLedger(state, \"accounts.withdraw.exit\")\n\treturn nil, nil\n}", "func (_WELV9 *WELV9Transactor) Withdraw(opts *bind.TransactOpts, wad *big.Int) (*types.Transaction, error) {\n\treturn _WELV9.contract.Transact(opts, \"withdraw\", wad)\n}", "func TstCheckWithdrawalStatusMatches(t *testing.T, s1, s2 WithdrawalStatus) {\n\tif s1.Fees() != s2.Fees() {\n\t\tt.Fatalf(\"Wrong amount of network fees; want %d, got %d\", s1.Fees(), s2.Fees())\n\t}\n\n\tif !reflect.DeepEqual(s1.Sigs(), s2.Sigs()) {\n\t\tt.Fatalf(\"Wrong tx signatures; got %x, want %x\", s1.Sigs(), s2.Sigs())\n\t}\n\n\tif !reflect.DeepEqual(s1.NextInputAddr(), s2.NextInputAddr()) {\n\t\tt.Fatalf(\"Wrong NextInputAddr; got %v, want %v\", s1.NextInputAddr(), s2.NextInputAddr())\n\t}\n\n\tif !reflect.DeepEqual(s1.NextChangeAddr(), s2.NextChangeAddr()) {\n\t\tt.Fatalf(\"Wrong NextChangeAddr; got %v, want %v\", s1.NextChangeAddr(), s2.NextChangeAddr())\n\t}\n\n\tif !reflect.DeepEqual(s1.Outputs(), s2.Outputs()) {\n\t\tt.Fatalf(\"Wrong WithdrawalOutputs; got %v, want %v\", s1.Outputs(), s2.Outputs())\n\t}\n\n\tif !reflect.DeepEqual(s1.transactions, s2.transactions) {\n\t\tt.Fatalf(\"Wrong transactions; got %v, want %v\", s1.transactions, s2.transactions)\n\t}\n\n\t// The above checks could be replaced by this one, but when they fail the\n\t// failure msg wouldn't give us much clue as to what is not equal, so we do\n\t// the individual checks above and use this one as a catch-all check in case\n\t// we forget to check any of the individual fields.\n\tif !reflect.DeepEqual(s1, s2) {\n\t\tt.Fatalf(\"Wrong WithdrawalStatus; got %v, want %v\", s1, s2)\n\t}\n}", "func (_WandappETH *WandappETHSession) Withdraw(proof []byte) (*types.Transaction, error) {\n\treturn _WandappETH.Contract.Withdraw(&_WandappETH.TransactOpts, proof)\n}", "func (_Lmc *LmcSession) Withdraw(_tokenAmount *big.Int) (*types.Transaction, error) {\n\treturn _Lmc.Contract.Withdraw(&_Lmc.TransactOpts, _tokenAmount)\n}", "func (_WandappETH *WandappETHTransactor) Withdraw(opts *bind.TransactOpts, proof []byte) (*types.Transaction, error) {\n\treturn _WandappETH.contract.Transact(opts, \"withdraw\", proof)\n}", "func (_Lmc *LmcTransactorSession) Withdraw(_tokenAmount *big.Int) (*types.Transaction, error) {\n\treturn _Lmc.Contract.Withdraw(&_Lmc.TransactOpts, _tokenAmount)\n}", "func (_WandappETH *WandappETHTransactorSession) Withdraw(proof []byte) (*types.Transaction, error) {\n\treturn _WandappETH.Contract.Withdraw(&_WandappETH.TransactOpts, proof)\n}", "func (_TokenStakingEscrow *TokenStakingEscrowTransactor) WithdrawRevoked(opts *bind.TransactOpts, operator common.Address) (*types.Transaction, error) {\n\treturn _TokenStakingEscrow.contract.Transact(opts, \"withdrawRevoked\", operator)\n}", "func (_IWETH *IWETHSession) Withdraw(arg0 *big.Int) (*types.Transaction, error) {\r\n\treturn _IWETH.Contract.Withdraw(&_IWETH.TransactOpts, arg0)\r\n}", "func (_EtherDelta *EtherDeltaTransactor) WithdrawToken(opts *bind.TransactOpts, token common.Address, amount *big.Int) (*types.Transaction, error) {\n\treturn _EtherDelta.contract.Transact(opts, \"withdrawToken\", token, amount)\n}", "func (_IWETH *IWETHTransactorSession) Withdraw(arg0 *big.Int) (*types.Transaction, error) {\r\n\treturn _IWETH.Contract.Withdraw(&_IWETH.TransactOpts, arg0)\r\n}", "func (_BondedECDSAKeep *BondedECDSAKeepTransactor) Withdraw(opts *bind.TransactOpts, _member common.Address) (*types.Transaction, error) {\n\treturn 
_BondedECDSAKeep.contract.Transact(opts, \"withdraw\", _member)\n}", "func (_Smartchef *SmartchefTransactor) Withdraw(opts *bind.TransactOpts, _amount *big.Int) (*types.Transaction, error) {\n\treturn _Smartchef.contract.Transact(opts, \"withdraw\", _amount)\n}", "func (_ElvTradable *ElvTradableTransactor) Withdraw(opts *bind.TransactOpts, _amount *big.Int) (*types.Transaction, error) {\n\treturn _ElvTradable.contract.Transact(opts, \"withdraw\", _amount)\n}", "func (sc stakingClient) Withdraw(fromInfo keys.Info, passWd, coinsStr, memo string, accNum, seqNum uint64) (\n\tresp sdk.TxResponse, err error) {\n\tif err = params.CheckKeyParams(fromInfo, passWd); err != nil {\n\t\treturn\n\t}\n\n\tcoin, err := sdk.ParseDecCoin(coinsStr)\n\tif err != nil {\n\t\treturn resp, fmt.Errorf(\"failed : parse Coins [%s] error: %s\", coinsStr, err)\n\t}\n\n\tmsg := types.NewMsgWithdraw(fromInfo.GetAddress(), coin)\n\n\treturn sc.BuildAndBroadcast(fromInfo.GetName(), passWd, memo, []sdk.Msg{msg}, accNum, seqNum)\n\n}", "func (s *Service) WithdrawSuccess(c context.Context, orderNo int64, tradeStatus int) (err error) {\n\tupWithdraw, err := s.dao.QueryUpWithdrawByID(c, orderNo)\n\tif err != nil {\n\t\tlog.Error(\"s.dao.QueryUpWithdrawByID error(%v)\", err)\n\t\treturn\n\t}\n\n\tif tradeStatus != _withdrawSuccess {\n\t\tlog.Info(\"param tradeStatus(%d) != withdraw success(2)\", tradeStatus)\n\t\treturn\n\t}\n\n\tif upWithdraw.State == _withdrawSuccess {\n\t\tlog.Info(\"withdraw has successed already\")\n\t\treturn\n\t}\n\n\ttx, err := s.dao.BeginTran(c)\n\tif err != nil {\n\t\tlog.Error(\"s.dao.BeginTran error(%v)\", err)\n\t\treturn\n\t}\n\n\t// update up_income_withdraw state\n\trows, err := s.dao.TxUpdateUpWithdrawState(tx, orderNo, _withdrawSuccess)\n\tif err != nil {\n\t\ttx.Rollback()\n\t\tlog.Error(\"s.dao.UpdateUpWithdrawState error(%v)\", err)\n\t\treturn\n\t}\n\tif rows != 1 {\n\t\ttx.Rollback()\n\t\tlog.Error(\"s.dao.UpdateUpWithdrawState Update withdraw record error id(%d)\", orderNo)\n\t\treturn\n\t}\n\n\t// update up_account withdraw\n\trows, err = s.dao.TxUpdateUpAccountWithdraw(tx, upWithdraw.MID, upWithdraw.WithdrawIncome)\n\tif err != nil {\n\t\ttx.Rollback()\n\t\tlog.Error(\"s.dao.UpdateUpAccountWithdraw error(%v)\", err)\n\t\treturn\n\t}\n\tif rows != 1 {\n\t\ttx.Rollback()\n\t\tlog.Error(\"s.dao.UpdateUpAccountWithdraw Update up account record error id(%d)\", orderNo)\n\t\treturn\n\t}\n\n\tmaxUpWithdrawDateVersion, err := s.dao.TxQueryMaxUpWithdrawDateVersion(tx, upWithdraw.MID)\n\tif err != nil {\n\t\ttx.Rollback()\n\t\tlog.Error(\"s.dao.QueryMaxUpWithdrawDateVersion error(%v)\", err)\n\t\treturn\n\t}\n\n\ttime := 0\n\tvar version int64\n\tfor {\n\t\tversion, err = s.dao.TxQueryUpAccountVersion(tx, upWithdraw.MID)\n\t\tif err != nil {\n\t\t\ttx.Rollback()\n\t\t\tlog.Error(\"s.dao.QueryUpAccountVersion error(%v)\", err)\n\t\t\treturn\n\t\t}\n\t\tif maxUpWithdrawDateVersion == \"\" {\n\t\t\tmaxUpWithdrawDateVersion = upWithdraw.DateVersion\n\t\t}\n\n\t\trows, err = s.dao.TxUpdateUpAccountUnwithdrawIncome(tx, upWithdraw.MID, maxUpWithdrawDateVersion, version)\n\t\tif err != nil {\n\t\t\ttx.Rollback()\n\t\t\tlog.Error(\"s.dao.UpdateUpAccountUnwithdrawIncome error(%v)\", err)\n\t\t\treturn\n\t\t}\n\t\tif rows == 1 {\n\t\t\tif err = tx.Commit(); err != nil {\n\t\t\t\tlog.Error(\"tx.Commit error\")\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\ttime++\n\t\tif time >= 10 {\n\t\t\ttx.Rollback()\n\t\t\tlog.Info(\"try to synchronize unwithdraw income 10 times error mid(%d)\", 
upWithdraw.MID)\n\t\t\terr = fmt.Errorf(\"try to synchronize unwithdraw income 10 times error mid(%d)\", upWithdraw.MID)\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn\n}", "func (_BREMICO *BREMICOTransactor) ConfirmWithdraw(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _BREMICO.contract.Transact(opts, \"confirmWithdraw\")\n}", "func (_TokenStakingEscrow *TokenStakingEscrowTransactor) Withdraw(opts *bind.TransactOpts, operator common.Address) (*types.Transaction, error) {\n\treturn _TokenStakingEscrow.contract.Transact(opts, \"withdraw\", operator)\n}", "func (_BREMICO *BREMICOSession) ConfirmWithdraw() (*types.Transaction, error) {\n\treturn _BREMICO.Contract.ConfirmWithdraw(&_BREMICO.TransactOpts)\n}", "func (_IWETH *IWETHTransactor) Withdraw(opts *bind.TransactOpts, arg0 *big.Int) (*types.Transaction, error) {\r\n\treturn _IWETH.contract.Transact(opts, \"withdraw\", arg0)\r\n}", "func (_Wmatic *WmaticTransactor) Withdraw(opts *bind.TransactOpts, wad *big.Int) (*types.Transaction, error) {\n\treturn _Wmatic.contract.Transact(opts, \"withdraw\", wad)\n}", "func (_BondedECDSAKeep *BondedECDSAKeepTransactorSession) Withdraw(_member common.Address) (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.Contract.Withdraw(&_BondedECDSAKeep.TransactOpts, _member)\n}", "func (s *StorageECR721Fixed) Withdraw() error {\n\tevmTr, err := s.session.Withdraw()\n\tif err != nil {\n\t\terr = errors.Wrapf(err, \"failed to execute %s\", Withdraw)\n\t\tLogger.Error(err)\n\t\treturn err\n\t}\n\n\tLogger.Info(\"Executed \", Withdraw, \" hash: \", evmTr.Hash())\n\n\treturn nil\n}", "func (_BondedECDSAKeep *BondedECDSAKeepSession) Withdraw(_member common.Address) (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.Contract.Withdraw(&_BondedECDSAKeep.TransactOpts, _member)\n}", "func (orc *Oracle) verifyTotalDailyWithdrawal(ts time.Time, state *OracleState, amount *big.Int) bool {\n\tcurrentAmount := state.TotalWithdrawalAmount.Value.Int\n\n\t// state timestamp always maintains 00:00:00 UTC timestamp\n\tdayStart := time.Unix(state.Timestamp, 0)\n\t// If current timestamp is after state timestamp for a day, assumed that we the total amount\n\t// should be reset first.\n\tif ts.Sub(dayStart).Hours() > 24 {\n\t\tcurrentAmount = big.NewInt(0)\n\t}\n\n\tnextAmount := big.NewInt(0).Add(currentAmount, amount)\n\treturn nextAmount.Cmp(orc.maxTotalDailyWithdrawalAmount.Int) > 0\n}", "func (_Wmatic *WmaticSession) Withdraw(wad *big.Int) (*types.Transaction, error) {\n\treturn _Wmatic.Contract.Withdraw(&_Wmatic.TransactOpts, wad)\n}", "func (_Wmatic *WmaticTransactorSession) Withdraw(wad *big.Int) (*types.Transaction, error) {\n\treturn _Wmatic.Contract.Withdraw(&_Wmatic.TransactOpts, wad)\n}", "func (_Token *TokenSession) InitiateWithdrawal() (*types.Transaction, error) {\n\treturn _Token.Contract.InitiateWithdrawal(&_Token.TransactOpts)\n}", "func (p *TDepositWithdrawServiceClient) AuditDepositWithdraw(ctx context.Context, traceId string, status string, mark string, withdrawId int32) (r bool, err error) {\n var _args5 TDepositWithdrawServiceAuditDepositWithdrawArgs\n _args5.TraceId = traceId\n _args5.Status = status\n _args5.Mark = mark\n _args5.WithdrawId = withdrawId\n var _result6 TDepositWithdrawServiceAuditDepositWithdrawResult\n if err = p.c.Call(ctx, \"auditDepositWithdraw\", &_args5, &_result6); err != nil {\n return\n }\n return _result6.GetSuccess(), nil\n}", "func (orc *Oracle) verifyAccountDailyWithdrawal(ts time.Time, account *Account, amount *big.Int) bool {\n\tcurrentAmount := 
big.NewInt(0)\n\tif account.TotalWithdrawalAmount != nil {\n\t\tcurrentAmount = account.TotalWithdrawalAmount.Value.Int\n\t}\n\n\t// each daily withdrawal period starts at 00:00:00 UTC\n\tdayStart := time.Unix(account.Timestamp, 0)\n\t// daily limit is reset every 24 hours\n\tif ts.Sub(dayStart).Hours() > 24 {\n\t\tcurrentAmount = big.NewInt(0)\n\t}\n\n\tnextAmount := big.NewInt(0).Add(currentAmount, amount)\n\treturn nextAmount.Cmp(orc.maxPerAccountDailyWithdrawalAmount.Int) > 0\n}", "func (f *Fortune) Withdrawal(amount decimal.Decimal) {\n\tf.active = f.active.Sub(amount)\n}", "func (_DelegateProfile *DelegateProfileTransactorSession) Withdraw() (*types.Transaction, error) {\n\treturn _DelegateProfile.Contract.Withdraw(&_DelegateProfile.TransactOpts)\n}", "func (_DelegateProfile *DelegateProfileTransactor) Withdraw(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _DelegateProfile.contract.Transact(opts, \"withdraw\")\n}", "func (_EtherDelta *EtherDeltaSession) WithdrawToken(token common.Address, amount *big.Int) (*types.Transaction, error) {\n\treturn _EtherDelta.Contract.WithdrawToken(&_EtherDelta.TransactOpts, token, amount)\n}", "func (_BREMICO *BREMICOTransactorSession) ConfirmWithdraw() (*types.Transaction, error) {\n\treturn _BREMICO.Contract.ConfirmWithdraw(&_BREMICO.TransactOpts)\n}", "func (_EtherDelta *EtherDeltaTransactorSession) WithdrawToken(token common.Address, amount *big.Int) (*types.Transaction, error) {\n\treturn _EtherDelta.Contract.WithdrawToken(&_EtherDelta.TransactOpts, token, amount)\n}", "func (_DelegateProfile *DelegateProfileSession) Withdraw() (*types.Transaction, error) {\n\treturn _DelegateProfile.Contract.Withdraw(&_DelegateProfile.TransactOpts)\n}", "func (user User) Withdraw(ctx context.Context, input *TransferInput, pin string) (*Snapshot, error) {\n\tif len(input.TraceID) == 0 {\n\t\tinput.TraceID = uuid.Must(uuid.NewV4()).String()\n\t}\n\tparas := utils.UnselectFields(input)\n\tdata, err := user.RequestWithPIN(ctx, \"POST\", \"/withdrawals\", paras, pin)\n\tif err != nil {\n\t\treturn nil, requestError(err)\n\t}\n\n\tvar resp struct {\n\t\tSnapshot *struct {\n\t\t\t*Snapshot\n\t\t\tMemo string `json:\"memo,omitempty\"`\n\t\t} `json:\"data,omitempty\"`\n\t\tError *Error `json:\"error,omitempty\"`\n\t}\n\tif err = json.Unmarshal(data, &resp); err != nil {\n\t\treturn nil, requestError(err)\n\t} else if resp.Error != nil {\n\t\treturn nil, resp.Error\n\t}\n\n\tresp.Snapshot.Data = resp.Snapshot.Memo\n\tif !input.verify(*resp.Snapshot.Snapshot) {\n\t\treturn nil, traceError()\n\t}\n\n\treturn resp.Snapshot.Snapshot, nil\n}", "func (_ElvTradableLocal *ElvTradableLocalTransactor) Withdraw(opts *bind.TransactOpts, _amount *big.Int) (*types.Transaction, error) {\n\treturn _ElvTradableLocal.contract.Transact(opts, \"withdraw\", _amount)\n}", "func (_Token *TokenTransactorSession) InitiateWithdrawal() (*types.Transaction, error) {\n\treturn _Token.Contract.InitiateWithdrawal(&_Token.TransactOpts)\n}", "func (_Smartchef *SmartchefSession) Withdraw(_amount *big.Int) (*types.Transaction, error) {\n\treturn _Smartchef.Contract.Withdraw(&_Smartchef.TransactOpts, _amount)\n}", "func (_Token *TokenTransactor) InitiateWithdrawal(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _Token.contract.Transact(opts, \"initiateWithdrawal\")\n}", "func (_XStaking *XStakingTransactor) Withdraw(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) {\n\treturn _XStaking.contract.Transact(opts, \"withdraw\", amount)\n}", "func (tc 
*Client) VerifyToken(ctx context.Context, token string,\n\tclient apiclient.HttpRunner) error {\n\n\tl := log.FromContext(ctx)\n\n\t// TODO sanitize token\n\n\turl := utils.JoinURL(tc.conf.TenantAdmAddr, TenantVerifyUri)\n\n\treq, err := http.NewRequest(http.MethodPost, url, nil)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to create request to tenant administrator\")\n\t}\n\n\t// tenant token is passed in Authorization header\n\treq.Header.Add(\"Authorization\", \"Bearer \"+token)\n\n\tctx, cancel := context.WithTimeout(ctx, tc.conf.Timeout)\n\tdefer cancel()\n\n\trsp, err := client.Do(req.WithContext(ctx))\n\tif err != nil {\n\t\tl.Errorf(\"tenantadm request failed: %v\", err)\n\t\treturn errors.Wrap(err, \"request to verify token failed\")\n\t}\n\tdefer rsp.Body.Close()\n\n\tswitch rsp.StatusCode {\n\n\tcase http.StatusUnauthorized: // 401, verification result negative\n\t\tapiErr := rest_utils.ParseApiError(rsp.Body)\n\t\tif !rest_utils.IsApiError(apiErr) {\n\t\t\treturn errors.Errorf(\"failed to parse tenantadm api error response\")\n\t\t}\n\n\t\treturn MakeErrTokenVerificationFailed(apiErr)\n\n\tcase http.StatusOK: // 200, token verified\n\t\treturn nil\n\tdefault:\n\t\treturn errors.Errorf(\"token verification request returned unexpected status %v\",\n\t\t\trsp.StatusCode)\n\t}\n}", "func (_Smartchef *SmartchefTransactorSession) Withdraw(_amount *big.Int) (*types.Transaction, error) {\n\treturn _Smartchef.Contract.Withdraw(&_Smartchef.TransactOpts, _amount)\n}", "func (_Contract *ContractTransactor) Withdraw(opts *bind.TransactOpts, value *big.Int) (*types.Transaction, error) {\n\treturn _Contract.contract.Transact(opts, \"withdraw\", value)\n}", "func Withdraw(interactor account.Interactor) fiber.Handler {\n\n\treturn func(ctx *fiber.Ctx) error {\n\t\tvar userDetails = ctx.Locals(\"userDetails\").(map[string]string)\n\t\tuserId := userDetails[\"userId\"]\n\n\t\tvar p param\n\t\t_ = ctx.BodyParser(&p)\n\n\t\tbalance, err := interactor.Withdraw(uuid.FromStringOrNil(userId), p.Amount)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn ctx.JSON(map[string]interface{}{\n\t\t\t\"message\": fmt.Sprintf(\"Amount successfully withdrawn. 
New balance %v\", balance),\n\t\t\t\"balance\": balance,\n\t\t\t\"userId\": userId,\n\t\t})\n\t}\n}", "func (c *Client) Withdraw(ctx context.Context, foreignID string, amount float64, currency, address string) (Withdrawal, error) {\n\treqBody := map[string]interface{}{\n\t\t\"foreign_id\": foreignID,\n\t\t\"amount\": amount,\n\t\t\"currency\": currency,\n\t\t\"address\": address,\n\t}\n\n\treqJSON, err := json.Marshal(reqBody)\n\tif err != nil {\n\t\treturn Withdrawal{}, fmt.Errorf(\"request body marshaling error: %w\", err)\n\t}\n\n\twithdrawalURL, err := joinURL(c.api, withdrawalEndpoint)\n\tif err != nil {\n\t\treturn Withdrawal{}, fmt.Errorf(\"request url creating error: %w\", err)\n\t}\n\n\treq, err := http.NewRequestWithContext(ctx, http.MethodPost, withdrawalURL.String(), bytes.NewBuffer(reqJSON))\n\tif err != nil {\n\t\treturn Withdrawal{}, fmt.Errorf(\"request creating error: %w\", err)\n\t}\n\n\tsig, err := createHmac(c.secret, reqJSON)\n\tif err != nil {\n\t\treturn Withdrawal{}, fmt.Errorf(\"hmac signature creationg error: %w\", err)\n\t}\n\n\treq.Header.Set(contentTypeHeader, jsonContentType)\n\treq.Header.Set(keyHeader, c.apiKey)\n\treq.Header.Set(signatureHeader, sig)\n\n\tresp, err := c.client.Do(req)\n\tif err != nil {\n\t\treturn Withdrawal{}, fmt.Errorf(\"request error: %w\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\terr = ensureSuccessResponse(resp)\n\tif err != nil {\n\t\treturn Withdrawal{}, fmt.Errorf(\"request failed: %w\", err)\n\t}\n\n\trespBody := struct {\n\t\tData Withdrawal `json:\"data\"`\n\t}{}\n\n\terr = json.NewDecoder(resp.Body).Decode(&respBody)\n\tif err != nil {\n\t\treturn Withdrawal{}, fmt.Errorf(\"response unmarshaling error: %w\", err)\n\t}\n\n\treturn respBody.Data, nil\n}", "func (_TokenStakingEscrow *TokenStakingEscrowSession) WithdrawRevoked(operator common.Address) (*types.Transaction, error) {\n\treturn _TokenStakingEscrow.Contract.WithdrawRevoked(&_TokenStakingEscrow.TransactOpts, operator)\n}", "func (c *Client) Withdraw(ctx context.Context, p *WithdrawRequestBody) (err error) {\n\t_, err = c.WithdrawEndpoint(ctx, p)\n\treturn\n}", "func (_BREMICO *BREMICOTransactor) Withdraw(opts *bind.TransactOpts, _value *big.Int) (*types.Transaction, error) {\n\treturn _BREMICO.contract.Transact(opts, \"withdraw\", _value)\n}", "func (w *Wallet) Withdraw(amount Bitcoin) error {\n\n\tif amount > w.balance {\n\t\treturn errors.New(\"cannot withdraw, insufficient funds\")\n\t}\n\tw.balance -= amount\n\treturn nil\n}", "func (_XStaking *XStakingSession) Withdraw(amount *big.Int) (*types.Transaction, error) {\n\treturn _XStaking.Contract.Withdraw(&_XStaking.TransactOpts, amount)\n}", "func (_Contract *ContractTransactorSession) Withdraw(value *big.Int) (*types.Transaction, error) {\n\treturn _Contract.Contract.Withdraw(&_Contract.TransactOpts, value)\n}", "func (_XStaking *XStakingTransactorSession) Withdraw(amount *big.Int) (*types.Transaction, error) {\n\treturn _XStaking.Contract.Withdraw(&_XStaking.TransactOpts, amount)\n}", "func (_Cakevault *CakevaultTransactor) EmergencyWithdraw(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _Cakevault.contract.Transact(opts, \"emergencyWithdraw\")\n}", "func (c *Controller) FullWithdrawal(ctx context.Context) {\n\t_ = c.BGPMgr.ConfigurePeers(ctx, nil, nil) // cannot fail, no need for error handling\n}", "func (c *Controller) FullWithdrawal(ctx context.Context) {\n\t_ = c.BGPMgr.ConfigurePeers(ctx, nil, nil) // cannot fail, no need for error handling\n}", "func (u Usecase) Withdraw(ctx 
context.Context, accID vos.AccountID, amount vos.Money) error {\n\tconst operation = \"accounts.Usecase.Withdraw\"\n\n\tlog := logger.FromCtx(ctx).WithFields(logrus.Fields{\n\t\t\"accID\": accID,\n\t\t\"amount\": amount.Int(),\n\t})\n\n\tlog.Infoln(\"processing a withdrawal\")\n\n\tif amount <= 0 {\n\t\treturn ErrInvalidAmount\n\t}\n\n\tacc, err := u.GetAccountByID(ctx, accID)\n\tif err != nil {\n\t\treturn domain.Error(operation, err)\n\t}\n\n\tif acc.Balance < amount {\n\t\treturn ErrInsufficientBalance\n\t}\n\n\terr = u.accRepo.Withdraw(ctx, accID, amount)\n\n\tif err != nil {\n\t\treturn domain.Error(operation, err)\n\t}\n\n\tlog.Infoln(\"withdrawal successfully processed\")\n\n\treturn nil\n}", "func ExampleVerifyingToken() {\n\ttoken := \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE0OTc4OTc1NjYsIm5hbWUiOiJ0cnVlIiwidXNlciI6InRydWUifQ.OE3v4g-_Grx18htmaQAOt2y7udW_RUuvv1nELK4Z-Sk\"\n\tValidateToken(token)\n}", "func (_TokenStakingEscrow *TokenStakingEscrowSession) Withdraw(operator common.Address) (*types.Transaction, error) {\n\treturn _TokenStakingEscrow.Contract.Withdraw(&_TokenStakingEscrow.TransactOpts, operator)\n}", "func (_Contract *ContractSession) Withdraw(value *big.Int) (*types.Transaction, error) {\n\treturn _Contract.Contract.Withdraw(&_Contract.TransactOpts, value)\n}", "func (_Cakevault *CakevaultTransactor) Withdraw(opts *bind.TransactOpts, _shares *big.Int) (*types.Transaction, error) {\n\treturn _Cakevault.contract.Transact(opts, \"withdraw\", _shares)\n}", "func (server *OpencxServer) withdrawFromLightning(params *coinparam.Params) (withdrawFunction func(*koblitz.PublicKey, int64) (string, error), err error) {\n\n\twithdrawFunction = func(pubkey *koblitz.PublicKey, amount int64) (txid string, err error) {\n\n\t\tif amount <= 0 {\n\t\t\terr = fmt.Errorf(\"Can't withdraw <= 0\")\n\t\t\treturn\n\t\t}\n\n\t\t// calculate fee, do this using subwallet because the funding will all be done through lit\n\t\t// TODO: figure out if there is redundancy with server.WalletMap and server.ExchangeNode.SubWallet and\n\t\t// if that redundancy is necessary. It might be\n\t\tfee := server.ExchangeNode.SubWallet[params.HDCoinType].Fee() * 1000\n\t\tif amount < consts.MinOutput+fee {\n\t\t\terr = fmt.Errorf(\"You can't withdraw any less than %d %s\", consts.MinOutput+fee, params.Name)\n\t\t\treturn\n\t\t}\n\n\t\tvar peerIdx uint32\n\t\tif peerIdx, err = server.GetPeerFromPubkey(pubkey); err != nil {\n\t\t\terr = fmt.Errorf(\"You may not have ever connected with the exchange, or you're using a different identity. The exchange can only authenticate for channel creating if you are the node: \\n%s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tlogging.Infof(\"Checking if connected to peer\")\n\n\t\t// if we already have a channel and we can, we should push\n\t\tif !server.ExchangeNode.ConnectedToPeer(peerIdx) {\n\t\t\terr = fmt.Errorf(\"Not connected to peer! Please connect to the exchange. We don't know how to connect to you\")\n\t\t\treturn\n\t\t}\n\n\t\t// calculate capacity as a function of the amount to be sent\n\t\tvar ccap int64\n\t\tif amount < consts.MinChanCapacity {\n\t\t\tccap = consts.MinChanCapacity\n\t\t} else {\n\t\t\tccap = amount + consts.MinOutput + fee\n\t\t}\n\n\t\t// TODO: this should only happen when we get a proof that the other person actually took the withdraw / updated the state. 
We don't have a guarantee that they will always accept\n\n\t\t// clearing settlement layer\n\t\tif err = server.CreditUser(pubkey, uint64(amount), params); err != nil {\n\t\t\terr = fmt.Errorf(\"Error while crediting user for CreateChannel: %s\\n\", err)\n\t\t\treturn\n\t\t}\n\n\t\t// check if any of the channels are of the correct param and have enough capacity (-[min+fee])\n\n\t\t// make data but we don't really want any\n\t\tnoData := new([32]byte)\n\n\t\tlogging.Infof(\"Trying to fund channel\")\n\t\t// retrieve chanIdx because we need it for qchan for outpoint hash, if that's not useful anymore just make this chanIdx => _\n\t\tvar chanIdx uint32\n\t\tif chanIdx, err = server.ExchangeNode.FundChannel(peerIdx, params.HDCoinType, ccap, amount, *noData); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tlogging.Infof(\"Getting qchanidx\")\n\t\t// get qchan so we can get the outpoint hash\n\t\tvar qchan *qln.Qchan\n\t\tif qchan, err = server.ExchangeNode.GetQchanByIdx(chanIdx); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tlogging.Infof(\"We're pretty much done with this withdraw\")\n\t\t// get outpoint hash because that's useful information to return\n\t\ttxid = qchan.Op.Hash.String()\n\n\t\treturn\n\t}\n\treturn\n}", "func verifyFlow(secret map[string]interface{}, scope string, code string) (*Token, error) {\n\t// Construct a POST request to fetch OAuth token with the verificaton code.\n\tparams := url.Values{\n\t\t\"client_id\": []string{toString(secret[\"client_id\"])},\n\t\t\"code\": []string{code},\n\t\t\"scope\": []string{scope},\n\t\t\"grant_type\": []string{\"authorization_code\"},\n\t\t\"redirect_uri\": []string{oobCallbackUrn},\n\t}\n\tif clientSecret, ok := secret[\"client_secret\"]; ok {\n\t\tparams.Set(\"client_secret\", toString(clientSecret))\n\t}\n\n\t// Send the POST request and return token.\n\treturn retrieveAccessToken(toString(secret[\"token_uri\"]), params)\n}", "func (_TokenStakingEscrow *TokenStakingEscrowTransactorSession) WithdrawRevoked(operator common.Address) (*types.Transaction, error) {\n\treturn _TokenStakingEscrow.Contract.WithdrawRevoked(&_TokenStakingEscrow.TransactOpts, operator)\n}", "func AliceWithdrawFromTxAPIHandler(w http.ResponseWriter, r *http.Request) {\n\tLog := Logger.NewSessionLogger()\n\n\tvar plog PodLog\n\tplog.Result = LOG_RESULT_FAILED\n\tplog.Operation = LOG_OPERATION_TYPE_ALICE_WITHDRAW_FROM_TX\n\n\tdefer func() {\n\t\terr := insertLogToDB(plog)\n\t\tif err != nil {\n\t\t\tLog.Warnf(\"insert log error! %v\", err)\n\t\t\treturn\n\t\t}\n\t\tnodeRecovery(w, Log)\n\t}()\n\n\tsessionID := r.FormValue(\"session_id\")\n\tLog.Infof(\"start withdraw eth from transaction in contract...sessionID=%v\", sessionID)\n\tplog.Detail = fmt.Sprintf(\"sessionID=%v\", sessionID)\n\n\tif sessionID == \"\" {\n\t\tLog.Warnf(\"invalid sessionID. sessionID=%v\", sessionID)\n\t\tfmt.Fprintf(w, RESPONSE_INCOMPLETE_PARAM)\n\t\treturn\n\t}\n\ttx, rs, err := loadAliceFromDB(sessionID)\n\tif err != nil {\n\t\tLog.Warnf(\"failed to load transaction info from db. sessionID=%v, err=%v\", sessionID, err)\n\t\tfmt.Fprintf(w, RESPONSE_READ_DATABASE_FAILED)\n\t\treturn\n\t}\n\tif !rs {\n\t\tLog.Warnf(\"no transaction info loaded. 
sessionID=%v,\", sessionID)\n\t\tfmt.Fprintf(w, RESPONSE_NO_NEED_TO_WITHDRAW)\n\t\treturn\n\t}\n\tif tx.SubMode != TRANSACTION_SUB_MODE_COMPLAINT {\n\t\tLog.Warnf(\"the mode does not need withdraw eth.\")\n\t\tfmt.Fprintf(w, RESPONSE_NO_NEED_TO_WITHDRAW)\n\t\treturn\n\t}\n\tLog.Debugf(\"start send transaction to withdraw eth...sessionID=%v\", sessionID)\n\tt := time.Now()\n\ttxid, err := settleDealForComplaint(sessionID, tx.AliceAddr, tx.BobAddr)\n\tif err != nil {\n\t\tLog.Warnf(\"failed to withdraw eth for Alice from contract. err=%v\", err)\n\t\tfmt.Fprintf(w, RESPONSE_READ_CONTRACT_FAILED)\n\t\treturn\n\t}\n\tLog.Debugf(\"success to send transaction to withdraw eth...txid=%v, sessionID=%v, time cost=%v\", txid, sessionID, time.Since(t))\n\n\tplog.Result = LOG_RESULT_SUCCESS\n\tfmt.Fprintf(w, fmt.Sprintf(RESPONSE_SUCCESS, \"send transaction for withdrawing from contract...\"))\n\treturn\n}", "func (a *Account) Withdraw (amount int) error {\n\tif a.balance < amount {\n\t\treturn errNomoney\n\t}\n\ta.balance -= amount\n\treturn nil\n}", "func (_Vault *VaultSession) Withdraw(inst []byte, heights *big.Int, instPaths [][32]byte, instPathIsLefts []bool, instRoots [32]byte, blkData [32]byte, sigIdxs []*big.Int, sigVs []uint8, sigRs [][32]byte, sigSs [][32]byte) (*types.Transaction, error) {\n\treturn _Vault.Contract.Withdraw(&_Vault.TransactOpts, inst, heights, instPaths, instPathIsLefts, instRoots, blkData, sigIdxs, sigVs, sigRs, sigSs)\n}", "func (dcr *ExchangeWallet) withdraw(addr stdaddr.Address, val, feeRate uint64) (*wire.MsgTx, uint64, error) {\n\tif val == 0 {\n\t\treturn nil, 0, fmt.Errorf(\"cannot withdraw value = 0\")\n\t}\n\tenough := func(sum uint64, size uint32, unspent *compositeUTXO) bool {\n\t\treturn sum+toAtoms(unspent.rpc.Amount) >= val\n\t}\n\tcoins, _, _, _, err := dcr.fund(enough)\n\tif err != nil {\n\t\treturn nil, 0, fmt.Errorf(\"unable to withdraw %s DCR to address %s with feeRate %d atoms/byte: %w\",\n\t\t\tamount(val), addr, feeRate, err)\n\t}\n\n\tmsgTx, sentVal, err := dcr.sendCoins(addr, coins, val, feeRate, true)\n\tif err != nil {\n\t\tif _, retErr := dcr.returnCoins(coins); retErr != nil {\n\t\t\tdcr.log.Errorf(\"Failed to unlock coins: %v\", retErr)\n\t\t}\n\t\treturn nil, 0, err\n\t}\n\treturn msgTx, sentVal, nil\n}", "func (_SingleAuto *SingleAutoTransactor) Withdraw(opts *bind.TransactOpts, _pid *big.Int, _wantAmt *big.Int) (*types.Transaction, error) {\n\treturn _SingleAuto.contract.Transact(opts, \"withdraw\", _pid, _wantAmt)\n}", "func (_Vault *VaultTransactorSession) Withdraw(inst []byte, heights *big.Int, instPaths [][32]byte, instPathIsLefts []bool, instRoots [32]byte, blkData [32]byte, sigIdxs []*big.Int, sigVs []uint8, sigRs [][32]byte, sigSs [][32]byte) (*types.Transaction, error) {\n\treturn _Vault.Contract.Withdraw(&_Vault.TransactOpts, inst, heights, instPaths, instPathIsLefts, instRoots, blkData, sigIdxs, sigVs, sigRs, sigSs)\n}", "func (cust *custInfo) doWithdrawal(amt uint64){\n cAddr, err3 := net.ResolveUDPAddr(\"udp\",\":0\")\n sAddr, err4 := net.ResolveUDPAddr(\"udp\", headAddress)\n connHead, errHead := net.DialUDP(\"udp\",cAddr,sAddr)\n logMessage(\"Status\",\"Connecting to Head for Withdrawal\")\n if(errHead != nil){\n fmt.Println(\"Err\",errHead)\n }\n if(err3!=nil || err4!=nil){\n log.Fatal(\"Connection error\",err4)\n }\n notifyMe(connHead)\n param := ParamsUpdate{cust.reqId,cust.accountNumber,\"withDrawal\",amt}\n b,err1 := json.Marshal(param)\n if(err1 != nil){\n fmt.Println(\"Error\")\n }\n connHead.Write(b)\n 
logMessage(\"Send\",param)\n fmt.Println(\"Withdrawal Request Sent\\n\")\n go receiveMsgFromTail(connTail,\"Withdrawal\")\n defer connHead.Close()\n}", "func (m *WithdrawManager) SubmitWithdrawal(ctx context.Context, req *withdraw.Request) (*withdraw.Response, error) {\n\tif m == nil {\n\t\treturn nil, ErrNilSubsystem\n\t}\n\tif req == nil {\n\t\treturn nil, withdraw.ErrRequestCannotBeNil\n\t}\n\n\texch, err := m.exchangeManager.GetExchangeByName(req.Exchange)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &withdraw.Response{\n\t\tExchange: withdraw.ExchangeResponse{\n\t\t\tName: req.Exchange,\n\t\t},\n\t\tRequestDetails: *req,\n\t}\n\n\t// Determines if the currency can be withdrawn from the exchange\n\terrF := exch.CanWithdraw(req.Currency, asset.Spot)\n\tif errF != nil && !errors.Is(errF, currencystate.ErrCurrencyStateNotFound) { // Suppress not found error\n\t\treturn nil, errF\n\t}\n\n\tif m.isDryRun {\n\t\tlog.Warnln(log.Global, \"Dry run enabled, no withdrawal request will be submitted or have an event created\")\n\t\tresp.ID = withdraw.DryRunID\n\t\tresp.Exchange.Status = \"dryrun\"\n\t\tresp.Exchange.ID = withdraw.DryRunID.String()\n\t} else {\n\t\tvar ret *withdraw.ExchangeResponse\n\t\tif req.Type == withdraw.Crypto {\n\t\t\tif !m.portfolioManager.IsWhiteListed(req.Crypto.Address) {\n\t\t\t\treturn nil, withdraw.ErrStrAddressNotWhiteListed\n\t\t\t}\n\t\t\tif !m.portfolioManager.IsExchangeSupported(req.Exchange, req.Crypto.Address) {\n\t\t\t\treturn nil, withdraw.ErrStrExchangeNotSupportedByAddress\n\t\t\t}\n\t\t}\n\t\tif req.Type == withdraw.Fiat {\n\t\t\tret, err = exch.WithdrawFiatFunds(ctx, req)\n\t\t\tif err != nil {\n\t\t\t\tresp.Exchange.Status = err.Error()\n\t\t\t} else {\n\t\t\t\tresp.Exchange.Status = ret.Status\n\t\t\t\tresp.Exchange.ID = ret.ID\n\t\t\t}\n\t\t} else if req.Type == withdraw.Crypto {\n\t\t\tret, err = exch.WithdrawCryptocurrencyFunds(ctx, req)\n\t\t\tif err != nil {\n\t\t\t\tresp.Exchange.Status = err.Error()\n\t\t\t} else {\n\t\t\t\tresp.Exchange.Status = ret.Status\n\t\t\t\tresp.Exchange.ID = ret.ID\n\t\t\t}\n\t\t}\n\t}\n\tdbwithdraw.Event(resp)\n\tif err == nil {\n\t\twithdraw.Cache.Add(resp.ID, resp)\n\t}\n\treturn resp, err\n}", "func testWithdrawZeroBalance(t *testing.T, n int) {\n\trng := pkgtest.Prng(t)\n\ts := test.NewSetup(t, rng, n)\n\t// create valid state and params\n\tparams, state := channeltest.NewRandomParamsAndState(rng, channeltest.WithParts(s.Parts...), channeltest.WithAssets((*ethchannel.Asset)(&s.Asset)), channeltest.WithIsFinal(true))\n\tagreement := state.Balances.Clone()\n\n\tfor i := range params.Parts {\n\t\tif i%2 == 0 {\n\t\t\tstate.Balances[0][i].SetInt64(0)\n\t\t\tagreement[0][i].SetInt64(0)\n\t\t} // is != 0 otherwise\n\t\tt.Logf(\"Part: %d ShouldFund: %t Bal: %v\", i, i%2 == 1, state.Balances[0][i])\n\t}\n\n\t// fund\n\tct := pkgtest.NewConcurrent(t)\n\tfor i, funder := range s.Funders {\n\t\ti, funder := i, funder\n\t\tgo ct.StageN(\"funding loop\", n, func(rt pkgtest.ConcT) {\n\t\t\treq := channel.NewFundingReq(params, state, channel.Index(i), agreement)\n\t\t\trequire.NoError(rt, funder.Fund(context.Background(), *req), \"funding should succeed\")\n\t\t})\n\t}\n\tct.Wait(\"funding loop\")\n\n\t// register\n\treq := channel.AdjudicatorReq{\n\t\tParams: params,\n\t\tAcc: s.Accs[0],\n\t\tTx: testSignState(t, s.Accs, params, state),\n\t\tIdx: 0,\n\t}\n\trequire.NoError(t, s.Adjs[0].Register(context.Background(), req))\n\t// we don't need to wait for a timeout since we registered a final state\n\n\t// 
withdraw\n\tfor i, _adj := range s.Adjs {\n\t\tadj := _adj\n\t\treq.Acc = s.Accs[i]\n\t\treq.Idx = channel.Index(i)\n\t\t// check that the nonce stays the same for zero balance withdrawals\n\t\tdiff, err := test.NonceDiff(s.Accs[i].Address(), adj, func() error {\n\t\t\treturn adj.Withdraw(context.Background(), req, nil)\n\t\t})\n\t\trequire.NoError(t, err)\n\t\tif i%2 == 0 {\n\t\t\tassert.Zero(t, diff, \"Nonce should stay the same\")\n\t\t} else {\n\t\t\tassert.Equal(t, 1, diff, \"Nonce should increase by 1\")\n\t\t}\n\t}\n\tassertHoldingsZero(context.Background(), t, s.CB, params, state.Assets)\n}", "func (_TokenStakingEscrow *TokenStakingEscrowTransactorSession) Withdraw(operator common.Address) (*types.Transaction, error) {\n\treturn _TokenStakingEscrow.Contract.Withdraw(&_TokenStakingEscrow.TransactOpts, operator)\n}", "func (_TokenStakingEscrow *TokenStakingEscrowCaller) DepositWithdrawnAmount(opts *bind.CallOpts, operator common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _TokenStakingEscrow.contract.Call(opts, out, \"depositWithdrawnAmount\", operator)\n\treturn *ret0, err\n}", "func (_SingleAuto *SingleAutoTransactorSession) Withdraw(_pid *big.Int, _wantAmt *big.Int) (*types.Transaction, error) {\n\treturn _SingleAuto.Contract.Withdraw(&_SingleAuto.TransactOpts, _pid, _wantAmt)\n}", "func (_Cakevault *CakevaultSession) EmergencyWithdraw() (*types.Transaction, error) {\n\treturn _Cakevault.Contract.EmergencyWithdraw(&_Cakevault.TransactOpts)\n}", "func (_Vault *VaultCallerSession) Withdrawed(arg0 [32]byte) (bool, error) {\n\treturn _Vault.Contract.Withdrawed(&_Vault.CallOpts, arg0)\n}", "func (_Vault *VaultTransactor) RequestWithdraw(opts *bind.TransactOpts, incognitoAddress string, token common.Address, amount *big.Int, signData []byte, timestamp []byte) (*types.Transaction, error) {\n\treturn _Vault.contract.Transact(opts, \"requestWithdraw\", incognitoAddress, token, amount, signData, timestamp)\n}", "func (_TokenStakingEscrow *TokenStakingEscrowSession) DepositWithdrawnAmount(operator common.Address) (*big.Int, error) {\n\treturn _TokenStakingEscrow.Contract.DepositWithdrawnAmount(&_TokenStakingEscrow.CallOpts, operator)\n}", "func (_IStakingRewards *IStakingRewardsTransactor) Withdraw(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) {\n\treturn _IStakingRewards.contract.Transact(opts, \"withdraw\", amount)\n}", "func (_IStakingRewards *IStakingRewardsSession) Withdraw(amount *big.Int) (*types.Transaction, error) {\n\treturn _IStakingRewards.Contract.Withdraw(&_IStakingRewards.TransactOpts, amount)\n}", "func (w *xcWallet) UnapproveToken(assetVersion uint32, onConfirm func()) (string, error) {\n\tapprover, ok := w.Wallet.(asset.TokenApprover)\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"%s wallet is not a TokenApprover\", unbip(w.AssetID))\n\t}\n\treturn approver.UnapproveToken(assetVersion, onConfirm)\n}", "func (_TokenStakingEscrow *TokenStakingEscrowFilterer) FilterRevokedDepositWithdrawn(opts *bind.FilterOpts, operator []common.Address, grantManager []common.Address) (*TokenStakingEscrowRevokedDepositWithdrawnIterator, error) {\n\n\tvar operatorRule []interface{}\n\tfor _, operatorItem := range operator {\n\t\toperatorRule = append(operatorRule, operatorItem)\n\t}\n\tvar grantManagerRule []interface{}\n\tfor _, grantManagerItem := range grantManager {\n\t\tgrantManagerRule = append(grantManagerRule, grantManagerItem)\n\t}\n\n\tlogs, sub, err := _TokenStakingEscrow.contract.FilterLogs(opts, 
\"RevokedDepositWithdrawn\", operatorRule, grantManagerRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &TokenStakingEscrowRevokedDepositWithdrawnIterator{contract: _TokenStakingEscrow.contract, event: \"RevokedDepositWithdrawn\", logs: logs, sub: sub}, nil\n}", "func withdraw(res http.ResponseWriter, req *http.Request){\n\tvar result Account\n\n\tcollection := client.Database(Database).Collection(Collection)\n\tparams := url_parser(req.URL.String())\n\tfilter := bson.D{{\"identifier\", clean_string(params[\"account\"])}}\n\terr := collection.FindOne(context.TODO(), filter).Decode(&result)\n\t\n\tchange, err := strconv.ParseFloat(clean_string(params[\"withdrawl\"]), 64)\n\t\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tinitial, err := strconv.ParseFloat(result.Balance, 64)\n\tupdated := strconv.FormatFloat((initial - change), 'f', -1, 64)\n\tresult.Balance = updated\n\n\tif err != nil{\n\t\tfmt.Println(err)\n\t}\n\tentry, err := bson.Marshal(result)\n\t_ , err = collection.ReplaceOne(context.TODO(), filter, entry)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tgenerate_record(clean_string(params[\"account\"]), updated, \"-\"+clean_string(params[\"withdrawl\"]), \"withdrawl\")\n}", "func (a *Account) Withdraw(amount int) error {\n\tif a.balance < amount {\n\t\treturn errorWithdraw\n\t}\n\ta.balance -= amount\n\treturn nil\n}", "func (s *FundServer) Withdraw(amount int) {\n\ts.Transact(func(f *Fund) {\n\t\tf.Withdraw(amount)\n\t})\n}", "func (_Vault *VaultSession) Withdrawed(arg0 [32]byte) (bool, error) {\n\treturn _Vault.Contract.Withdrawed(&_Vault.CallOpts, arg0)\n}", "func (_TokenStakingEscrow *TokenStakingEscrowFilterer) WatchDepositWithdrawn(opts *bind.WatchOpts, sink chan<- *TokenStakingEscrowDepositWithdrawn, operator []common.Address, grantee []common.Address) (event.Subscription, error) {\n\n\tvar operatorRule []interface{}\n\tfor _, operatorItem := range operator {\n\t\toperatorRule = append(operatorRule, operatorItem)\n\t}\n\tvar granteeRule []interface{}\n\tfor _, granteeItem := range grantee {\n\t\tgranteeRule = append(granteeRule, granteeItem)\n\t}\n\n\tlogs, sub, err := _TokenStakingEscrow.contract.WatchLogs(opts, \"DepositWithdrawn\", operatorRule, granteeRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(TokenStakingEscrowDepositWithdrawn)\n\t\t\t\tif err := _TokenStakingEscrow.contract.UnpackLog(event, \"DepositWithdrawn\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func (a *Account) Withdraw(amount int) error {\n\tif amount > a.Balance {\n\t\treturn fmt.Errorf(\"account: not enough funds\")\n\t}\n\n\ta.Balance -= amount\n\n\treturn nil\n}", "func (w *Wallet) Withdraw(amount Shivcoin) error {\n\tif amount < w.balance {\n\t\tw.balance -= amount\n\t\treturn nil\n\t} else {\n\t\treturn ErrInsufficientFunds\n\t}\n}" ]
[ "0.6124915", "0.60527366", "0.59476393", "0.5926781", "0.5890346", "0.5857869", "0.58281296", "0.57886124", "0.578397", "0.57604176", "0.5714432", "0.5687001", "0.56778246", "0.56577224", "0.5635493", "0.56080854", "0.559661", "0.55527294", "0.5551587", "0.5543905", "0.5539392", "0.55374396", "0.55363804", "0.5533404", "0.55154246", "0.5508218", "0.5497701", "0.5487953", "0.54799235", "0.54728186", "0.5467871", "0.54674006", "0.54667133", "0.5464994", "0.54539114", "0.54503506", "0.5436821", "0.54330456", "0.5428306", "0.542579", "0.5420984", "0.54146737", "0.5411993", "0.53700924", "0.53515595", "0.5348653", "0.53340477", "0.533228", "0.5323173", "0.53192323", "0.53191894", "0.5305408", "0.5304224", "0.530391", "0.52643543", "0.5259291", "0.5241931", "0.5236473", "0.52205503", "0.5217555", "0.5215931", "0.5205591", "0.51945347", "0.5185779", "0.5185779", "0.5184571", "0.5182847", "0.51784223", "0.51780194", "0.5176221", "0.5175564", "0.51590073", "0.51519847", "0.51404256", "0.513949", "0.5138581", "0.51213443", "0.5115558", "0.51021975", "0.5102178", "0.50989443", "0.50784", "0.5074792", "0.50671244", "0.506666", "0.50630444", "0.50589705", "0.50484467", "0.50454444", "0.5044557", "0.5044076", "0.5036666", "0.5035343", "0.50346327", "0.50328296", "0.5030264", "0.50237995", "0.5011292", "0.50091666", "0.50075215" ]
0.7984501
0
VerifyBSB will verify a BSB number and send back the current status of that BSB
func (as *AddressService) VerifyBSB(bsb string) (*BSBStatus, error) {\n\tvar bsbStatus BSBStatus\n\tif err := as.client.Get(buildString(\"address/withdraw/bsb-verify/\", bsb), &bsbStatus); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &bsbStatus, nil\n}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (api *API) fBVerificationExists(token string) (fbid uint64, err error) {\n\ts, err := api.sc.Prepare(\"SELECT fb_id FROM facebook_verification WHERE token = ?\")\n\tif err != nil {\n\t\treturn\n\t}\n\terr = s.QueryRow(token).Scan(&fbid)\n\tif err == sql.ErrNoRows {\n\t\terr = NoSuchVerificationToken\n\t}\n\treturn\n}", "func verify(srvChan chan string, channel, nick, hostname string, args []string) {\n\tmessage := \"NOTICE \" + channel + \" :\"\n\tif len(args) != 2 {\n\t\tmessage = \"NOTICE \" + channel + \" :ERROR: Invalid number of arguments\"\n\t} else {\n\t\tuname := args[0]\n\t\tpin := args[1]\n\t\treply := cmdDb.Cmd(\"get\", uname+\"Pin\")\n\t\tpinDb, err := (reply.Bytes())\n\t\tif err != nil {\n\t\t\tlog.Println(err.Error())\n\t\t\treturn\n\t\t}\n\t\tif string(pinDb) == pin {\n\t\t\tmessage += \"You are now verified as \" + uname\n\t\t\tcmdDb.Cmd(\"set\", uname+\"Host\", hostname)\n\t\t\tcmdDb.Cmd(\"set\", uname+\"Pin\", fmt.Sprintf(\"%06d\", rand.Intn(1000000)))\n\t\t} else {\n\t\t\tmessage += \"PIN does not match that of \" + uname\n\t\t}\n\t}\n\tlog.Println(message)\n\tsrvChan <- message\n}", "func BebGetSuccess(w http.ResponseWriter, name string) {\n\tw.WriteHeader(http.StatusOK)\n\ts := bebtypes.Subscription{\n\t\tName: name,\n\t\tSubscriptionStatus: bebtypes.SubscriptionStatusActive,\n\t}\n\terr := json.NewEncoder(w).Encode(s)\n\tExpect(err).ShouldNot(HaveOccurred())\n}", "func (s *BaseEvent) BSuccess() bool {\n if !s.sysParamsExtracted { panic(\"!s.sysParamsExtracted\"); }\n return (s.sysParams.BSuccess == 1)\n}", "func TestBirdShowStatus(t *testing.T) {\n\tout := \"1000-BIRD 1.6.4\\n\" +\n\t\t\"1011-Router ID is 192.168.1.9\\n\" +\n\t\t\" Current server time is 2018-12-27 12:15:01\\n\" +\n\t\t\" Last reboot on 2018-12-21 12:35:11\\n\" +\n\t\t\" Last reconfiguration on 2018-12-21 12:35:11\\n\" +\n\t\t\"0013 Daemon is up and running\\n\"\n\tcompleted := containsActionCompletedCode([]byte(out))\n\n\tassert.True(\"'show status' successfully completed\", completed, t)\n}", "func (bbs *BBSG2Pub) Verify(messages [][]byte, sigBytes, pubKeyBytes []byte) error {\n\tsignature, err := ParseSignature(sigBytes)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"parse signature: %w\", err)\n\t}\n\n\tpublicKey, err := UnmarshalPublicKey(pubKeyBytes)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"parse public key: %w\", err)\n\t}\n\n\tmessagesFr := make([]*SignatureMessage, len(messages))\n\tfor i := range messages {\n\t\tmessagesFr[i], err = ParseSignatureMessage(messages[i])\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"parse signature message %d: %w\", i+1, err)\n\t\t}\n\t}\n\n\tp1 := signature.GetPoint().ToAffine()\n\tq1 := bls.G2ProjectiveOne.\n\t\tMulFR(signature.E.ToRepr()).\n\t\tAdd(publicKey.GetPoint())\n\n\tp2, err := getB(signature.S, messagesFr, publicKey)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"get B point: %w\", err)\n\t}\n\n\tif compareTwoPairings(p1.ToProjective(), q1, p2.ToProjective(), bls.G2ProjectiveOne) {\n\t\treturn nil\n\t}\n\n\treturn errors.New(\"BLS12-381: invalid signature\")\n}", "func (b *backend) Verify(addr wallet.Address, _params *channel.Params, state *channel.State, sig []byte) (bool, error) {\n\tif err := state.Valid(); err != nil {\n\t\treturn false, errors.Wrap(err, \"verifying invalid state\")\n\t}\n\tlog.WithFields(log.Fields{\"channel\": state.ID, \"version\": state.Version}).Tracef(\"Verifying state\")\n\n\tbuff := new(bytes.Buffer)\n\tif err := state.Encode(buff); err != nil {\n\t\treturn false, errors.WithMessage(err, \"pack 
state\")\n\t}\n\treturn wallet.VerifySignature(buff.Bytes(), sig, addr)\n}", "func wbbVerify(curve *math.Curve, pk *math.G2, sig *math.G1, m *math.Zr) error {\n\tif pk == nil || sig == nil || m == nil {\n\t\treturn errors.Errorf(\"Weak-BB signature invalid: received nil input\")\n\t}\n\t// Set P = pk * g2^m\n\tP := curve.NewG2()\n\tP.Clone(pk)\n\tP.Add(curve.GenG2.Mul(m))\n\tP.Affine()\n\t// check that e(sig, pk * g2^m) = e(g1, g2)\n\tif !curve.FExp(curve.Pairing(P, sig)).Equals(curve.GenGt) {\n\t\treturn errors.Errorf(\"Weak-BB signature is invalid\")\n\t}\n\treturn nil\n}", "func (cgs *CmdGetSlotNumWithRegNum) Verify() error {\n\tif perror.Empty == cgs.RegistrationNumber {\n\t\treturn perror.ErrInvalidParams\n\t}\n\treturn nil\n}", "func (mb *tcpPackager) Verify(aduRequest []byte, aduResponse []byte) error {\n\treturn verify(aduRequest, aduResponse)\n}", "func (api *Staytus) VerifySubscriber(email string) (bool, error) {\n\trequest := &request{\n\t\tSubscriberEmail: email,\n\t}\n\tbody, err := json.Marshal(request)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdata, err := api.post(\"api/v1/subscribers/verify\", body)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tvar result bool\n\tif err := json.Unmarshal(data, &result); err != nil {\n\t\treturn false, err\n\t}\n\treturn result, nil\n}", "func (s BlockchainStatus) Verify() error {\n\tswitch s {\n\tcase UnknownChain, Created, Preferred, Validating, Syncing:\n\t\treturn nil\n\tdefault:\n\t\treturn errUnknownBlockchainStatus\n\t}\n}", "func (_Ethdkg *EthdkgCaller) Verify(opts *bind.CallOpts, message []byte, sig [2]*big.Int, pubK [4]*big.Int) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Ethdkg.contract.Call(opts, out, \"Verify\", message, sig, pubK)\n\treturn *ret0, err\n}", "func (c *Client) VerifyRoutingNumber(data string) (map[string]interface{}, error) {\n\tlog.info(\"========== VERIFY ROUTING NUMBER ==========\")\n\turl := buildURL(\"routing-number-verification\")\n\n\treturn c.do(\"POST\", url, data, nil)\n}", "func (client *Client) VerifyBankElement(request *VerifyBankElementRequest) (response *VerifyBankElementResponse, err error) {\n\tresponse = CreateVerifyBankElementResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func (mb *rtuPackager) Verify(aduRequest []byte, aduResponse []byte) (err error) {\n\tlength := len(aduResponse)\n\t// Minimum size (including address, function and CRC)\n\tif length < rtuMinSize {\n\t\terr = fmt.Errorf(\"modbus: response length '%v' does not meet minimum '%v'\", length, rtuMinSize)\n\t\treturn\n\t}\n\t// Slave address must match\n\tif aduResponse[0] != aduRequest[0] {\n\t\terr = fmt.Errorf(\"modbus: response slave id '%v' does not match request '%v'\", aduResponse[0], aduRequest[0])\n\t\treturn\n\t}\n\treturn\n}", "func (s *SCIONBoxController) checkHBStatus(isd addr.ISD, As addr.AS) {\n\ttime.Sleep(HeartBeatPeriod * time.Second)\n\tfor true {\n\t\tslas, err := models.FindSCIONLabASByIAInt(isd, As)\n\t\tif err != nil {\n\t\t\tif err == orm.ErrNoRows {\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif slas.Status == models.Removed {\n\t\t\treturn\n\t\t}\n\t\tdelta := time.Now().Sub(slas.Updated)\n\t\tif delta.Seconds() > float64(HeartBeatLimit*HeartBeatPeriod) {\n\t\t\tif slas.Status != models.Inactive {\n\t\t\t\tlog.Printf(\"AS Status set to inactive, AS: %v, Time since last HB: %v\", slas, delta)\n\t\t\t\tslas.Status = models.Inactive\n\t\t\t\tslas.Update()\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(HeartBeatPeriod * 
time.Second)\n\t}\n}", "func Verify(raw []byte, hmacSecret *[32]byte) (*refs.MessageRef, *DeserializedMessage, error) {\n\tenc, err := EncodePreserveOrder(raw)\n\tif err != nil {\n\t\tif len(raw) > 15 {\n\t\t\traw = raw[:15]\n\t\t}\n\t\treturn nil, nil, fmt.Errorf(\"ssb Verify: could not encode message (%q): %w\", raw, err)\n\t}\n\n\t// destroys it for the network layer but makes it easier to access its values\n\tvar dmsg DeserializedMessage\n\tif err := json.Unmarshal(raw, &dmsg); err != nil {\n\t\tif len(raw) > 15 {\n\t\t\traw = raw[:15]\n\t\t}\n\t\treturn nil, nil, fmt.Errorf(\"ssb Verify: could not json.Unmarshal message (%q): %w\", raw, err)\n\t}\n\n\twoSig, sig, err := ExtractSignature(enc)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"ssb Verify(%s:%d): could not extract signature: %w\", dmsg.Author.Ref(), dmsg.Sequence, err)\n\t}\n\n\tif hmacSecret != nil {\n\t\tmac := auth.Sum(woSig, hmacSecret)\n\t\twoSig = mac[:]\n\t}\n\n\tif err := sig.Verify(woSig, &dmsg.Author); err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"ssb Verify(%s:%d): could not verify message: %w\", dmsg.Author.Ref(), dmsg.Sequence, err)\n\t}\n\n\t// hash the message - it's sadly the internal string rep of v8 that get's hashed, not the json string\n\tv8warp, err := InternalV8Binary(enc)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"ssb Verify(%s:%d): could hash convert message: %w\", dmsg.Author.Ref(), dmsg.Sequence, err)\n\t}\n\th := sha256.New()\n\tio.Copy(h, bytes.NewReader(v8warp))\n\n\tmr := refs.MessageRef{\n\t\tHash: h.Sum(nil),\n\t\tAlgo: refs.RefAlgoMessageSSB1,\n\t}\n\treturn &mr, &dmsg, nil\n}", "func inspectSBRStatus(\n\tctx context.Context,\n\tt *testing.T,\n\tf *framework.Framework,\n\tnamespacedName types.NamespacedName,\n) {\n\terr := retry(10, 5*time.Second, func() error {\n\t\tt.Logf(\"Inspecting SBR: '%s'\", namespacedName)\n\t\terr := assertSBRStatus(ctx, f, namespacedName)\n\t\tif err != nil {\n\t\t\tt.Logf(\"Error on inspecting SBR: '%#v'\", err)\n\t\t}\n\t\treturn err\n\t})\n\tt.Logf(\"SBR-Status: Result after attempts, error: '%#v'\", err)\n\trequire.NoError(t, err)\n}", "func SendVerificationSMS(phone string) bool {\n\n\t//generate/save to vc table\n\tvc, result := services.CreateVcRecord(phone, session)\n\tif result == false {\n\t\tlog.Println(\"sending verification failed cause of VC service failur\")\n\t\treturn false\n\t}\n\n\t//send\n\torigin := structs.SmsOrigin{From: \"10001398\", ApiKey: \"ED09D0D7-5FBA-43A2-8B9D-F0AE79666B52\"}\n\tSmsErr := services.SendSms(vc, phone, origin)\n\tif SmsErr != true {\n\t\tlog.Println(\"User Submition Failed Cause Of SMS service Error:008\")\n\t\tlog.Println(SmsErr)\n\t\tlog.Println(\"End <=008\")\n\t\treturn false\n\t}\n\n\treturn true\n\n}", "func CheckPaymentToBeta() {\n\tbrokerTxs, _ := models.GetTransactionsBySessionTypesAndPaymentStatuses([]int{},\n\t\t[]models.PaymentStatus{models.BrokerTxBetaPaymentPending})\n\n\tfor _, brokerTx := range brokerTxs {\n\t\tbalance := EthWrapper.CheckPRLBalance(eth_gateway.StringToAddress(brokerTx.ETHAddrBeta))\n\t\texpectedBalance := new(big.Int).Quo(brokerTx.GetTotalCostInWei(), big.NewInt(int64(2)))\n\t\tif balance.Int64() > 0 && balance.Int64() >= expectedBalance.Int64() {\n\t\t\tpreviousBetaPaymentStatus := brokerTx.PaymentStatus\n\t\t\tbrokerTx.PaymentStatus = models.BrokerTxBetaPaymentConfirmed\n\t\t\terr := models.DB.Save(&brokerTx)\n\t\t\tif err != nil {\n\t\t\t\toyster_utils.LogIfError(err, nil)\n\t\t\t\tbrokerTx.PaymentStatus = 
previousBetaPaymentStatus\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif brokerTx.Type == models.SessionTypeBeta {\n\t\t\t\tReportGoodAlphaToDRS(brokerTx)\n\t\t\t}\n\t\t\toyster_utils.LogToSegment(\"check_beta_payments: CheckPaymentToBeta - beta_confirmed\",\n\t\t\t\tanalytics.NewProperties().\n\t\t\t\t\tSet(\"beta_address\", brokerTx.ETHAddrBeta).\n\t\t\t\t\tSet(\"alpha_address\", brokerTx.ETHAddrAlpha))\n\t\t}\n\t}\n}", "func (c *Client) BPI(bpi int) error {\n\tvar args []byte\n\n\targs = combine(args, \"BPI \")\n\targs = combine(args, strconv.Itoa(bpi))\n\n\treturn c.Send(MESSAGE_CLIENT_SET_CHANNEL_INFO, args)\n}", "func (byb *TokenByb) checkBybToken() (smcError smc.Error) {\n\tsmcError.ErrorCode = bcerrors.ErrCodeOK\n\n\t// get token address by name and symbol\n\taddr1, bcerr1 := byb.getTokenAddrByName(BybName)\n\taddr2, bcerr2 := byb.getTokenAddrBySymbol(bybSymbol)\n\tif bcerr1.ErrorCode != bcerrors.ErrCodeOK {\n\t\treturn bcerr1\n\t}\n\tif bcerr2.ErrorCode != bcerrors.ErrCodeOK {\n\t\treturn bcerr2\n\t}\n\n\tif addr1 == \"\" && addr2 == \"\" {\n\t\tsmcError.ErrorCode = bcerrors.ErrCodeOK\n\t\treturn\n\t}\n\n\t// the address must be equal both addr1 and addr2\n\tif addr1 != addr2 {\n\t\tsmcError.ErrorCode = bcerrors.ErrCodeInterContractsBybInitialized\n\t\treturn\n\t}\n\n\t// get token by address\n\tiToken, err := byb.State.GetToken(addr1)\n\tif err != nil {\n\t\tsmcError.ErrorCode = bcerrors.ErrCodeLowLevelError\n\t\tsmcError.ErrorDesc = err.Error()\n\t\treturn\n\t}\n\tif iToken.Owner != byb.Sender.Addr {\n\t\tsmcError.ErrorCode = bcerrors.ErrCodeInterContractsBybInitialized\n\t\treturn\n\t}\n\n\tif Compare(iToken.TotalSupply, Zero()) != 0 {\n\t\tsmcError.ErrorCode = bcerrors.ErrCodeInterContractsBybInitialized\n\t\treturn\n\t}\n\n\t// get token contract with contract address\n\ttContract, err := byb.State.StateDB.GetContract(iToken.Address)\n\tif err != nil {\n\t\tsmcError.ErrorCode = bcerrors.ErrCodeLowLevelError\n\t\tsmcError.ErrorDesc = err.Error()\n\t\treturn\n\t}\n\n\t// can't set byb contract lose height\n\tif tContract.Address == byb.State.ContractAddress {\n\t\tsmcError.ErrorCode = bcerrors.ErrCodeInterContractsBybInitialized\n\t\treturn\n\t}\n\n\t// get world app state for block height\n\tworldAppState, err := byb.State.StateDB.GetWorldAppState()\n\tif err != nil {\n\t\tsmcError.ErrorCode = bcerrors.ErrCodeLowLevelError\n\t\tsmcError.ErrorDesc = err.Error()\n\t\treturn\n\t}\n\n\t// set byb token contract's lose height with block height\n\ttContract.LoseHeight = uint64(worldAppState.BlockHeight + 1)\n\n\t// update byb token contract\n\terr = byb.State.SetTokenContract(tContract)\n\tif err != nil {\n\t\tsmcError.ErrorCode = bcerrors.ErrCodeLowLevelError\n\t\tsmcError.ErrorDesc = smcError.Error()\n\t\treturn\n\t}\n\n\treturn\n}", "func (cb *callBack) OnVerifyComplete(zcnTxn *zcncore.Transaction, status int) {\n\tcb.sendVerifyCall()\n\n\terr := zcnTxn.GetVerifyError()\n\tif err == \"\" {\n\t\terr = \"no error\"\n\t}\n\n\tlog.Logger.Debug(\"Transaction verified\",\n\t\tzap.String(\"status\", TxnStatus(status).String()),\n\t\tzap.Any(\"txn_hash\", zcnTxn.GetTransactionHash()),\n\t\tzap.String(\"error\", err),\n\t)\n}", "func (_Ethdkg *EthdkgSession) Verify(message []byte, sig [2]*big.Int, pubK [4]*big.Int) (bool, error) {\n\treturn _Ethdkg.Contract.Verify(&_Ethdkg.CallOpts, message, sig, pubK)\n}", "func (dstv Dstv) Status(serialNumber string) (*StatusResponse, error) {\n\tdstv.AddQueryData(paymenex.PActId, \"STATUS\")\n\tdstv.AddQueryData(paymenex.PSerialNumber, serialNumber)\n\txml, err 
:= dstv.MakeRequest()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// writeFile(\"status.xml\", xml) // DEBUG\n\tresponse := new(StatusResponse)\n\tok := dstv.ParseAndVerifyResponse(xml, response)\n\tif !ok {\n\t\treturn response, errors.New(errVerifyMsg)\n\t}\n\treturn response, nil\n}", "func (bft *ProtocolBFTCoSi) waitResponseVerification() (*Response, bool) {\n\tlog.Lvl3(bft.Name(), \"Waiting for response verification:\")\n\t// wait the verification\n\tverified := <-bft.verifyChan\n\n\t// sanity check\n\tif bft.IsLeaf() && len(bft.tempPrepareResponse) != 0 {\n\t\tpanic(\"bft.tempPrepareResponse is not 0 on leaf node\")\n\t}\n\n\tresp, err := bft.prepare.Response(bft.tempPrepareResponse)\n\tif err != nil {\n\t\treturn nil, false\n\t}\n\n\tif !verified {\n\t\t// Add our exception\n\t\tbft.tempExceptions = append(bft.tempExceptions, Exception{\n\t\t\tIndex: bft.index,\n\t\t\tCommitment: bft.prepare.GetCommitment(),\n\t\t})\n\t\t// Don't include our response!\n\t\tresp = bft.Suite().Scalar().Set(resp).Sub(resp, bft.prepare.GetResponse())\n\t\tlog.Lvl3(bft.Name(), \"Response verification: failed\")\n\t}\n\n\t// if we didn't get all the responses, add them to the exception\n\t// 1, find children that are not in tempPrepareResponsePublics\n\t// 2, for the missing ones, find the global index and then add it to the exception\n\tpublicsMap := make(map[kyber.Point]bool)\n\tfor _, p := range bft.tempPrepareResponsePublics {\n\t\tpublicsMap[p] = true\n\t}\n\tfor _, tn := range bft.Children() {\n\t\tif !publicsMap[tn.ServerIdentity.Public] {\n\t\t\t// We assume the server was also not available for the commitment\n\t\t\t// so no need to subtract the commitment.\n\t\t\t// Conversely, we cannot handle nodes which fail right\n\t\t\t// after making a commitment at the moment.\n\t\t\tbft.tempExceptions = append(bft.tempExceptions, Exception{\n\t\t\t\tIndex: tn.RosterIndex,\n\t\t\t\tCommitment: bft.Suite().Point().Null(),\n\t\t\t})\n\t\t}\n\t}\n\n\tr := &Response{\n\t\tTYPE: RoundPrepare,\n\t\tExceptions: bft.tempExceptions,\n\t\tResponse: resp,\n\t}\n\n\tlog.Lvl3(bft.Name(), \"Response verification:\", verified)\n\treturn r, verified\n}", "func test_checkBuriedState(t *testing.T) {\n\n\taddr, _, _ := eth_gateway.EthWrapper.GenerateEthAddr()\n\n\tburied, err := eth_gateway.EthWrapper.CheckBuriedState(addr)\n\n\tif err != nil {\n\t\tt.Fatal(\"Failed to check the bury state of the given address.\")\n\t} else {\n\t\tresult := \"false\"\n\t\tif buried {\n\t\t\tresult = \"true\"\n\t\t}\n\t\tt.Log(\"Successfully checked bury state: \" + result)\n\t}\n}", "func (c Channel) Verify(r *http.Request) error {\n\ttemp := verifyPassword{}\n\te := c.bodyStruct(r, &temp)\n\n\tif e != nil {\n\t\treturn e\n\t}\n\n\t// create a channel out of the validation object\n\tchannel := are_hub.NewChannel(\"\", temp.Password)\n\n\t// insert the channel into r's context\n\t*r = *r.WithContext(channel.ToCtx(r.Context()))\n\n\treturn nil\n}", "func (_Ethdkg *EthdkgCallerSession) Verify(message []byte, sig [2]*big.Int, pubK [4]*big.Int) (bool, error) {\n\treturn _Ethdkg.Contract.Verify(&_Ethdkg.CallOpts, message, sig, pubK)\n}", "func BebListSuccess(w http.ResponseWriter) {\n\tw.WriteHeader(http.StatusAccepted)\n\tresponse := bebtypes.Response{\n\t\tStatusCode: http.StatusOK,\n\t\tMessage: \"\",\n\t}\n\terr := json.NewEncoder(w).Encode(response)\n\tExpect(err).ShouldNot(HaveOccurred())\n}", "func TestOAuthVerifyState(t *testing.T) {\n\tservice := NewOAuth2Service(testClientID, testClientSecret, testScopes, testTokenURL, 
testAuthURL)\n\tservice.ExchangeAuthCodeForToken(testCode)\n}", "func (_Flytrap *FlytrapCaller) VerifySub(opts *bind.CallOpts, person common.Address, topic string) (bool, [2]byte, bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t\tret1 = new([2]byte)\n\t\tret2 = new(bool)\n\t)\n\tout := &[]interface{}{\n\t\tret0,\n\t\tret1,\n\t\tret2,\n\t}\n\terr := _Flytrap.contract.Call(opts, out, \"verifySub\", person, topic)\n\treturn *ret0, *ret1, *ret2, err\n}", "func STATUSBABYNAME(v string) predicate.Babystatus {\n\treturn predicate.Babystatus(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldSTATUSBABYNAME), v))\n\t})\n}", "func (api *API) FBissueVerification(fbid uint64) (err error) {\n\temail, err := api.fBGetEmail(fbid)\n\tif err != nil {\n\t\treturn\n\t}\n\trandom, err := randomString()\n\tif err != nil {\n\t\treturn\n\t}\n\terr = api.createFBVerification(fbid, random)\n\tif err != nil {\n\t\treturn\n\t}\n\tfbtoken, err := api.fbToken(fbid)\n\tif err != nil {\n\t\treturn\n\t}\n\tfirstName, _, _, err := fBName(fbid, fbtoken)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = api.issueVerificationEmail(email, firstName, random)\n\treturn\n}", "func TestIncompleteBirdShowStatus(t *testing.T) {\n\tout := \"1011-Router ID is 192.168.1.9\\n\" +\n\t\t\" Current server time is 2018-12-27 12:15:01\\n\" +\n\t\t\" Last reboot on 2018-12-21 12:35:11\\n\"\n\tcompleted := containsActionCompletedCode([]byte(out))\n\n\tassert.False(\"'show status' successfully completed\", completed, t)\n}", "func (c *CPU6502) bvs() uint8 {\n\tif c.getFlag(flagV) == 1 {\n\t\tc.branch()\n\t}\n\treturn 0\n}", "func (api *API) fBSetVerified(email string, fbuser uint64) (id gp.UserID, err error) {\n\tid, err = api.userWithEmail(email)\n\tif err != nil {\n\t\tlog.Println(\"There isn't a user with this facebook email\")\n\t\tid, err = api.createUserFromFB(fbuser, email)\n\t\treturn\n\t}\n\terr = api.userSetFB(id, fbuser)\n\tif err == nil {\n\t\terr = api.verify(id)\n\t\tif err == nil {\n\t\t\tlog.Println(\"Verifying worked. 
Now setting networks from invites...\")\n\t\t\terr = api.acceptAllInvites(id, email)\n\t\t}\n\t}\n\treturn\n}", "func (cpu *Mos6502) bvc() uint8 {\n\tif cpu.GetStatusFlag(V) == 0 {\n\t\tcpu.branch()\n\t}\n\treturn 0\n}", "func (ckmgr *CheckpointManager) getVBTimestampForVB(vbno uint16, ckptDoc *metadata.CheckpointsDoc, max_seqno uint64) (*base.VBTimestamp, error) {\n\tvar agreeedIndex int = -1\n\n\t//do checkpointing only when the remote bucket supports xdcrcheckpointing\n\t//get the existing checkpoint records if they exist, otherwise return an empty ckpt record\n\tckpt_list := ckmgr.ckptRecords(ckptDoc, vbno)\n\tfor index, ckpt_record := range ckpt_list {\n\t\tif ckpt_record != nil && ckpt_record.Seqno <= max_seqno {\n\t\t\tremote_vb_status := &service_def.RemoteVBReplicationStatus{VBOpaque: ckpt_record.Target_vb_opaque,\n\t\t\t\tVBSeqno: ckpt_record.Target_Seqno,\n\t\t\t\tVBNo: vbno}\n\n\t\t\tbMatch := false\n\t\t\tbMatch, current_remoteVBOpaque, err := ckmgr.capi_svc.PreReplicate(ckmgr.remote_bucket, remote_vb_status, ckmgr.support_ckpt)\n\t\t\t//remote vb topology changed\n\t\t\t//udpate the vb_uuid and try again\n\t\t\tif err == nil {\n\t\t\t\tckmgr.updateCurrentVBOpaque(vbno, current_remoteVBOpaque)\n\t\t\t\tckmgr.logger.Debugf(\"%v Remote vbucket %v has a new opaque %v, update\\n\", ckmgr.pipeline.Topic(), current_remoteVBOpaque, vbno)\n\t\t\t\tckmgr.logger.Debugf(\"%v Done with _pre_prelicate call for %v for vbno=%v, bMatch=%v\", ckmgr.pipeline.Topic(), remote_vb_status, vbno, bMatch)\n\t\t\t}\n\n\t\t\tif err != nil || bMatch {\n\t\t\t\tif bMatch {\n\t\t\t\t\tckmgr.logger.Debugf(\"%v Remote bucket %v vbno %v agreed on the checkpoint %v\\n\", ckmgr.pipeline.Topic(), ckmgr.remote_bucket, vbno, ckpt_record)\n\t\t\t\t\tif ckptDoc != nil {\n\t\t\t\t\t\tagreeedIndex = index\n\t\t\t\t\t}\n\n\t\t\t\t} else if err == service_def.NoSupportForXDCRCheckpointingError {\n\t\t\t\t\tckmgr.updateCurrentVBOpaque(vbno, nil)\n\t\t\t\t\tckmgr.logger.Infof(\"%v Remote vbucket %v is on a old node which doesn't support checkpointing, update target_vb_uuid=0\\n\", ckmgr.pipeline.Topic(), vbno)\n\n\t\t\t\t} else {\n\t\t\t\t\tckmgr.logger.Errorf(\"%v Pre_replicate failed for %v. 
err=%v\\n\", ckmgr.pipeline.Topic(), vbno, err)\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tgoto POPULATE\n\t\t\t}\n\t\t}\n\t}\nPOPULATE:\n\treturn ckmgr.populateVBTimestamp(ckptDoc, agreeedIndex, vbno), nil\n}", "func TestSuccessful(t *testing.T) {\n\tcontroller := newTestController(t)\n\n\t// First try creating an invoice\n\tinv := load_scaffold_invoice(t, \"valid_v1\")\n\t_, err := controller.Client.CreateInvoice(inv)\n\tif err != nil {\n\t\tt.Fatalf(\"Unable to create invoice: %s\", err)\n\t}\n\n\t// Now create the parcel associated with that invoice\n\tdata := load_scaffold_parcel_data(t, \"valid_v1\", \"parcel\")\n\tif err := controller.Client.CreateParcel(inv.Name(), inv.Parcel[0].Label.SHA256, data); err != nil {\n\t\tt.Fatalf(\"Unable to create parcel: %s\", err)\n\t}\n\n\t// Now see if we get the parcel back from the server\n\tserverData, err := controller.Client.GetParcel(inv.Name(), inv.Parcel[0].Label.SHA256)\n\tif err != nil {\n\t\tt.Fatalf(\"Unable to fetch parcel from server: %s\", err)\n\t}\n\n\tif !reflect.DeepEqual(data, serverData) {\n\t\tt.Fatalf(\"Did not get back valid data from the server\\nExpected: %s\\nGot: %s\", data, serverData)\n\t}\n\n\t// Now try yanking the parcel and fetching it to make sure it gives us an error\n\tif err := controller.Client.YankInvoice(inv.Name()); err != nil {\n\t\tt.Fatalf(\"Unable to yank invoice: %s\", err)\n\t}\n\n\t_, err = controller.Client.GetInvoice(inv.Name())\n\tif err == nil {\n\t\tt.Fatal(\"Shouldn't be able to get a yanked invoice\")\n\t}\n\n\t// Get the yanked invoice and make sure it works\n\t_, err = controller.Client.GetYankedInvoice(inv.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"Should be able to get a yanked invoice: %s\", err)\n\t}\n}", "func validateBIDS(valroot, resdir string) error {\n\tsrvcfg := config.Read()\n\tvar validateNifti bool\n\n\t// Use validation config file if available\n\tcfgpath := filepath.Join(valroot, srvcfg.Label.ValidationConfigFile)\n\tlog.ShowWrite(\"[Info] looking for config file at %q\", cfgpath)\n\tif fi, err := os.Stat(cfgpath); err == nil && !fi.IsDir() {\n\t\tvalcfg, err := handleValidationConfig(cfgpath)\n\t\tif err == nil {\n\t\t\tcheckdir := filepath.Join(valroot, valcfg.Bidscfg.BidsRoot)\n\t\t\tif fi, err = os.Stat(checkdir); err == nil && fi.IsDir() {\n\t\t\t\tvalroot = checkdir\n\t\t\t\tlog.ShowWrite(\"[Info] using validation root directory: %s; %s\", valroot, checkdir)\n\t\t\t} else {\n\t\t\t\tlog.ShowWrite(\"[Error] reading validation root directory: %s\", err.Error())\n\t\t\t}\n\t\t\tvalidateNifti = valcfg.Bidscfg.ValidateNifti\n\t\t} else {\n\t\t\tlog.ShowWrite(\"[Error] unmarshalling validation config file: %s\", err.Error())\n\t\t}\n\t} else {\n\t\tlog.ShowWrite(\"[Info] no validation config file found or processed, running from repo root: %s\", err.Error())\n\t}\n\n\t// Ignoring NiftiHeaders for now, since it seems to be a common error\n\toutBadge := filepath.Join(resdir, srvcfg.Label.ResultsBadge)\n\tlog.ShowWrite(\"[Info] running bids validation: '%s %t --json %s'\", srvcfg.Exec.BIDS, validateNifti, valroot)\n\n\t// Make sure the validator arguments are in the right order\n\tvar args []string\n\tif !validateNifti {\n\t\targs = append(args, \"--ignoreNiftiHeaders\")\n\t}\n\targs = append(args, \"--json\")\n\targs = append(args, valroot)\n\n\tvar out, serr bytes.Buffer\n\tcmd := exec.Command(srvcfg.Exec.BIDS, args...)\n\tout.Reset()\n\tserr.Reset()\n\tcmd.Stdout = &out\n\tcmd.Stderr = &serr\n\terr := cmd.Run()\n\t// Only return if the error is not related to the bids 
validation; if the out string contains information\n\t// we can continue\n\tif err != nil && !strings.Contains(out.String(), \"QUICK_VALIDATION_FAILED\") {\n\t\treturn fmt.Errorf(\"[Error] running bids validation (%s): %q, %q, %q\", valroot, err.Error(), serr.String(), out.String())\n\t}\n\n\t// We need this for both the writing of the result and the badge\n\toutput := out.Bytes()\n\n\t// CHECK: can this lead to a race condition, if a job for the same user/repo combination is started twice in short succession?\n\toutFile := filepath.Join(resdir, srvcfg.Label.ResultsFile)\n\terr = ioutil.WriteFile(outFile, []byte(output), os.ModePerm)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"[Error] writing results file for %q\", valroot)\n\t}\n\n\t// Write proper badge according to result\n\tcontent := resources.SuccessBadge\n\tvar parseBIDS BidsRoot\n\terr = json.Unmarshal(output, &parseBIDS)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"[Error] unmarshalling results json: %s\", err.Error())\n\t}\n\n\tif len(parseBIDS.Issues.Errors) > 0 {\n\t\tcontent = resources.ErrorBadge\n\t} else if len(parseBIDS.Issues.Warnings) > 0 {\n\t\tcontent = resources.WarningBadge\n\t}\n\n\terr = ioutil.WriteFile(outBadge, []byte(content), os.ModePerm)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"[Error] writing results badge for %q\", valroot)\n\t}\n\n\tlog.ShowWrite(\"[Info] finished validating repo at %q\", valroot)\n\treturn nil\n}", "func (l *LNCChallenger) VerifyInvoiceStatus(hash lntypes.Hash,\n\tstate lnrpc.Invoice_InvoiceState, timeout time.Duration) error {\n\n\treturn l.lndChallenger.VerifyInvoiceStatus(hash, state, timeout)\n}", "func GetMbVerificationCode(c *fiber.Ctx) error {\n\t// id := middleware.GetIdFromCookie(c)\n\tid := c.Params(\"id\")\n\tvar request dto.NewMobileVerificationRequest\n\tif err := c.BodyParser(&request); err != nil {\n\t\treturn err\n\t} else {\n\t\tappErr := service.GetMobileVerificationCode(request, id)\n\t\tif appErr.Status != true {\n\t\t\tgetStatus(c, 500, appErr)\n\t\t} else {\n\t\t\tgetStatus(c, 200, appErr)\n\t\t}\n\t}\n\treturn nil\n}", "func Verify(suite suites.Suite, X kyber.Point, msg, sig []byte) error {\n\tHM := hashToPoint(suite, msg)\n\ts := suite.G1().Point()\n\tif err := s.UnmarshalBinary(sig); err != nil {\n\t\treturn err\n\t}\n\ts.Neg(s)\n\tif !suite.PairingCheck([]kyber.Point{s, HM}, []kyber.Point{suite.G2().Point().Base(), X}) {\n\t\treturn errors.New(\"bls: invalid signature\")\n\t}\n\treturn nil\n}", "func (c *client) VerifyStatus() error {\n\t//ensure k3d is in PATH\n\tif _, err := c.pathLooker.Look(binaryName); err != nil {\n\t\tif c.verbose {\n\t\t\tfmt.Printf(\"Command '%s' not found in PATH\", binaryName)\n\t\t}\n\t\treturn fmt.Errorf(\"Command '%s' not found. 
Please install %s (see https://github.com/rancher/k3d#get)\", binaryName, binaryName)\n\t}\n\n\tif err := c.checkVersion(); err != nil {\n\t\treturn err\n\t}\n\n\t// execute a command and return the error\n\t_, err := c.runCmd(\"cluster\", \"list\")\n\treturn err\n}", "func (s *TXPoolServer) verifyBlock(req *tc.VerifyBlockReq, sender *actor.PID) {\n\tif req == nil || len(req.Txs) == 0 {\n\t\treturn\n\t}\n\n\ts.setHeight(req.Height)\n\ts.pendingBlock.mu.Lock()\n\tdefer s.pendingBlock.mu.Unlock()\n\n\ts.pendingBlock.sender = sender\n\ts.pendingBlock.height = req.Height\n\ts.pendingBlock.processedTxs = make(map[common.Uint256]*tc.VerifyTxResult, len(req.Txs))\n\ts.pendingBlock.unProcessedTxs = make(map[common.Uint256]*tx.Transaction, 0)\n\n\ttxs := make(map[common.Uint256]bool, len(req.Txs))\n\n\t// Check whether a tx's gas price is lower than the required, if yes,\n\t// just return error\n\tfor _, t := range req.Txs {\n\t\tif t.GasPrice < s.gasPrice {\n\t\t\tentry := &tc.VerifyTxResult{\n\t\t\t\tHeight: s.pendingBlock.height,\n\t\t\t\tTx: t,\n\t\t\t\tErrCode: errors.ErrGasPrice,\n\t\t\t}\n\t\t\ts.pendingBlock.processedTxs[t.Hash()] = entry\n\t\t\ts.sendBlkResult2Consensus()\n\t\t\treturn\n\t\t}\n\t\t// Check whether double spent\n\t\tif _, ok := txs[t.Hash()]; ok {\n\t\t\tentry := &tc.VerifyTxResult{\n\t\t\t\tHeight: s.pendingBlock.height,\n\t\t\t\tTx: t,\n\t\t\t\tErrCode: errors.ErrDoubleSpend,\n\t\t\t}\n\t\t\ts.pendingBlock.processedTxs[t.Hash()] = entry\n\t\t\ts.sendBlkResult2Consensus()\n\t\t\treturn\n\t\t}\n\t\ttxs[t.Hash()] = true\n\t}\n\n\tcheckBlkResult := s.txPool.GetUnverifiedTxs(req.Txs, req.Height)\n\n\tfor _, t := range checkBlkResult.UnverifiedTxs {\n\t\ts.assignTxToWorker(t, tc.NilSender, nil)\n\t\ts.pendingBlock.unProcessedTxs[t.Hash()] = t\n\t}\n\n\tfor _, t := range checkBlkResult.OldTxs {\n\t\ts.reVerifyStateful(t, tc.NilSender)\n\t\ts.pendingBlock.unProcessedTxs[t.Hash()] = t\n\t}\n\n\tfor _, t := range checkBlkResult.VerifiedTxs {\n\t\ts.pendingBlock.processedTxs[t.Tx.Hash()] = t\n\t}\n\n\t/* If all the txs in the blocks are verified, send response\n\t * to the consensus directly\n\t */\n\tif len(s.pendingBlock.unProcessedTxs) == 0 {\n\t\ts.sendBlkResult2Consensus()\n\t}\n}", "func VerificationHandler(w http.ResponseWriter, r *http.Request) {\n\tkey := r.URL.Query().Get(\"key\")\n\t// If we have something in the ?key= query field...\n\tif key != \"\" {\n\t\tsess, err := db.GetDbSession()\n\t\tif err != nil {\n\t\t\tpanic(\"test\")\n\t\t}\n\n\t\tuser := &models.User{}\n\t\tc := sess.Database(\"releasetrackr\").Collection(\"users\")\n\n\t\t// Find the user that the verification field corresponds to\n\t\tuserErr := c.FindOne(\n\t\t\tcontext.Background(),\n\t\t\tbson.M{\n\t\t\t\t\"verificationcode\": key,\n\t\t\t\t\"verified\": false,\n\t\t\t}).Decode(&user)\n\n\t\t// If it's invalid, display an error back to the user\n\t\tif userErr != nil {\n\t\t\tjson, _ := json.Marshal(&responses.ErrorResponse{\n\t\t\t\tCode: 400,\n\t\t\t\tError: \"The token you want to verify is invalid\",\n\t\t\t})\n\t\t\tw.WriteHeader(400)\n\t\t\tw.Write(json)\n\t\t\tlog.Printf(\"[Handler][VerificationHandler] Verification token fail: %s\", r.RemoteAddr)\n\t\t\treturn\n\t\t}\n\n\t\t// If not, we'll set the verified field to true\n\t\tchange := bson.M{\n\t\t\t\"$set\": bson.M{\n\t\t\t\t\"verified\": true,\n\t\t\t},\n\t\t}\n\t\tc.FindOneAndUpdate(\n\t\t\tcontext.Background(),\n\t\t\tuser,\n\t\t\tchange,\n\t\t)\n\n\t\tlog.Printf(\"[Handler][VerificationHandler] Verification token pass: %s - %s\", key, 
r.RemoteAddr)\n\n\t\t// Display a success message to the user.\n\t\tjson, _ := json.Marshal(&responses.SuccessResponse{\n\t\t\tCode: 200,\n\t\t\tMessage: \"Verification passed.\",\n\t\t})\n\n\t\tw.WriteHeader(200)\n\t\tw.Write(json)\n\t\treturn\n\t}\n}", "func (c *Client) PiSetBillingStatus(sbs piv1.SetBillingStatus) (*piv1.SetBillingStatusReply, error) {\n\tresBody, err := c.makeReq(http.MethodPost,\n\t\tpiv1.APIRoute, piv1.RouteSetBillingStatus, sbs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar sbsr piv1.SetBillingStatusReply\n\terr = json.Unmarshal(resBody, &sbsr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &sbsr, nil\n}", "func (client *Client) VerifyCen(request *VerifyCenRequest) (response *VerifyCenResponse, err error) {\n\tresponse = CreateVerifyCenResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func checkBinaryVersion(fsm *fsm.FSM) error {\n\tourVersion, err := semver.NewVersion(version.Get().Version)\n\tif err != nil {\n\t\treturn trace.Wrap(err, \"failed to parse this binary version: %v\",\n\t\t\tversion.Get().Version)\n\t}\n\n\tplan, err := fsm.GetPlan()\n\tif err != nil {\n\t\treturn trace.Wrap(err, \"failed to obtain operation plan\")\n\t}\n\n\trequiredVersion, err := plan.GravityPackage.SemVer()\n\tif err != nil {\n\t\treturn trace.Wrap(err, \"failed to parse required binary version: %v\",\n\t\t\tplan.GravityPackage)\n\t}\n\n\tif !ourVersion.Equal(*requiredVersion) {\n\t\treturn trace.BadParameter(\n\t\t\t`Current operation plan should be executed with the gravity binary of version %q while this binary is of version %q.\n\nPlease use the gravity binary from the upgrade installer tarball to execute the plan, or download appropriate version from the Ops Center (curl https://get.gravitational.io/telekube/install/%v | bash).\n`, requiredVersion, ourVersion, plan.GravityPackage.Version)\n\t}\n\n\treturn nil\n}", "func (b *Handler) VerifySignature(scr message.NewBlock) error {\n\tpacket := new(bytes.Buffer)\n\n\thdr := scr.State()\n\tif err := header.MarshalSignableVote(packet, hdr); err != nil {\n\t\treturn err\n\t}\n\n\treturn msg.VerifyBLSSignature(hdr.PubKeyBLS, scr.SignedHash, packet.Bytes())\n}", "func (m *Mpesa) B2BRequest(b2b B2BRequestBody) (*MpesaResult, error) {\n\n\treturn m.sendAndProcessMpesaRequest(m.getB2BUrl(), b2b, nil)\n\n}", "func (task *ParseTask) Verify(content []byte) error {\n\n\ttask.addContent(\"verify\")\n\terr := task.remote.IsValidResponse(content)\n\ttask.finishRequest(err, \"The content is valid\")\n\n\treturn err\n}", "func (h *handle) mpsseVerify() error {\n\tvar b [2]byte\n\tfor _, v := range []byte{0xAA, 0xAB} {\n\t\t// Write a bad command and ensure it returned correctly.\n\t\t// Unlike what the application note proposes, include a flush op right\n\t\t// after. Without the flush, the device will only flush after the delay\n\t\t// specified to SetLatencyTimer. 
The flush removes this unneeded wait,\n\t\t// which enables increasing the delay specified to SetLatencyTimer.\n\t\tb[0] = v\n\t\tb[1] = flush\n\t\tif _, err := h.Write(b[:]); err != nil {\n\t\t\treturn fmt.Errorf(\"ftdi: MPSSE verification failed: %w\", err)\n\t\t}\n\t\tp, e := h.h.GetQueueStatus()\n\t\tif e != 0 {\n\t\t\treturn toErr(\"Read/GetQueueStatus\", e)\n\t\t}\n\t\tif p != 2 {\n\t\t\treturn fmt.Errorf(\"ftdi: MPSSE verification failed: expected 2 bytes reply, got %d bytes\", p)\n\t\t}\n\t\tctx, cancel := context200ms()\n\t\tdefer cancel()\n\t\tif _, err := h.ReadAll(ctx, b[:]); err != nil {\n\t\t\treturn fmt.Errorf(\"ftdi: MPSSE verification failed: %w\", err)\n\t\t}\n\t\t// 0xFA means invalid command, 0xAA is the command echoed back.\n\t\tif b[0] != 0xFA || b[1] != v {\n\t\t\treturn fmt.Errorf(\"ftdi: MPSSE verification failed test for byte %#x: %#x\", v, b)\n\t\t}\n\t}\n\treturn nil\n}", "func (t *testerV10x) verifyResult(result types.Result, name string) string {\n\tr, err := types100.GetResult(result)\n\tExpect(err).NotTo(HaveOccurred())\n\n\tExpect(r.Interfaces).To(HaveLen(1))\n\tExpect(r.Interfaces[0].Name).To(Equal(name))\n\tExpect(r.IPs).To(HaveLen(1))\n\n\treturn r.Interfaces[0].Mac\n}", "func (client *Client) VerifyBankElementWithCallback(request *VerifyBankElementRequest, callback func(response *VerifyBankElementResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *VerifyBankElementResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.VerifyBankElement(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func Verify (w http.ResponseWriter, r *http.Request) {\n\ttoken := helpers.ExtractToken(r)\n\tsuccess, err := helpers.VerifyToken(token)\n\n\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\tfmt.Fprintf(w, err.Error());\n\t\treturn;\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tfmt.Fprint(w, success)\n}", "func (p *piPlugin) cmdSetBillingStatus(token []byte, payload string) (string, error) {\n\t// Decode payload\n\tvar sbs pi.SetBillingStatus\n\terr := json.Unmarshal([]byte(payload), &sbs)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Verify token\n\terr = tokenMatches(token, sbs.Token)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Verify billing status\n\tswitch sbs.Status {\n\tcase pi.BillingStatusClosed, pi.BillingStatusCompleted:\n\t\t// These are allowed; continue\n\n\tcase pi.BillingStatusActive:\n\t\t// We don't currently allow the status to be manually set to\n\t\t// active.\n\t\treturn \"\", backend.PluginError{\n\t\t\tPluginID: pi.PluginID,\n\t\t\tErrorCode: uint32(pi.ErrorCodeBillingStatusChangeNotAllowed),\n\t\t\tErrorContext: \"cannot set to active\",\n\t\t}\n\n\tdefault:\n\t\t// Billing status is invalid\n\t\treturn \"\", backend.PluginError{\n\t\t\tPluginID: pi.PluginID,\n\t\t\tErrorCode: uint32(pi.ErrorCodeBillingStatusInvalid),\n\t\t\tErrorContext: \"invalid billing status\",\n\t\t}\n\t}\n\n\t// Verify signature\n\tmsg := sbs.Token + strconv.FormatUint(uint64(sbs.Status), 10) + sbs.Reason\n\terr = util.VerifySignature(sbs.Signature, sbs.PublicKey, msg)\n\tif err != nil {\n\t\treturn \"\", convertSignatureError(err)\n\t}\n\n\t// Ensure reason is provided when status is set to closed.\n\tif sbs.Status == pi.BillingStatusClosed && sbs.Reason == \"\" {\n\t\treturn \"\", backend.PluginError{\n\t\t\tPluginID: 
pi.PluginID,\n\t\t\tErrorCode: uint32(pi.ErrorCodeBillingStatusChangeNotAllowed),\n\t\t\tErrorContext: \"must provide a reason when setting \" +\n\t\t\t\t\"billing status to closed\",\n\t\t}\n\t}\n\n\t// Ensure no billing status already exists\n\tstatuses, err := p.billingStatuses(token)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(statuses) > 0 {\n\t\treturn \"\", backend.PluginError{\n\t\t\tPluginID: pi.PluginID,\n\t\t\tErrorCode: uint32(pi.ErrorCodeBillingStatusChangeNotAllowed),\n\t\t\tErrorContext: \"can not set billing status more than once\",\n\t\t}\n\t}\n\n\t// Ensure record's vote ended and it was approved\n\tvsr, err := p.voteSummary(token)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif vsr.Status != ticketvote.VoteStatusApproved {\n\t\treturn \"\", backend.PluginError{\n\t\t\tPluginID: pi.PluginID,\n\t\t\tErrorCode: uint32(pi.ErrorCodeBillingStatusChangeNotAllowed),\n\t\t\tErrorContext: \"setting billing status is allowed only if \" +\n\t\t\t\t\"proposal vote was approved\",\n\t\t}\n\t}\n\n\t// Save billing status change\n\treceipt := p.identity.SignMessage([]byte(sbs.Signature))\n\tbsc := pi.BillingStatusChange{\n\t\tToken: sbs.Token,\n\t\tStatus: sbs.Status,\n\t\tReason: sbs.Reason,\n\t\tPublicKey: sbs.PublicKey,\n\t\tSignature: sbs.Signature,\n\t\tTimestamp: time.Now().Unix(),\n\t\tReceipt: hex.EncodeToString(receipt[:]),\n\t}\n\terr = p.billingStatusSave(token, bsc)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Prepare reply\n\tsbsr := pi.SetBillingStatusReply{\n\t\tTimestamp: bsc.Timestamp,\n\t\tReceipt: bsc.Receipt,\n\t}\n\treply, err := json.Marshal(sbsr)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(reply), nil\n}", "func ParseBvbin(s string) TermT {\n\tcs := C.CString(s)\n\tdefer C.free(unsafe.Pointer(cs))\n\treturn TermT(C.yices_parse_bvbin(cs))\n}", "func (r *SubscriptionReconciler) syncBEBSubscription(subscription *eventingv1alpha1.Subscription,\n\tresult *ctrl.Result, ctx context.Context, logger logr.Logger) (bool, error) {\n\tlogger.Info(\"Syncing subscription with BEB\")\n\n\tr.bebClient.Initialize()\n\n\t// if object is marked for deletion, we need to delete the BEB subscription\n\tif r.isInDeletion(subscription) {\n\t\treturn false, r.deleteBEBSubscription(subscription, logger, ctx)\n\t}\n\n\tvar statusChanged bool\n\tvar err error\n\tif statusChanged, err = r.bebClient.SyncBebSubscription(subscription); err != nil {\n\t\tlogger.Error(err, \"Update BEB subscription failed\")\n\t\tcondition := eventingv1alpha1.MakeCondition(eventingv1alpha1.ConditionSubscribed, eventingv1alpha1.ConditionReasonSubscriptionCreationFailed, corev1.ConditionFalse)\n\t\tif err := r.updateCondition(subscription, condition, ctx); err != nil {\n\t\t\treturn statusChanged, err\n\t\t}\n\t\treturn false, err\n\t}\n\n\tif !subscription.Status.IsConditionSubscribed() {\n\t\tcondition := eventingv1alpha1.MakeCondition(eventingv1alpha1.ConditionSubscribed, eventingv1alpha1.ConditionReasonSubscriptionCreated, corev1.ConditionTrue)\n\t\tif err := r.updateCondition(subscription, condition, ctx); err != nil {\n\t\t\treturn statusChanged, err\n\t\t}\n\t\tstatusChanged = true\n\t}\n\n\tstatusChangedAtCheck, retry, errTimeout := r.checkStatusActive(subscription)\n\tstatusChanged = statusChanged || statusChangedAtCheck\n\tif errTimeout != nil {\n\t\tlogger.Error(errTimeout, \"Timeout at retry\")\n\t\tresult.Requeue = false\n\t\treturn statusChanged, errTimeout\n\t}\n\tif retry {\n\t\tlogger.Info(\"Wait for subscription to be active\", \"name:\", 
subscription.Name, \"status:\", subscription.Status.EmsSubscriptionStatus.SubscriptionStatus)\n\t\tcondition := eventingv1alpha1.MakeCondition(eventingv1alpha1.ConditionSubscriptionActive, eventingv1alpha1.ConditionReasonSubscriptionNotActive, corev1.ConditionFalse)\n\t\tif err := r.updateCondition(subscription, condition, ctx); err != nil {\n\t\t\treturn statusChanged, err\n\t\t}\n\t\tresult.RequeueAfter = time.Second * 1\n\t} else if statusChanged {\n\t\tcondition := eventingv1alpha1.MakeCondition(eventingv1alpha1.ConditionSubscriptionActive, eventingv1alpha1.ConditionReasonSubscriptionActive, corev1.ConditionTrue)\n\t\tif err := r.updateCondition(subscription, condition, ctx); err != nil {\n\t\t\treturn statusChanged, err\n\t\t}\n\t}\n\t// OK\n\treturn statusChanged, nil\n}", "func (r Service) Verify(captchaResponse string) (requester.VerifyResponse, error) {\n\theaders := map[string]string{\n\t\t\"Content-Type\": \"application/x-www-form-urlencoded\",\n\t}\n\tbody := url.Values{}\n\tbody.Set(\"secret\", r.secret)\n\tbody.Set(\"response\", captchaResponse)\n\tapiRes := requester.VerifyResponse{}\n\terr := r.http.JSON(http.MethodPost, verifyAPI, headers, body.Encode(), &apiRes)\n\tif err != nil {\n\t\treturn requester.VerifyResponse{}, errors.New(\"failed to retrieve reCaptcha API response\")\n\t}\n\treturn apiRes, nil\n}", "func (service *Stainless) Verify(\n\treq *proto.VerificationRequest) (network.Message, error) {\n\tconsole, report, err := verify(req.SourceFiles)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog.Lvl4(\"Returning\", console, report)\n\n\treturn &proto.VerificationResponse{\n\t\tConsole: console,\n\t\tReport: report,\n\t}, nil\n}", "func (cpu *Mos6502) bcs() uint8 {\n\tif cpu.GetStatusFlag(C) == 1 {\n\t\tcpu.branch()\n\t}\n\treturn 0\n}", "func (bbsbr BlockBlobsStageBlockResponse) Status() string {\n\treturn bbsbr.rawResponse.Status\n}", "func Verification(pub ecdsa.PublicKey, hash []byte, r, s *big.Int) bool {\n\tverifystatus := ecdsa.Verify(&pub, hash, r, s)\n\treturn verifystatus\n}", "func (t *testerV01xOr02x) verifyResult(result types.Result, _ string) string {\n\tr, err := types020.GetResult(result)\n\tExpect(err).NotTo(HaveOccurred())\n\n\tExpect(r.IP4.IP.IP).NotTo(BeNil())\n\tExpect(r.IP6).To(BeNil())\n\n\t// 0.2 and earlier don't return MAC address\n\treturn \"\"\n}", "func (u *walletIdentity) Verify(msg []byte, sig []byte) error {\n\treturn errors.New(\"not implemented\")\n}", "func CheckIBAN(number string) (*IBANBICInfo, error) {\n\n\tif len(number) == 0 {\n\t\treturn nil, ErrIBANBICInvalidInput\n\t}\n\n\tresult := &IBANBICInfo{\n\t\tBBAN: number,\n\t}\n\n\tbankName, err := performIBANBICRequest(\"BBANtoBANKNAME\", number)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresult.BankName = bankName\n\n\tibanAndBic, err := performIBANBICRequest(\"BBANtoIBANandBIC\", number)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tibanBicParts := strings.SplitN(ibanAndBic, \"#\", 2)\n\tif len(ibanBicParts) < 2 {\n\t\treturn nil, errors.New(ErrIBANBICServiceError + \"Failed to get BIC and IBAN code\")\n\t}\n\n\tresult.IBAN = ibanBicParts[0]\n\tresult.BIC = ibanBicParts[1]\n\n\treturn result, nil\n\n}", "func Verify() {\n\tlist, _ := proxyServiceInstance.GetUnVerified()\n\tfor _, p := range list {\n\t\tverifyJob <- p\n\t}\n}", "func (fm *FinalModelStructBytes) Verify() int {\n fm.buffer.Shift(fm.FBEOffset())\n fbeResult := fm.VerifyFields()\n fm.buffer.Unshift(fm.FBEOffset())\n return fbeResult\n}", "func (pbusnr PageBlobsUpdateSequenceNumberResponse) Status() 
string {\n\treturn pbusnr.rawResponse.Status\n}", "func assertSBRStatus(\n\tctx context.Context,\n\tf *framework.Framework,\n\tnamespacedName types.NamespacedName,\n) error {\n\tsbr := &v1alpha1.ServiceBindingRequest{}\n\tif err := f.Client.Get(ctx, namespacedName, sbr); err != nil {\n\t\treturn err\n\t}\n\n\tfor i, condition := range sbr.Status.Conditions {\n\t\tif condition.Type != servicebindingrequest.BindingReady && condition.Status != corev1.ConditionTrue {\n\t\t\treturn fmt.Errorf(\n\t\t\t\t\"Condition.Type and Condition.Status is '%s' and '%s' instead of '%s' and '%s'\",\n\t\t\t\tsbr.Status.Conditions[i].Type,\n\t\t\t\tsbr.Status.Conditions[i].Status,\n\t\t\t\tservicebindingrequest.BindingReady,\n\t\t\t\tcorev1.ConditionTrue)\n\t\t}\n\t}\n\treturn nil\n}", "func (_Flytrap *FlytrapSession) VerifySub(person common.Address, topic string) (bool, [2]byte, bool, error) {\n\treturn _Flytrap.Contract.VerifySub(&_Flytrap.CallOpts, person, topic)\n}", "func (m *Module) Verify(state, token string) (bool, error) {\n\tobj, err := jose.ParseEncrypted(token)\n\tif err != nil {\n\t\treturn false, errors.Wrap(err)\n\t}\n\tb, err := obj.Decrypt(m.decryptionKey)\n\tcsrfPayload := &csrfPayload{}\n\tif err = json.Unmarshal(b, csrfPayload); err != nil {\n\t\treturn false, errors.Wrap(err)\n\t}\n\tif state != csrfPayload.State {\n\t\treturn false, nil\n\t}\n\tif time.Now().After(csrfPayload.ExpireAfter) {\n\t\treturn false, nil\n\t}\n\treturn true, nil\n}", "func (s *service) Verify(w http.ResponseWriter, r *http.Request) (interface{}, error) {\n\tctx := r.Context()\n\tuserID := httpapi.GetUserID(r)\n\ttoken := httpapi.GetToken(r)\n\n\treq, err := decodeSignupVerifyRequest(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tuser, err := s.repoMngr.User().ByIdentity(ctx, \"ID\", userID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = s.otp.ValidateOTP(req.Code, token.CodeHash); err != nil {\n\t\treturn nil, err\n\t}\n\n\tjwtToken, err := s.token.Create(ctx, user, auth.JWTAuthorized)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = s.markUserVerified(ctx, user); err != nil {\n\t\treturn nil, err\n\t}\n\n\tloginHistory := &auth.LoginHistory{\n\t\tUserID: userID,\n\t\tTokenID: jwtToken.Id,\n\t\tIPAddress: sql.NullString{\n\t\t\tString: httpapi.GetIP(r),\n\t\t\tValid: true,\n\t\t},\n\t\tExpiresAt: s.token.RefreshableTill(ctx, jwtToken, jwtToken.RefreshToken),\n\t}\n\tif err = s.repoMngr.LoginHistory().Create(ctx, loginHistory); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s.respond(ctx, w, user, jwtToken)\n}", "func (_Flytrap *FlytrapCallerSession) VerifySub(person common.Address, topic string) (bool, [2]byte, bool, error) {\n\treturn _Flytrap.Contract.VerifySub(&_Flytrap.CallOpts, person, topic)\n}", "func (api *Api) verify() error {\n\treturn nil\n}", "func GetStatusSniffer(uuid string) (running bool, status bool) {\n owlh, err := ndb.GetStapServerInformation(uuid)\n if err != nil {\n logs.Error(\"Error retrieving stap server information\")\n }\n logs.Info(\"Checking Sniffer status for uuid: \" + uuid)\n\n running, pid, cpu, mem := GetStatusSnifferSSH(uuid)\n cpuStatus := GetStatusCPU(owlh, cpu, uuid)\n memStatus := GetStatusMEM(owlh, mem, uuid)\n storageStatus := GetStatusStorage(owlh, uuid)\n\n logs.Alert(\"Checking \" + owlh[\"name\"] + \" - \" + owlh[\"ip\"] + \" - PID:\" + pid + \" CPU: \" + strconv.FormatBool(cpuStatus) + \" MEM: \" + strconv.FormatBool(memStatus) + \" STORAGE: \" + strconv.FormatBool(storageStatus))\n if cpuStatus && memStatus && storageStatus {\n return 
running, true\n }\n return running, false\n}", "func VFNMSUB132PD_BCST(ops ...operand.Op) { ctx.VFNMSUB132PD_BCST(ops...) }", "func verify(publicKey *rsa.PublicKey, message []byte, sig []byte) error {\n\th := sha256.New()\n\th.Write(message)\n\td := h.Sum(nil)\n\treturn rsa.VerifyPKCS1v15(publicKey, crypto.SHA256, d, sig)\n}", "func HandleLoadbalancerGetSuccessfully(t *testing.T) {\n\tth.Mux.HandleFunc(\"/v2.0/lbaas/loadbalancers/36e08a3e-a78f-4b40-a229-1e7e23eee1ab\", func(w http.ResponseWriter, r *http.Request) {\n\t\tth.TestMethod(t, r, \"GET\")\n\t\tth.TestHeader(t, r, \"X-Auth-Token\", client.TokenID)\n\t\tth.TestHeader(t, r, \"Accept\", \"application/json\")\n\n\t\tfmt.Fprintf(w, SingleLoadbalancerBody)\n\t})\n}", "func (u *UserController) VerifyLink(c *gin.Context) {\n\tvar data forms.ResendCommand\n\n\tif (c.BindJSON(&data)) != nil {\n\t\tc.JSON(400, gin.H{\"message\": \"Provided all fields\"})\n\t\tc.Abort()\n\t\treturn\n\t}\n\n\tresult, err := userModel.GetUserByEmail(data.Email)\n\n\tif result.Email == \"\" {\n\t\tc.JSON(404, gin.H{\"message\": \"User account was not found\"})\n\t\tc.Abort()\n\t\treturn\n\t}\n\n\tif err != nil {\n\t\tc.JSON(500, gin.H{\"message\": \"Something wrong happened, try again later\"})\n\t\tc.Abort()\n\t\treturn\n\t}\n\n\tresetToken, _ := services.GenerateNonAuthToken(result.Email)\n\n\tlink := \"http://localhost:5000/api/v1/verify-account?verify_token=\" + resetToken\n\tbody := \"Here is your reset <a href='\" + link + \"'>link</a>\"\n\thtml := \"<strong>\" + body + \"</strong>\"\n\n\temail := services.SendMail(\"Verify Account\", body, result.Email, html, result.Name)\n\n\tif email == true {\n\t\tc.JSON(200, gin.H{\"messsage\": \"Check mail\"})\n\t\tc.Abort()\n\t\treturn\n\t} else {\n\t\tc.JSON(500, gin.H{\"message\": \"An issue occured sending you an email\"})\n\t\tc.Abort()\n\t\treturn\n\t}\n}", "func (cpu *Mos6502) sbc() uint8 {\n\tcpu.fetch()\n\n\tvalue := word(cpu.fetchedData) ^ 0x00ff\n\tcpu.temp = word(cpu.a) + value + word(cpu.GetStatusFlag(C))\n\n\tcpu.setStatusFlag(C, (cpu.temp&0xff00)>>7 == 1)\n\tcpu.setStatusFlag(Z, (cpu.temp&0x00ff) == 0)\n\tcpu.setStatusFlag(V, (cpu.temp^word(cpu.a))&(cpu.temp^value)&0x0080 > 0)\n\tcpu.setStatusFlag(N, (cpu.temp&0x0080) > 0)\n\n\tcpu.a = byte(cpu.temp & 0x00ff)\n\treturn 1\n}", "func (u *User) VerifyPIN(pin string) (map[string]interface{}, error) {\n\tlog.info(\"========== VERIFY PIN ==========\")\n\turl := buildURL(path[\"auth\"], u.UserID)\n\n\tdata := `{ \"refresh_token\": \"` + u.RefreshToken + `\", \"validation_pin\": \"` + pin + `\" }`\n\n\tres, err := u.do(\"POST\", url, data, nil)\n\n\treturn res, err\n}", "func (t *SimpleChaincode) verifyFile(stub shim.ChaincodeStubInterface, args []string) ([]byte, error) {\r\n\tvar key, value,vstring string\r\n\tvar err error\r\n\t\r\n\r\n\tfmt.Println(\"running verifyFile()\")\r\n\r\n\tif len(args) != 2 {\r\n\t\treturn nil, errors.New(\"Incorrect number of arguments. Expecting 2. 
name of the key and value to set\")\r\n\t}\r\n\r\n\tkey = args[0] //rename for funsies\r\n\tvalue = args[1]\r\n\t\r\n\tv,err := stub.GetState(key)\r\n\tif err != nil {\r\n\t\treturn nil, errors.New(\"Failed to get state\")\r\n\t}\r\n\tif v==nil {\r\n\t\treturn nil, errors.New(\"NIF doesn't exist\")\r\n\t} else {\r\n\t\t\r\n\t\tvstring = string(v)\r\n\t\tif vstring==value {\r\n\t\t\treturn nil,nil\r\n\t\t} else { \r\n\t\t\t\r\n\t\t\treturn nil, errors.New(\"No Match\")\r\n\t\t}\r\n\t\t\r\n\t\t\r\n\t}\r\n\treturn nil, nil\r\n}", "func Verify() bool {\n\treturn true\n}", "func (c OffChainCrossNodeSimplePaymentReconciliationBill) VerifySignature() error {\n\n\treturn c.ChannelChainTransferData.CheckMustAddressAndSigns()\n}", "func signVCWithBBS(r *require.Assertions, vc *verifiableapi.Credential) string {\n\tpubKey, privKey, err := bbs12381g2pub.GenerateKeyPair(sha256.New, nil)\n\tr.NoError(err)\n\tr.NotEmpty(privKey)\n\n\tpubKeyBytes, err := pubKey.Marshal()\n\tr.NoError(err)\n\n\tdidKey, keyID := fingerprint.CreateDIDKeyByCode(fingerprint.BLS12381g2PubKeyMultiCodec, pubKeyBytes)\n\n\tbbsSigner, err := newBBSSigner(privKey)\n\tr.NoError(err)\n\n\tsigSuite := bbsblssignature2020.New(\n\t\tsuite.WithSigner(bbsSigner),\n\t\tsuite.WithVerifier(bbsblssignature2020.NewG2PublicKeyVerifier()))\n\n\tldpContext := &verifiableapi.LinkedDataProofContext{\n\t\tSignatureType: \"BbsBlsSignature2020\",\n\t\tSignatureRepresentation: verifiableapi.SignatureProofValue,\n\t\tSuite: sigSuite,\n\t\tVerificationMethod: keyID,\n\t}\n\n\tloader, err := jsonldtest.DocumentLoader()\n\tr.NoError(err)\n\n\terr = vc.AddLinkedDataProof(ldpContext, jsonld.WithDocumentLoader(loader))\n\tr.NoError(err)\n\n\tvcSignedBytes, err := json.Marshal(vc)\n\tr.NoError(err)\n\tr.NotEmpty(vcSignedBytes)\n\n\tvcVerified, err := verifiableapi.ParseCredential(vcSignedBytes,\n\t\tverifiableapi.WithEmbeddedSignatureSuites(sigSuite),\n\t\tverifiableapi.WithPublicKeyFetcher(verifiableapi.SingleKey(pubKeyBytes, \"Bls12381G2Key2020\")),\n\t\tverifiableapi.WithJSONLDDocumentLoader(loader),\n\t)\n\tr.NoError(err)\n\tr.NotNil(vcVerified)\n\n\treturn didKey\n}", "func (s *Server) verifyTransaction(tx *Transaction) bool {\n\ttime.Sleep(txDelay)\n\treturn true\n}", "func (a *Client) BurnToken(params *BurnTokenParams) (*BurnTokenOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewBurnTokenParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"burnToken\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/asset/tokens/{symbol}/burns\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &BurnTokenReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*BurnTokenOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for burnToken: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func verifyCircleCIJobSuccess(orgRepo, gitHash, circleCIDeployJobName, circleCIAPIToken string) (err error) {\n\tclient := &circleci.Client{Token: circleCIAPIToken}\n\tsplitOrgRepo := strings.Split(orgRepo, \"/\")\n\torg := splitOrgRepo[0]\n\trepo := splitOrgRepo[1]\n\tvar targetBuildNum int\n\tif targetBuildNum, err = obtainBuildNum(org, repo, gitHash, circleCIDeployJobName,\n\t\tclient); err != nil {\n\t\treturn\n\t}\n\treturn checkForJobSuccess(org, repo, targetBuildNum, client)\n}", "func Test_VerifySigFromTass(t *testing.T) {\n\trequire := require.New(t)\n\n\t//c := &sm2.Driver{}\n\n\txBytes := common.FromHex(\"0000000000000000000000000000000000000000000000000000000000000000FD4241057FEC6CBEEC501F7E1763751B8F6DFCFB910FB634FBB76A16639EF172\")\n\tyBytes := common.FromHex(\"00000000000000000000000000000000000000000000000000000000000000001C6DA89F9C1A5EE9B6108E5A2A5FE336962630A34DBA1AF428451E1CE63BB3CF\")\n\tx := new(big.Int).SetBytes(xBytes)\n\ty := new(big.Int).SetBytes(yBytes)\n\n\tpublicKey := &gmsm_sm2.PublicKey{\n\t\tX: x,\n\t\tY: y,\n\t}\n\tvar pubSM2 sm2.PubKeySM2\n\tcopy(pubSM2[:], gmsm_sm2.Compress(publicKey))\n\n\trBytes := common.FromHex(\"00000000000000000000000000000000000000000000000000000000000000003AA29337E7149047FB8AE83F30AA00125E23173C88F284ADDED2E5B59ACAA5B9\")\n\tsBytes := common.FromHex(\"0000000000000000000000000000000000000000000000000000000000000000E2E9338109D74269578216039FD4D1C764E7F6F142CBB2E3035E7E49D375D330\")\n\tr := new(big.Int).SetBytes(rBytes)\n\ts := new(big.Int).SetBytes(sBytes)\n\n\tsignature := sm2.SignatureSM2(sm2.Serialize(r, s))\n\n\tmsg := []byte(\"112233445566112233445566112233445566112233445566\")\n\tok := pubSM2.VerifyBytes(msg, signature)\n\trequire.Equal(true, ok)\n}", "func (cpu *Mos6502) bcc() uint8 {\n\tif cpu.GetStatusFlag(C) == 0 {\n\t\tcpu.branch()\n\t}\n\treturn 0\n}", "func Verify(res http.ResponseWriter, req *http.Request) {\n\tvar (\n\t\tuserModel models.User\n\t\terrorResponse response.Error\n\t\tsuccessResponse response.Success\n\t\tverfiy userRequest.Verify\n\t\terror error\n\t)\n\terror = json.NewDecoder(req.Body).Decode(&verfiy)\n\tif error != nil {\n\t\terrorResponse.Code = http.StatusBadRequest\n\t\terrorResponse.Error = config.InvalidRequest\n\t\thelpers.SetResponse(res, http.StatusBadRequest, errorResponse)\n\t\treturn\n\t}\n\terror = verfiy.Validate()\n\tif error != nil {\n\t\terrorResponse.Code = http.StatusBadRequest\n\t\terrorResponse.Error = helpers.FormatError(error.Error())\n\t\thelpers.SetResponse(res, http.StatusBadRequest, errorResponse)\n\t\treturn\n\t}\n\tuser := userModel.Check(req.Context().Value(\"identity\"))\n\t//log.Println(user)\n\totp, _ := strconv.ParseInt(verfiy.Otp, 10, 64)\n\tif user.ID < 0 || user.OTP != otp || !helpers.InArray(user.OtpType, []string{\"authorizeEmail\", \"authorizePhone\", \"verifyEmail\", \"verifyPhone\"}) {\n\t\terrorResponse.Code = http.StatusBadRequest\n\t\terrorResponse.Error = config.InvalidToken\n\t\thelpers.SetResponse(res, http.StatusBadRequest, errorResponse)\n\t\treturn\n\t}\n\tswitch user.OtpType {\n\tcase \"authorizeEmail\":\n\t\tuser.EmailVerify = 1\n\t\tuser.LastLogedIn = time.Now()\n\t\tsuccessResponse.Msg = config.LoggedInMsg\n\tcase \"authorizePhone\":\n\t\tuser.PhoneVerify = 1\n\t\tuser.LastLogedIn = time.Now()\n\t\tsuccessResponse.Msg = config.LoggedInMsg\n\tcase \"verifyEmail\":\n\t\tuser.EmailVerify = 1\n\t\tsuccessResponse.Msg = config.EmailVerifyMsg\n\tcase 
\"verifyPhone\":\n\t\tuser.PhoneVerify = 1\n\t\tsuccessResponse.Msg = config.PhoneVerifyMsg\n\t}\n\tuser.OTP, user.OtpType, user.OtpValidity = 0, \"\", 0\n\terror = userModel.UpdateDetail()\n\tif error != nil {\n\t\terrorResponse.Code = http.StatusServiceUnavailable\n\t\terrorResponse.Error = config.ServiceUnavailable\n\t\thelpers.SetResponse(res, http.StatusServiceUnavailable, errorResponse)\n\t\treturn\n\t}\n\tsuccessResponse.Code = http.StatusOK\n\thelpers.SetResponse(res, http.StatusOK, successResponse)\n}", "func GetB() bool {\n\treturn machine.BUTTONB.Get()\n}", "func VerifyPassword(userName, password string) bool {\n\tlocalShare, err := LocalShareForPwdVerification()\n\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tcmd := exec.Command(\"smbclient\", \"//localhost/\"+localShare, \"-U\", userName, \"-c\", \"exit\")\n\n\tvar outbuf bytes.Buffer\n\tcmd.Stdout = &outbuf\n\n\tstdin, _ := cmd.StdinPipe()\n\n\tcmd.Start()\n\n\tstdin.Write([]byte(password))\n\tstdin.Close()\n\n\terr = cmd.Wait()\n\n\toutput := outbuf.String()\n\n\tlog.Printf(\"Output of password verification: %s\\n.\", output)\n\n\treturn err == nil\n}", "func vbm(args ...string) error {\n\treturn cmd(B2D.Vbm, args...)\n}", "func tearDown(url string) {\n\tvar errTransactions int64\n\terrTransactions = 0\n\tfmt.Println(\"....... State transfer is happening, Lets take a nap for 2 mins ......\")\n\tsleep(120)\n\tval1, val2 := queryChaincode(counter)\n\tfmt.Println(\"========= After Query Vals A = \",val1,\" \\n B = \", val2,\"\\n\")\n\n/*\theight := getChainHeight(url) //Remove hardcoding ??\n\tfmt.Println(\"========= Total Blocks #\", height)\n\tfor i := 1; i < height; i++ {\n\t\t//TODO: Don't hard code IP , can we take this as argument ?\n\t\tnonHashData := chaincode.ChaincodeBlockTrxInfo(url, i)\n\t\tlength := len(nonHashData.TransactionResult)\n\t\tfor j := 0; j < length; j++ {\n\t\t\tif nonHashData.TransactionResult[j].ErrorCode > 0 {\n\t\t\t\tfmt.Printf(\"\\n========= Block[%d] Trx#[%s] UUID [%d] ErrorCode [%d] Error: %s\\n\", i, counter, nonHashData.TransactionResult[j].Uuid, nonHashData.TransactionResult[j].ErrorCode, nonHashData.TransactionResult[j].Error)\n\t\t\t\terrTransactions++\n\t\t\t}\n\t\t}\n\t}\n\tif errTransactions > 0 {\n\t\tfmt.Println(\"========= Failed transactions #\", errTransactions)\n\t}\n\tfmt.Println(\"========= Successful transactions #\", counter-errTransactions)\n\n\tnewVal,err := strconv.ParseInt(val2, 10, 64);\n\n\tif err != nil {\n\t\t\tfmt.Println(\"Failed to convert \",val2,\" to int64\\n Error: \", err)\n\t}*/\n\n\t//TODO: Block size again depends on the Block configuration in pbft config file\n\t//Test passes when 2 * block height match with total transactions, else fails\n\tif (newVal == counter) {\n\t\tfmt.Println(\"######### TEST PASSED #########\")\n\t} else {\n\t\tfmt.Println(\"######### TEST FAILED #########\")\n\t}\n\n}", "func signVCWithBBS(r *require.Assertions, vc *verifiable.Credential) string {\n\tpubKey, privKey, err := bbs12381g2pub.GenerateKeyPair(sha256.New, nil)\n\tr.NoError(err)\n\tr.NotEmpty(privKey)\n\n\tpubKeyBytes, err := pubKey.Marshal()\n\tr.NoError(err)\n\n\tdidKey, keyID := fingerprint.CreateDIDKeyByCode(fingerprint.BLS12381g2PubKeyMultiCodec, pubKeyBytes)\n\n\tbbsSigner, err := newBBSSigner(privKey)\n\tr.NoError(err)\n\n\tsigSuite := bbsblssignature2020.New(\n\t\tsuite.WithSigner(bbsSigner),\n\t\tsuite.WithVerifier(bbsblssignature2020.NewG2PublicKeyVerifier()))\n\n\tldpContext := &verifiable.LinkedDataProofContext{\n\t\tSignatureType: 
\"BbsBlsSignature2020\",\n\t\tSignatureRepresentation: verifiable.SignatureProofValue,\n\t\tSuite: sigSuite,\n\t\tVerificationMethod: keyID,\n\t}\n\n\tloader, err := jsonldtest.DocumentLoader()\n\tr.NoError(err)\n\n\terr = vc.AddLinkedDataProof(ldpContext, jsonld.WithDocumentLoader(loader))\n\tr.NoError(err)\n\n\tvcSignedBytes, err := json.Marshal(vc)\n\tr.NoError(err)\n\tr.NotEmpty(vcSignedBytes)\n\n\tvcVerified, err := verifiable.ParseCredential(vcSignedBytes,\n\t\tverifiable.WithEmbeddedSignatureSuites(sigSuite),\n\t\tverifiable.WithPublicKeyFetcher(verifiable.SingleKey(pubKeyBytes, \"Bls12381G2Key2020\")),\n\t\tverifiable.WithJSONLDDocumentLoader(loader),\n\t)\n\tr.NoError(err)\n\tr.NotNil(vcVerified)\n\n\treturn didKey\n}" ]
[ "0.54125434", "0.54113877", "0.535207", "0.532243", "0.52799875", "0.52168584", "0.52088404", "0.5178279", "0.5171409", "0.5150322", "0.5136394", "0.51195115", "0.5100915", "0.5069178", "0.5030368", "0.50266194", "0.50152457", "0.50060326", "0.49994752", "0.49660742", "0.49428263", "0.49365884", "0.4928855", "0.4923025", "0.4856724", "0.48519108", "0.48517528", "0.4843189", "0.48377275", "0.4820545", "0.48023626", "0.4800466", "0.47908953", "0.47854102", "0.4780447", "0.47792235", "0.47754645", "0.47679043", "0.47643974", "0.47539324", "0.475349", "0.4749074", "0.4745571", "0.47432226", "0.47376123", "0.4729231", "0.471802", "0.47109535", "0.4709872", "0.47017545", "0.46967703", "0.46959636", "0.46954465", "0.46781793", "0.46720552", "0.46669397", "0.46651903", "0.46622956", "0.46616846", "0.46568683", "0.46549767", "0.46524227", "0.4646072", "0.46405277", "0.46229598", "0.4621771", "0.46216354", "0.46037722", "0.46005103", "0.45943594", "0.45938602", "0.45922554", "0.45918822", "0.4589522", "0.4577476", "0.4576335", "0.45630547", "0.45628038", "0.45580724", "0.45547152", "0.45496875", "0.45420006", "0.4539046", "0.45363435", "0.45306793", "0.4525411", "0.45241714", "0.45221856", "0.45123208", "0.45111996", "0.4509895", "0.4508298", "0.44991872", "0.4494003", "0.44914994", "0.44819832", "0.4479452", "0.44767103", "0.4475146", "0.447173" ]
0.73651963
0
CheckDeposit check a deposit for an address given the address id
func (as *AddressService) CheckDeposit(addressID int) error { if isEmptyStr(as.assetCode) { return errAssetCode } if err := as.client.Get(buildString("address/check/", as.assetCode, "/", strconv.Itoa(addressID)), nil); err != nil { return err } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (k *Keeper) GetDeposit(ctx sdk.Context, address sdk.AccAddress) (deposit types.Deposit, found bool) {\n\tstore := k.Store(ctx)\n\n\tkey := types.DepositKey(address)\n\tvalue := store.Get(key)\n\tif value == nil {\n\t\treturn deposit, false\n\t}\n\n\tk.cdc.MustUnmarshalBinaryBare(value, &deposit)\n\treturn deposit, true\n}", "func (p *PrivKey) checkImportedAddress(walletAddress, p2shSegwitAddress, fullPublicKey string) {\n\t// Note,\n\t// GetAccount() calls GetAddressInfo() internally\n\n\tvar (\n\t\ttargetAddr string\n\t\taddrType address.AddrType\n\t)\n\n\tswitch p.btc.CoinTypeCode() {\n\tcase coin.BTC:\n\t\ttargetAddr = p2shSegwitAddress\n\t\taddrType = address.AddrTypeP2shSegwit\n\tcase coin.BCH:\n\t\ttargetAddr = walletAddress\n\t\taddrType = address.AddrTypeBCHCashAddr\n\tdefault:\n\t\tp.logger.Warn(\"this coin type is not implemented in checkImportedAddress()\",\n\t\t\tzap.String(\"coin_type_code\", p.btc.CoinTypeCode().String()))\n\t\treturn\n\t}\n\n\t// 1.call `getaccount` by target_address\n\tacnt, err := p.btc.GetAccount(targetAddr)\n\tif err != nil {\n\t\tp.logger.Warn(\n\t\t\t\"fail to call btc.GetAccount()\",\n\t\t\tzap.String(addrType.String(), targetAddr),\n\t\t\tzap.Error(err))\n\t\treturn\n\t}\n\tp.logger.Debug(\n\t\t\"account is found\",\n\t\tzap.String(\"account\", acnt),\n\t\tzap.String(addrType.String(), targetAddr))\n\n\t// 2.call `getaddressinfo` by target_address\n\taddrInfo, err := p.btc.GetAddressInfo(targetAddr)\n\tif err != nil {\n\t\tp.logger.Warn(\n\t\t\t\"fail to call btc.GetAddressInfo()\",\n\t\t\tzap.String(addrType.String(), targetAddr),\n\t\t\tzap.Error(err))\n\t} else {\n\t\tif addrInfo.Pubkey != fullPublicKey {\n\t\t\tp.logger.Warn(\n\t\t\t\t\"pubkey is not matched\",\n\t\t\t\tzap.String(\"in_bitcoin_core\", addrInfo.Pubkey),\n\t\t\t\tzap.String(\"in_database\", fullPublicKey))\n\t\t}\n\t}\n}", "func (u Usecase) Deposit(ctx context.Context, accID vos.AccountID, amount vos.Money) error {\n\tconst operation = \"accounts.Usecase.Deposit\"\n\n\tlog := logger.FromCtx(ctx).WithFields(logrus.Fields{\n\t\t\"accID\": accID,\n\t\t\"amount\": amount.Int(),\n\t})\n\n\tlog.Infoln(\"processing a deposit\")\n\n\tif amount <= 0 {\n\t\treturn ErrInvalidAmount\n\t}\n\n\terr := u.accRepo.Deposit(ctx, accID, amount)\n\n\tif err != nil {\n\t\treturn domain.Error(operation, err)\n\t}\n\n\tlog.Infoln(\"deposit successfully processed\")\n\n\treturn nil\n}", "func (e Exchange) DepositAddress(exch string, currencyCode currency.Code) (out string, err error) {\n\tif currencyCode.IsEmpty() {\n\t\terr = errors.New(\"currency code is empty\")\n\t\treturn\n\t}\n\treturn engine.Bot.DepositAddressManager.GetDepositAddressByExchange(exch, currencyCode)\n}", "func (mapper GovMapper) GetDeposit(proposalID uint64, depositorAddr btypes.Address) (deposit gtypes.Deposit, exists bool) {\n\texists = mapper.Get(KeyDeposit(proposalID, depositorAddr), &deposit)\n\tif !exists {\n\t\treturn gtypes.Deposit{}, false\n\t}\n\n\treturn deposit, true\n}", "func (h *HUOBI) QueryDepositAddress(ctx context.Context, cryptocurrency currency.Code) ([]DepositAddress, error) {\n\tresp := struct {\n\t\tDepositAddress []DepositAddress `json:\"data\"`\n\t}{}\n\n\tvals := url.Values{}\n\tvals.Set(\"currency\", cryptocurrency.Lower().String())\n\n\terr := h.SendAuthenticatedHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, huobiAccountDepositAddress, vals, nil, &resp, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(resp.DepositAddress) == 0 {\n\t\treturn nil, errors.New(\"deposit address data 
isn't populated\")\n\t}\n\treturn resp.DepositAddress, nil\n}", "func (_PlasmaFramework *PlasmaFrameworkCaller) IsDeposit(opts *bind.CallOpts, blockNum *big.Int) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _PlasmaFramework.contract.Call(opts, out, \"isDeposit\", blockNum)\n\treturn *ret0, err\n}", "func deposit(ctx iscp.Sandbox) (dict.Dict, error) {\n\tctx.Log().Debugf(\"accounts.deposit.begin -- %s\", ctx.IncomingTransfer())\n\n\tmustCheckLedger(ctx.State(), \"accounts.deposit.begin\")\n\n\tcaller := ctx.Caller()\n\tparams := kvdecoder.New(ctx.Params(), ctx.Log())\n\ttargetAccount := params.MustGetAgentID(ParamAgentID, caller)\n\ttargetAccount = commonaccount.AdjustIfNeeded(targetAccount, ctx.ChainID())\n\n\t// funds currently are in the common account (because call is to 'accounts'), they must be moved to the target\n\tsucc := MoveBetweenAccounts(ctx.State(), commonaccount.Get(ctx.ChainID()), targetAccount, ctx.IncomingTransfer())\n\tassert.NewAssert(ctx.Log()).Require(succ, \"internal error: failed to deposit to %s\", targetAccount.String())\n\n\tctx.Log().Debugf(\"accounts.deposit.success: target: %s\\n%s\",\n\t\ttargetAccount, ctx.IncomingTransfer().String())\n\n\tmustCheckLedger(ctx.State(), \"accounts.deposit.exit\")\n\treturn nil, nil\n}", "func ProcessDeposit(state *beacon.BeaconState, dep *beacon.Deposit) error {\n\t// Deposits must be processed in order\n\tif dep.Index != state.DepositIndex {\n\t\treturn errors.New(fmt.Sprintf(\"deposit has index %d that does not match with state index %d\", dep.Index, state.DepositIndex))\n\t}\n\n\tserializedDepositData := dep.Data.Serialized()\n\n\t// Verify the Merkle branch\n\tif !merkle.VerifyMerkleBranch(\n\t\thash.Hash(serializedDepositData),\n\t\tdep.Proof[:],\n\t\tbeacon.DEPOSIT_CONTRACT_TREE_DEPTH,\n\t\tuint64(dep.Index),\n\t\tstate.LatestEth1Data.DepositRoot) {\n\t\treturn errors.New(fmt.Sprintf(\"deposit %d has merkle proof that failed to be verified\", dep.Index))\n\t}\n\n\t// Increment the next deposit index we are expecting. 
Note that this\n\t// needs to be done here because while the deposit contract will never\n\t// create an invalid Merkle branch, it may admit an invalid deposit\n\t// object, and we need to be able to skip over it\n\tstate.DepositIndex += 1\n\n\tvalIndex := beacon.ValidatorIndexMarker\n\tfor i, v := range state.ValidatorRegistry {\n\t\tif v.Pubkey == dep.Data.Pubkey {\n\t\t\tvalIndex = beacon.ValidatorIndex(i)\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Check if it is a known validator that is depositing (\"if pubkey not in validator_pubkeys\")\n\tif valIndex == beacon.ValidatorIndexMarker {\n\t\t// only unknown pubkeys need to be verified, others are already trusted\n\t\tif !bls.BlsVerify(\n\t\t\tdep.Data.Pubkey,\n\t\t\tssz.SignedRoot(dep.Data),\n\t\t\tdep.Data.ProofOfPossession,\n\t\t\tbeacon.GetDomain(state.Fork, state.Epoch(), beacon.DOMAIN_DEPOSIT)) {\n\t\t\t// simply don't handle the deposit.\n\t\t\treturn nil\n\t\t}\n\n\t\t// Not a known pubkey, add new validator\n\t\tvalidator := beacon.Validator{\n\t\t\tPubkey: dep.Data.Pubkey,\n\t\t\tWithdrawalCredentials: dep.Data.WithdrawalCredentials,\n\t\t\tActivationEpoch: beacon.FAR_FUTURE_EPOCH,\n\t\t\tExitEpoch: beacon.FAR_FUTURE_EPOCH,\n\t\t\tWithdrawableEpoch: beacon.FAR_FUTURE_EPOCH,\n\t\t\tInitiatedExit: false,\n\t\t\tSlashed: false,\n\t\t\tHighBalance: 0,\n\t\t}\n\t\t// Note: In phase 2 registry indices that have been withdrawn for a long time will be recycled.\n\t\tstate.ValidatorRegistry = append(state.ValidatorRegistry, validator)\n\t\tstate.Balances = append(state.Balances, 0)\n\t\tvalIndex = beacon.ValidatorIndex(len(state.ValidatorRegistry) - 1)\n\t\tstate.SetBalance(valIndex, dep.Data.Amount)\n\t} else {\n\t\t// Increase balance by deposit amount\n\t\tstate.IncreaseBalance(valIndex, dep.Data.Amount)\n\t}\n\treturn nil\n}", "func (_SingleAuto *SingleAutoFilterer) FilterDeposit(opts *bind.FilterOpts, user []common.Address, pid []*big.Int) (*SingleAutoDepositIterator, error) {\n\n\tvar userRule []interface{}\n\tfor _, userItem := range user {\n\t\tuserRule = append(userRule, userItem)\n\t}\n\tvar pidRule []interface{}\n\tfor _, pidItem := range pid {\n\t\tpidRule = append(pidRule, pidItem)\n\t}\n\n\tlogs, sub, err := _SingleAuto.contract.FilterLogs(opts, \"Deposit\", userRule, pidRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &SingleAutoDepositIterator{contract: _SingleAuto.contract, event: \"Deposit\", logs: logs, sub: sub}, nil\n}", "func (_Depositmanager *DepositmanagerSession) DepositFor(_destination common.Address, _amount *big.Int, _tokenType *big.Int, _pubkey []byte) (*types.Transaction, error) {\n\treturn _Depositmanager.Contract.DepositFor(&_Depositmanager.TransactOpts, _destination, _amount, _tokenType, _pubkey)\n}", "func (_Depositmanager *DepositmanagerTransactorSession) DepositFor(_destination common.Address, _amount *big.Int, _tokenType *big.Int, _pubkey []byte) (*types.Transaction, error) {\n\treturn _Depositmanager.Contract.DepositFor(&_Depositmanager.TransactOpts, _destination, _amount, _tokenType, _pubkey)\n}", "func (dcr *ExchangeWallet) DepositAddress() (string, error) {\n\taddr, err := dcr.wallet.ExternalAddress(dcr.ctx, dcr.depositAccount())\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn addr.String(), nil\n}", "func deposit(scid string, amount int64) {\n\t\n\twalletURL:= \"http://127.0.0.1:30309/json_rpc\"\n\t\n\tdata:= PayloadDeposit{\n\t\tJsonrpc: \"2.0\", \n\t\tID: \"0\",\n\t\tMethod: \"transfer_split\",\n\t\tParams: Params{\n\t\t\tMixin: 5,\n\t\t\tGetTxKey: true,\n\t\t\tScTx: 
ScTx{\n\t\t\t\tEntrypoint: \"Deposit\", \n\t\t\t\tScid: scid,\n\t\t\t\tValue: amount,\n\t\t\t}, \n\t\t},\n\t}\n\n\t\n\tpayloadBytes, err := json.Marshal(data)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tbody := bytes.NewReader(payloadBytes)\n\t\n\t_, err=rpcPost(body, walletURL)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\t\n\t//println(result)\t\n\tfmt.Println(\"Deposit sent to wallet!\")\n\t\n}", "func (deposit *Deposit) ValidateDeposit() (map[string]interface{}, bool) {\n\n\tif deposit.Amount <= 0 {\n\t\treturn u.Message(false, \"Amount is required\"), false\n\t}\n\n\tif deposit.FormaPago == \"\" {\n\t\treturn u.Message(false, \"FormaPago is required\"), false\n\t}\n\n\t//check client in DB\n\t_, err := ExistClientIdentificationDB(deposit.Clientidentificationcard)\n\tif err == gorm.ErrRecordNotFound {\n\t\treturn u.Message(false, \"Client exist no in DB\"), false\n\t}\n\n\treturn u.Message(false, \"Requirement passed\"), true\n}", "func (_TTFT20 *TTFT20Caller) IsWithdrawalAddress(opts *bind.CallOpts, _addr common.Address) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _TTFT20.contract.Call(opts, out, \"isWithdrawalAddress\", _addr)\n\treturn *ret0, err\n}", "func (as *ApiService) CreateDepositAddress(currency string) (*ApiResponse, error) {\n\treq := NewRequest(http.MethodPost, \"/api/v1/deposit-addresses\", map[string]string{\"currency\": currency})\n\treturn as.Call(req)\n}", "func (_TokenStakingEscrow *TokenStakingEscrowCaller) HasDeposit(opts *bind.CallOpts, operator common.Address) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _TokenStakingEscrow.contract.Call(opts, out, \"hasDeposit\", operator)\n\treturn *ret0, err\n}", "func (q queryServer) Deposit(ctx context.Context, req *v1.QueryDepositRequest) (*v1.QueryDepositResponse, error) {\n\tif req == nil {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"invalid request\")\n\t}\n\n\tif req.ProposalId == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"proposal id can not be 0\")\n\t}\n\n\tif req.Depositor == \"\" {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"empty depositor address\")\n\t}\n\n\tdepositor, err := q.k.authKeeper.AddressCodec().StringToBytes(req.Depositor)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdeposit, err := q.k.Deposits.Get(ctx, collections.Join(req.ProposalId, sdk.AccAddress(depositor)))\n\tif err != nil {\n\t\treturn nil, status.Error(codes.NotFound, err.Error())\n\t}\n\n\treturn &v1.QueryDepositResponse{Deposit: &deposit}, nil\n}", "func (_Depositmanager *DepositmanagerTransactor) Deposit(opts *bind.TransactOpts, _amount *big.Int, _tokenType *big.Int, _pubkey []byte) (*types.Transaction, error) {\n\treturn _Depositmanager.contract.Transact(opts, \"deposit\", _amount, _tokenType, _pubkey)\n}", "func (_SingleAuto *SingleAutoTransactor) Deposit(opts *bind.TransactOpts, _pid *big.Int, _wantAmt *big.Int) (*types.Transaction, error) {\n\treturn _SingleAuto.contract.Transact(opts, \"deposit\", _pid, _wantAmt)\n}", "func (sc stakingClient) Deposit(fromInfo keys.Info, passWd, coinsStr, memo string, accNum, seqNum uint64) (\n\tresp sdk.TxResponse, err error) {\n\tif err = params.CheckKeyParams(fromInfo, passWd); err != nil {\n\t\treturn\n\t}\n\n\tcoin, err := sdk.ParseDecCoin(coinsStr)\n\tif err != nil {\n\t\treturn resp, fmt.Errorf(\"failed : parse Coins [%s] error: %s\", coinsStr, err)\n\t}\n\n\tmsg := types.NewMsgDeposit(fromInfo.GetAddress(), coin)\n\n\treturn 
sc.BuildAndBroadcast(fromInfo.GetName(), passWd, memo, []sdk.Msg{msg}, accNum, seqNum)\n}", "func TestGetBTCDeposAddress(t *testing.T) {\n\n\ta := InitApp(\"https://mbank.dl-dev.ru/api/\")\n\n\taddr, err := a.GetBTCDeposAddress(MinterAddress, \"BIP\", \"[email protected]\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif addr == \"\" {\n\t\tt.Errorf(\"Empty address %s\", addr)\n\t}\n\n}", "func (mapper GovMapper) AddDeposit(ctx context.Context, proposalID uint64, depositorAddr btypes.Address, depositAmount uint64) (btypes.Error, bool) {\n\tproposal, ok := mapper.GetProposal(proposalID)\n\tif !ok {\n\t\treturn ErrUnknownProposal(proposalID), false\n\t}\n\n\taccountMapper := ctx.Mapper(account.AccountMapperName).(*account.AccountMapper)\n\taccount := accountMapper.GetAccount(depositorAddr).(*types.QOSAccount)\n\taccount.MustMinusQOS(btypes.NewInt(int64(depositAmount)))\n\taccountMapper.SetAccount(account)\n\n\t// Update proposal\n\tproposal.TotalDeposit = proposal.TotalDeposit + depositAmount\n\tmapper.SetProposal(proposal)\n\n\t// Check if deposit has provided sufficient total funds to transition the proposal into the voting period\n\tactivatedVotingPeriod := false\n\tif proposal.Status == gtypes.StatusDepositPeriod && proposal.TotalDeposit >= mapper.GetParams(ctx).MinDeposit {\n\t\tmapper.activateVotingPeriod(ctx, proposal)\n\t\tactivatedVotingPeriod = true\n\t}\n\n\t// Add or update deposit object\n\tcurrDeposit, found := mapper.GetDeposit(proposalID, depositorAddr)\n\tif !found {\n\t\tnewDeposit := gtypes.Deposit{depositorAddr, proposalID, depositAmount}\n\t\tmapper.setDeposit(proposalID, depositorAddr, newDeposit)\n\t} else {\n\t\tcurrDeposit.Amount = currDeposit.Amount + depositAmount\n\t\tmapper.setDeposit(proposalID, depositorAddr, currDeposit)\n\t}\n\n\treturn nil, activatedVotingPeriod\n}", "func (_Vault *VaultTransactor) Deposit(opts *bind.TransactOpts, incognitoAddress string) (*types.Transaction, error) {\n\treturn _Vault.contract.Transact(opts, \"deposit\", incognitoAddress)\n}", "func (_Depositmanager *DepositmanagerTransactor) DepositFor(opts *bind.TransactOpts, _destination common.Address, _amount *big.Int, _tokenType *big.Int, _pubkey []byte) (*types.Transaction, error) {\n\treturn _Depositmanager.contract.Transact(opts, \"depositFor\", _destination, _amount, _tokenType, _pubkey)\n}", "func (dcr *DCRBackend) CheckAddress(addr string) bool {\n\t_, err := dcrutil.DecodeAddress(addr, chainParams)\n\treturn err == nil\n}", "func (h *Handle) Deposit() {\n\tvar result types.HexNumber\n\tvalue, _ := new(big.Int).SetString(\"20123456789000000000000000000\", 0)\n\taccount := types.Str2Address(\"0x1b978a1d302335a6f2ebe4b8823b5e17c3c84135\")\n\terr := tokenA.Deposit.SendTransaction(&result, account, value)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tlog.Println(result)\n}", "func (as *ApiService) DepositAddresses(currency string) (*ApiResponse, error) {\n\treq := NewRequest(http.MethodGet, \"/api/v1/deposit-addresses\", map[string]string{\"currency\": currency})\n\treturn as.Call(req)\n}", "func (_SingleAuto *SingleAutoTransactorSession) Deposit(_pid *big.Int, _wantAmt *big.Int) (*types.Transaction, error) {\n\treturn _SingleAuto.Contract.Deposit(&_SingleAuto.TransactOpts, _pid, _wantAmt)\n}", "func (a *Client) UserGetDepositAddress(params *UserGetDepositAddressParams) (*UserGetDepositAddressOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewUserGetDepositAddressParams()\n\t}\n\n\tresult, err := 
a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"User.getDepositAddress\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/user/depositAddress\",\n\t\tProducesMediaTypes: []string{\"application/javascript\", \"application/xml\", \"text/javascript\", \"text/xml\"},\n\t\tConsumesMediaTypes: []string{\"application/x-www-form-urlencoded\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &UserGetDepositAddressReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*UserGetDepositAddressOK), nil\n\n}", "func (w *Wallet) FindAddress(adrstr ...string) bool {\n\tfor _, adr := range adrstr {\n\t\t_, pub := w.AddressPublic[adr]\n\t\t_, priv := w.AddressChange[adr]\n\t\tif pub || priv {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (c *ClientWithResponses) GetDepositAddressWithResponse(ctx context.Context, currency CurrencyParam, reqEditors ...RequestEditorFn) (*GetDepositAddressResponse, error) {\n\trsp, err := c.GetDepositAddress(ctx, currency, reqEditors...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ParseGetDepositAddressResponse(rsp)\n}", "func Deposit(accID string, amount int64) error {\n\tif amount <= 0 {\n\t\treturn fmt.Errorf(\"invalid amount; %d\", amount)\n\t}\n\n\tvar accs []*share.Account\n\terr := client.GetByNames(ctx, share.KindAccount, []string{accID, \"Cash\"}, &accs)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"get accounts error; %v\", err)\n\t}\n\taccs[0].Balance += amount\n\taccs[1].Balance -= amount\n\ttrans := []*share.Transaction{\n\t\t{Type: share.TransactionTypeDeposit, AccountID: accID, Amount: amount},\n\t\t{Type: share.TransactionTypeWithdraw, AccountID: \"Cash\", Amount: -amount},\n\t}\n\tfor _, tran := range trans {\n\t\ttran.NewKey(share.KindTransaction)\n\t}\n\terr = client.SaveModels(ctx, \"\", []interface{}{accs[0], accs[1], trans[0], trans[1]})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"save models error; %v\", err)\n\t}\n\treturn nil\n}", "func (c *Client) BalanceForAddress(address string) (int, error) {\n\turl := fmt.Sprintf(\"%s/addr/%s/balance?token=%s\", c.URL, address, c.Token)\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tresp, err := c.HC.Do(req)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tbodyBytes, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tresponseString := string(bodyBytes)\n\tduffs, err := strconv.ParseInt(responseString, 10, 64)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn int(duffs), nil\n}", "func (a *Account) Deposit(amount int64) (newBalance int64, ok bool) {\n\ta.mutex.Lock() // Aquire the lock on the shared resource the balance\n\tdefer a.mutex.Unlock() // Release lock when surrouding function has executed\n\tif !a.open || a.balance+amount < 0 {\n\t\treturn 0, false // If the bank account is not open or even with the deposit the balance is still negativr\n\t}\n\ta.balance += amount\n\treturn a.balance, true\n}", "func (broadcast *Broadcast) ValidatorDeposit(ctx context.Context, username, deposit,\n\tvalidatorPubKey, link, privKeyHex string, seq int64) (*model.BroadcastResponse, error) {\n\tvalPubKey, err := transport.GetPubKeyFromHex(validatorPubKey)\n\tif err != nil {\n\t\treturn nil, errors.FailedToGetPubKeyFromHexf(\"ValidatorDeposit: failed to get Val pub key\").AddCause(err)\n\t}\n\tmsg := model.ValidatorDepositMsg{\n\t\tUsername: username,\n\t\tDeposit: deposit,\n\t\tValPubKey: 
valPubKey,\n\t\tLink: link,\n\t}\n\treturn broadcast.broadcastTransaction(ctx, msg, privKeyHex, seq, \"\", false)\n}", "func (w *Wallet) Deposit(amount Bitcoin) {\n\tfmt.Printf(\"address of balance in wallet is %v \\n\", &w.balance) // get the pointer\n\tw.balance += amount\n}", "func TestGetBTCDepositStatus(t *testing.T) {\n\n\ta := InitApp(\"https://mbank.dl-dev.ru/api/\")\n\n\tstat, err := a.GetBTCDepositStatus(\"tb1qtfnwald5a667730yqrvdt67aslmgn3k7qykq5a\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif stat == nil {\n\t\tt.Errorf(\"Empty stat\")\n\t}\n\n\tstat, err = a.GetBTCDepositStatus(\"saawdadadw\")\n\tif err.Error() != \"Address not found\" {\n\t\tt.Fatal(err)\n\t\tt.Errorf(\"Cannot found err\")\n\t}\n\n}", "func (_SingleAuto *SingleAutoSession) Deposit(_pid *big.Int, _wantAmt *big.Int) (*types.Transaction, error) {\n\treturn _SingleAuto.Contract.Deposit(&_SingleAuto.TransactOpts, _pid, _wantAmt)\n}", "func (vmctx *VMContext) TransferToAddress(targetAddr address.Address, transfer coretypes.ColoredBalances) bool {\n\tprivileged := vmctx.CurrentContractHname() == accounts.Interface.Hname()\n\tfmt.Printf(\"TransferToAddress: %s privileged = %v\\n\", targetAddr.String(), privileged)\n\tif !privileged {\n\t\t// if caller is accounts, it must debit from account by itself\n\t\tagentID := vmctx.MyAgentID()\n\t\tvmctx.pushCallContext(accounts.Interface.Hname(), nil, nil) // create local context for the state\n\t\tdefer vmctx.popCallContext()\n\n\t\tif !accounts.DebitFromAccount(vmctx.State(), agentID, transfer) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn vmctx.txBuilder.TransferToAddress(targetAddr, transfer) == nil\n}", "func (s *SkyСoinService) CheckBalance(addr string) (*BalanceResponse, error) {\n\taddressesToGetBalance := make([]string, 0, 1)\n\taddressesToGetBalance = append(addressesToGetBalance, addr)\n\tbalanceResult, err := s.checkBalance(s.client, addressesToGetBalance)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &BalanceResponse{\n\t\tAddress: addr,\n\t\tHours: balanceResult.Confirmed.Hours,\n\t\tBalance: balanceResult.Confirmed.Coins,\n\t}, nil\n}", "func (p *TDepositWithdrawServiceClient) AuditDepositWithdraw(ctx context.Context, traceId string, status string, mark string, withdrawId int32) (r bool, err error) {\n var _args5 TDepositWithdrawServiceAuditDepositWithdrawArgs\n _args5.TraceId = traceId\n _args5.Status = status\n _args5.Mark = mark\n _args5.WithdrawId = withdrawId\n var _result6 TDepositWithdrawServiceAuditDepositWithdrawResult\n if err = p.c.Call(ctx, \"auditDepositWithdraw\", &_args5, &_result6); err != nil {\n return\n }\n return _result6.GetSuccess(), nil\n}", "func (*DepositServer) Deposit(ctx context.Context, req *DepositRequest) (*DepositResponse, error) {\n\tif req.GetAmount() < 0 {\n\t\treturn nil, status.Errorf(codes.InvalidArgument, \"cannot deposit %v\", req.GetAmount())\n\t}\n\n\ttime.Sleep(time.Duration(req.GetAmount()) * time.Millisecond)\n\treturn &DepositResponse{Ok: true}, nil\n}", "func (acc *Account) Deposit(amount int64) (newBalance int64, ok bool) {\n\tacc.Lock()\n\tdefer acc.Unlock()\n\n\t//make sure the account is valid AND that we aren't overdrafting\n\tif acc.invalid || amount+acc.balance < 0 {\n\t\treturn\n\t}\n\n\tok = true\n\tnewBalance = amount + acc.balance\n\tacc.balance = newBalance\n\n\treturn\n}", "func checkBalance(t *testing.T, baseApp *baseapp.BaseApp, addr sdk.AccAddress, balances sdk.Coins, keeper bankkeeper.Keeper) {\n\tt.Helper()\n\tctxCheck := baseApp.NewContext(true)\n\tkeeperBalances := 
keeper.GetAllBalances(ctxCheck, addr)\n\trequire.True(t, balances.Equal(keeperBalances))\n}", "func (_Dospayment *DospaymentCaller) GuardianFundsAddr(opts *bind.CallOpts) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _Dospayment.contract.Call(opts, out, \"guardianFundsAddr\")\n\treturn *ret0, err\n}", "func (w *Wallet) Deposit(amount Bitcoin) {\n\t/*Deposit takes a pointer to Wallet as the input because\n\totherwise, the amount we add to balance will be to a copy of balance\n\tand not to the one that the test is checking against\n\t*/\n\t(*w).balance += amount\n\t/*using (*w) dereferences the pointer so we can read it\n\thowever it is not necessary to write it like this because golang will\n\tdo it implicitly for you\n\t*/\n\n}", "func (p *PChainHelper) CheckBalance(client *avalanchegoclient.Client, address string, amount uint64) error {\n\n\tpBalance, err := client.PChainAPI().GetBalance(address)\n\tif err != nil {\n\t\treturn stacktrace.Propagate(err, \"Failed to retrieve P Chain balance.\")\n\t}\n\tpActualBalance := uint64(pBalance.Balance)\n\tif pActualBalance != amount {\n\t\treturn stacktrace.NewError(\"Found unexpected P Chain Balance for address: %s. Expected: %v, found: %v\",\n\t\t\taddress, amount, pActualBalance)\n\t}\n\n\treturn nil\n}", "func (as *AddressService) VerifyWithdrawal(token string) error {\n\tif err := as.client.Get(buildString(\"address/withdraw/verify/\", token), nil); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func GetDepositAddress(coin string) (address string, error error) {\n\tjsonData, err := doTauRequest(1, \"GET\", \"data/getdepositaddress?coin=\"+coin, nil)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"TauDepositAddress-> %v\", err)\n\t}\n\tvar d struct {\n\t\tCoin string `json:\"coin\"`\n\t\tAddress string `json:\"address\"`\n\t}\n\tif err := json.Unmarshal(jsonData, &d); err != nil {\n\t\treturn \"\", fmt.Errorf(\"TauDepositAddress-> %v\", err)\n\t}\n\treturn d.Address, nil\n}", "func CheckBalance(t *testing.T, app *App, addr sdk.AccAddress, exp sdk.Coins) {\n\tctxCheck := app.BaseApp.NewContext(true, abci.Header{})\n\tres := app.AccountKeeper.GetAccount(ctxCheck, addr)\n\n\trequire.Equal(t, exp, res.GetCoins())\n}", "func (_TTFT20 *TTFT20Session) IsWithdrawalAddress(_addr common.Address) (bool, error) {\n\treturn _TTFT20.Contract.IsWithdrawalAddress(&_TTFT20.CallOpts, _addr)\n}", "func AddressExists(exec boil.Executor, addressID uint16) (bool, error) {\n\tvar exists bool\n\tsql := \"select exists(select 1 from `address` where `address_id`=? 
limit 1)\"\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, sql)\n\t\tfmt.Fprintln(boil.DebugWriter, addressID)\n\t}\n\n\trow := exec.QueryRow(sql, addressID)\n\n\terr := row.Scan(&exists)\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"sqlboiler: unable to check if address exists\")\n\t}\n\n\treturn exists, nil\n}", "func (k *Kraken) GetDepositAddress(cryptocurrency currency.Code, _ string) (string, error) {\n\tmethods, err := k.GetDepositMethods(cryptocurrency.String())\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar method string\n\tfor _, m := range methods {\n\t\tmethod = m.Method\n\t}\n\n\tif method == \"\" {\n\t\treturn \"\", errors.New(\"method not found\")\n\t}\n\n\treturn k.GetCryptoDepositAddress(method, cryptocurrency.String())\n}", "func (acc *Account) Deposit(amount int64) (newBalance int64, ok bool) {\n\tacc.mu.Lock()\n\tdefer acc.mu.Unlock()\n\tif acc.closed {\n\t\treturn 0, false\n\t}\n\tnewBalance = acc.balance + amount\n\tif newBalance < 0 {\n\t\treturn acc.balance, false\n\t}\n\tacc.balance = newBalance\n\treturn acc.balance, true\n}", "func (c *Client) Deposit(ctx context.Context, p *DepositRequestBody) (err error) {\n\t_, err = c.DepositEndpoint(ctx, p)\n\treturn\n}", "func (t *TauAPI) GetDepositAddress(coin string) (address string, error error) {\n\tjsonData, err := t.doTauRequest(&TauReq{\n\t\tVersion: 1,\n\t\tMethod: \"GET\",\n\t\tPath: \"data/getdepositaddress?coin=\" + coin,\n\t\tNeedsAuth: true,\n\t})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"TauDepositAddress-> %v\", err)\n\t}\n\tvar d struct {\n\t\tCoin string `json:\"coin\"`\n\t\tAddress string `json:\"address\"`\n\t}\n\tif err := json.Unmarshal(jsonData, &d); err != nil {\n\t\treturn \"\", fmt.Errorf(\"TauDepositAddress-> %v\", err)\n\t}\n\treturn d.Address, nil\n}", "func (dcr *ExchangeWallet) OwnsDepositAddress(address string) (bool, error) {\n\taddr, err := stdaddr.DecodeAddress(address, dcr.chainParams)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn dcr.wallet.AccountOwnsAddress(dcr.ctx, addr, dcr.depositAccount())\n}", "func (a *Account) Deposit(depAmt int) (int, bool) {\n\tcMutex.Lock()\n\tdefer cMutex.Unlock()\n\n\tif a.isOpen && (a.balance+depAmt >= 0) {\n\t\ta.balance += depAmt\n\t\treturn a.balance, true\n\t}\n\n\treturn a.balance, false\n}", "func (a *Account) Deposit(amount int64) (int64, bool) {\n\ta.Lock()\n\tdefer a.Unlock()\n\n\tif a.closed {\n\t\treturn 0, false\n\t}\n\n\tif a.balance+amount < 0 {\n\t\treturn 0, false\n\t}\n\n\ta.balance += amount\n\n\treturn a.balance, true\n}", "func (a *Account) Deposit(amount int64) (int64, bool) {\n\ta.mu.Lock()\n\tdefer a.mu.Unlock()\n\tif !a.open || a.balance+amount < 0 {\n\t\treturn 0, false\n\t}\n\ta.balance += amount\n\treturn a.balance, true\n}", "func (k Keeper) HasDeposit(ctx sdk.Context, id uint64) bool {\n\tstore := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.DepositKey))\n\treturn store.Has(GetDepositIDBytes(id))\n}", "func (_Rootchain *RootchainTransactor) Deposit(opts *bind.TransactOpts, _depositTx []byte) (*types.Transaction, error) {\n\treturn _Rootchain.contract.Transact(opts, \"deposit\", _depositTx)\n}", "func checkDeposit(mines map[MineType] int, productType MineType, productNb int) int {\n\tvar toDeliver int\n\n\tif mines[productType] + productNb > siloSize {\n\t\ttoDeliver = siloSize - mines[productType]\n\t} else {\n\t\ttoDeliver = productNb\n\t}\n\n\tmines[productType] += toDeliver\n\n\treturn toDeliver\n}", "func deposit(res http.ResponseWriter, req 
*http.Request){\n\tvar result Account\n\n\tcollection := client.Database(Database).Collection(Collection)\n\tparams := url_parser(req.URL.String())\n\tfilter := bson.D{{\"identifier\", clean_string(params[\"account\"])}}\n\terr := collection.FindOne(context.TODO(), filter).Decode(&result)\n\t\n\tchange, err := strconv.ParseFloat(clean_string(params[\"deposit\"]), 64)\n\t\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tinitial, err := strconv.ParseFloat(result.Balance, 64)\n\tupdated := strconv.FormatFloat((initial + change), 'f', -1, 64)\n\tresult.Balance = updated\n\tif err != nil{\n\t\tfmt.Println(err)\n\t}\n\tentry, err := bson.Marshal(result)\n\t_ , err = collection.ReplaceOne(context.TODO(), filter, entry)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tgenerate_record(clean_string(params[\"account\"]), updated, \"+\"+clean_string(params[\"deposit\"]), \"deposit\")\n}", "func (a *Account) Deposit(amount int64) (newBalance int64, ok bool) {\n\t// default assumption is that the deposit failed or account is closed\n\tok = false\n\n\ta.mutex.Lock()\n\tdefer a.mutex.Unlock()\n\n\tif a.open {\n\t\tpossibleNewBalance := a.balance + amount\n\n\t\t// If amount is negative, action is permitted only if withdrawl\n\t\t// amount is less than original balance.\n\t\tif possibleNewBalance >= 0 {\n\t\t\tok = true\n\t\t\ta.balance = possibleNewBalance\n\t\t}\n\t}\n\n\treturn a.balance, ok\n}", "func (_PlasmaFramework *PlasmaFrameworkCallerSession) IsDeposit(blockNum *big.Int) (bool, error) {\n\treturn _PlasmaFramework.Contract.IsDeposit(&_PlasmaFramework.CallOpts, blockNum)\n}", "func (_PlasmaFramework *PlasmaFrameworkSession) IsDeposit(blockNum *big.Int) (bool, error) {\n\treturn _PlasmaFramework.Contract.IsDeposit(&_PlasmaFramework.CallOpts, blockNum)\n}", "func checkAccount(t *testing.T, tree *avl.Tree, id AccountID, expectedBalance, expectedReward, expectedStake *uint64) {\n\tvar balance, reward, stake uint64\n\tvar exist bool\n\n\tbalance, exist = ReadAccountBalance(tree, id)\n\tassert.Equal(t, expectedBalance != nil, exist, \"account ID: %x\", id)\n\treward, exist = ReadAccountReward(tree, id)\n\tassert.Equal(t, expectedReward != nil, exist, \"account ID: %x\", id)\n\tstake, exist = ReadAccountStake(tree, id)\n\tassert.Equal(t, expectedStake != nil, exist, \"account ID: %x\", id)\n\n\tif expectedBalance != nil {\n\t\tassert.Equal(t, balance, *expectedBalance, \"account ID: %x\", id)\n\t}\n\n\tif expectedReward != nil {\n\t\tassert.Equal(t, reward, *expectedReward, \"account ID: %x\", id)\n\t}\n\n\tif expectedStake != nil {\n\t\tassert.Equal(t, stake, *expectedStake, \"account ID: %x\", id)\n\t}\n}", "func Deposit(card *types.Card, amount types.Money) {\n\tif amount <= 0{\n\t\treturn\n\t}\n\t\n\tif !(*card).Active{\n\t\treturn\n\t}\n\n\tif amount > depositLimit {\n\t\treturn\n\t}\n\n\t(*card).Balance += amount\n}", "func (_Vault *VaultTransactorSession) Deposit(incognitoAddress string) (*types.Transaction, error) {\n\treturn _Vault.Contract.Deposit(&_Vault.TransactOpts, incognitoAddress)\n}", "func (_TTFT20 *TTFT20CallerSession) IsWithdrawalAddress(_addr common.Address) (bool, error) {\n\treturn _TTFT20.Contract.IsWithdrawalAddress(&_TTFT20.CallOpts, _addr)\n}", "func (_Wmatic *WmaticFilterer) FilterDeposit(opts *bind.FilterOpts, dst []common.Address) (*WmaticDepositIterator, error) {\n\n\tvar dstRule []interface{}\n\tfor _, dstItem := range dst {\n\t\tdstRule = append(dstRule, dstItem)\n\t}\n\n\tlogs, sub, err := _Wmatic.contract.FilterLogs(opts, \"Deposit\", dstRule)\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\treturn &WmaticDepositIterator{contract: _Wmatic.contract, event: \"Deposit\", logs: logs, sub: sub}, nil\n}", "func (_Vault *VaultSession) Deposit(incognitoAddress string) (*types.Transaction, error) {\n\treturn _Vault.Contract.Deposit(&_Vault.TransactOpts, incognitoAddress)\n}", "func (_Cakevault *CakevaultTransactor) Deposit(opts *bind.TransactOpts, _amount *big.Int) (*types.Transaction, error) {\n\treturn _Cakevault.contract.Transact(opts, \"deposit\", _amount)\n}", "func (a *Act) Deposit(amt int64) (int64, bool) {\n\tbalance, ok := <-a.balance - amt\n\tif <-a.closed || balance < 0 {\n\t\treturn 0, false\n\t}\n\ta.balance <- balance\n\treturn <-a.balance, true\n}", "func (_WELV9 *WELV9Filterer) FilterDeposit(opts *bind.FilterOpts, dst []common.Address) (*WELV9DepositIterator, error) {\n\n\tvar dstRule []interface{}\n\tfor _, dstItem := range dst {\n\t\tdstRule = append(dstRule, dstItem)\n\t}\n\n\tlogs, sub, err := _WELV9.contract.FilterLogs(opts, \"Deposit\", dstRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &WELV9DepositIterator{contract: _WELV9.contract, event: \"Deposit\", logs: logs, sub: sub}, nil\n}", "func (a *Account) Deposit(amount int64) (newBalance int64, ok bool) {\n\ta.mutex.Lock()\n\tdefer a.mutex.Unlock()\n\n\t// Check if the account is closed, or if the \"deposit\" is negative and\n\t// would overdraw the account.\n\tif !a.open || (a.balance+amount) < 0 {\n\t\treturn 0, false\n\t}\n\n\ta.balance += amount\n\n\treturn a.balance, true\n}", "func (_Smartchef *SmartchefTransactor) Deposit(opts *bind.TransactOpts, _amount *big.Int) (*types.Transaction, error) {\n\treturn _Smartchef.contract.Transact(opts, \"deposit\", _amount)\n}", "func (_IWETH *IWETHTransactor) Deposit(opts *bind.TransactOpts) (*types.Transaction, error) {\r\n\treturn _IWETH.contract.Transact(opts, \"deposit\")\r\n}", "func (_Depositmanager *DepositmanagerTransactorSession) Deposit(_amount *big.Int, _tokenType *big.Int, _pubkey []byte) (*types.Transaction, error) {\n\treturn _Depositmanager.Contract.Deposit(&_Depositmanager.TransactOpts, _amount, _tokenType, _pubkey)\n}", "func isStakeAddressClean(ctx *action.Context, v *identity.Validator) (bool, error) {\n\toptions, err := ctx.GovernanceStore.GetStakingOptions()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tzero := balance.NewAmountFromInt(0)\n\n\t// check locked amount\n\tlockedAmt, err := ctx.Delegators.GetValidatorDelegationAmount(v.Address, v.StakeAddress)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif !lockedAmt.Equals(*zero) {\n\t\treturn false, nil\n\t}\n\n\t// check pending amount\n\tpendingAmounts := ctx.Delegators.GetMaturedPendingAmount(v.StakeAddress, ctx.Header.Height, options.MaturityTime+1)\n\tif len(pendingAmounts) != 0 {\n\t\treturn false, nil\n\t}\n\n\t// check bounded amount\n\tboundCoin, err := ctx.Delegators.GetDelegatorBoundedAmount(v.StakeAddress)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif !boundCoin.Equals(*zero) {\n\t\treturn false, nil\n\t}\n\n\treturn true, nil\n}", "func (_SimpleMultiSig *SimpleMultiSigFilterer) FilterDeposit(opts *bind.FilterOpts, _from []common.Address) (*SimpleMultiSigDepositIterator, error) {\n\n\tvar _fromRule []interface{}\n\tfor _, _fromItem := range _from {\n\t\t_fromRule = append(_fromRule, _fromItem)\n\t}\n\n\tlogs, sub, err := _SimpleMultiSig.contract.FilterLogs(opts, \"Deposit\", _fromRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &SimpleMultiSigDepositIterator{contract: _SimpleMultiSig.contract, event: \"Deposit\", logs: 
logs, sub: sub}, nil\n}", "func TestGetMinterDeposAddress(t *testing.T) {\n\n\ta := InitApp(\"https://mbank.dl-dev.ru/api/\")\n\n\taddr, err := a.GetMinterDeposAddress(BitcoinAddress, \"BIP\", 0.1)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif addr == nil {\n\t\tt.Errorf(\"Empty responce\")\n\t}\n\n\tif addr.Data.Tag == \"\" || addr.Data.Address == \"\" {\n\t\tt.Errorf(\"Empty tag or address: %s and %s \", addr.Data.Tag, addr.Data.Address)\n\t}\n\n}", "func (_WELV9 *WELV9Transactor) Deposit(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _WELV9.contract.Transact(opts, \"deposit\")\n}", "func (_Depositmanager *DepositmanagerSession) Deposit(_amount *big.Int, _tokenType *big.Int, _pubkey []byte) (*types.Transaction, error) {\n\treturn _Depositmanager.Contract.Deposit(&_Depositmanager.TransactOpts, _amount, _tokenType, _pubkey)\n}", "func (_Coordinatorproxy *CoordinatorproxyCaller) DepositManager(opts *bind.CallOpts) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _Coordinatorproxy.contract.Call(opts, out, \"depositManager\")\n\treturn *ret0, err\n}", "func (c *CChainHelper) CheckBalance(client *avalanchegoclient.Client, address string, assetID string, expectedAmount uint64) error {\n\tpanic(\"TODO\")\n}", "func (ctx *DefaultContext) exist(address []byte) bool {\n\tacc := ctx.accounts[string(address)]\n\tif acc != nil {\n\t\treturn true\n\t}\n\treturn ctx.queryEngine.AccountExist(bytesToCommonAddress(address))\n}", "func Deposit(interactor account.Interactor) fiber.Handler {\n\n\treturn func(ctx *fiber.Ctx) error {\n\t\tvar userDetails = ctx.Locals(\"userDetails\").(map[string]string)\n\t\tuserId := userDetails[\"userId\"]\n\n\t\tvar p param\n\t\t_ = ctx.BodyParser(&p)\n\n\t\tbalance, err := interactor.Deposit(uuid.FromStringOrNil(userId), p.Amount)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn ctx.JSON(map[string]interface{}{\n\t\t\t\"message\": fmt.Sprintf(\"Amount successfully deposited. 
New balance %v\", balance),\n\t\t\t\"balance\": balance,\n\t\t\t\"userId\": userId,\n\t\t})\n\t}\n}", "func Deposit(card *types.Card, amount types.Money) {\n\tif !card.Active {\n\t\treturn\n\t}\n\n\tif amount < 0 {\n\t\treturn\n\t}\n\n\tif amount > 50_000_00 {\n\t\treturn\n\t}\n\n\tcard.Balance += amount\n\n\treturn\n}", "func (tt *UniqueSale) FindByAddress(addr string) (transfers []UniqueSale, err error) {\n\tquery := `select id,tokenid,lookup_id,operation, blocknumber,index,txhash,buyer,seller,price,hash,timestamp from unique_sale where lookup_id = $1 and tokenid=$2 and (buyer=$3 or seller=$3)`\n\tctx, cancel := context.WithTimeout(context.Background(), timeout())\n\tdefer cancel()\n\n\trows, err := db.QueryContext(ctx, query, tt.LookupID, tt.TokenID, addr)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn tt.getSales(rows)\n}", "func (_SingleAuto *SingleAutoFilterer) WatchDeposit(opts *bind.WatchOpts, sink chan<- *SingleAutoDeposit, user []common.Address, pid []*big.Int) (event.Subscription, error) {\n\n\tvar userRule []interface{}\n\tfor _, userItem := range user {\n\t\tuserRule = append(userRule, userItem)\n\t}\n\tvar pidRule []interface{}\n\tfor _, pidItem := range pid {\n\t\tpidRule = append(pidRule, pidItem)\n\t}\n\n\tlogs, sub, err := _SingleAuto.contract.WatchLogs(opts, \"Deposit\", userRule, pidRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(SingleAutoDeposit)\n\t\t\t\tif err := _SingleAuto.contract.UnpackLog(event, \"Deposit\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func (_EtherDelta *EtherDeltaTransactor) Deposit(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _EtherDelta.contract.Transact(opts, \"deposit\")\n}", "func (w *Wallet) Deposit(amount Bitcoin) {\n\t// fmt.Printf(\"address of balance in Deposit is %v \\n\", &w.balance)\n\tw.balance += amount\n}", "func (s *server) Deposit(ctx context.Context, request *event.DepositParam) (*event.Response, error) {\n\treturn &event.Response{Status: int32(200), Message: string(\"Deposit\"), Data: []*event.Deposit{}}, nil\n}", "func (sc Funcs) Deposit(ctx wasmlib.ScFuncClientContext) *DepositCall {\n\treturn &DepositCall{Func: wasmlib.NewScFunc(ctx, HScName, HFuncDeposit)}\n}", "func checkContract(t *testing.T, tree *avl.Tree, id TransactionID, codeFilePath string, expectedGasBalance uint64, expectedEmptyMemPages []int, expectedNotEmptyMemPages []int) {\n\tcode, exist := ReadAccountContractCode(tree, id)\n\tassert.True(t, exist, \"contract ID: %x\", id)\n\tassert.NotEmpty(t, code, \"contract ID: %x\", id)\n\n\tgasBalance, exist := ReadAccountContractGasBalance(tree, id)\n\tassert.True(t, exist, \"contract ID: %x\", id)\n\tassert.Equal(t, expectedGasBalance, gasBalance, \"contract ID: %x\", id)\n\n\texpectedCode, err := ioutil.ReadFile(codeFilePath)\n\tassert.NoError(t, err)\n\tassert.EqualValues(t, expectedCode, code, \"contract ID: %x, filepath: %s\", id, codeFilePath)\n\n\tnumPages, exist := ReadAccountContractNumPages(tree, id)\n\tassert.True(t, exist, \"contract 
ID: %x\", id)\n\tassert.EqualValues(t, expectedPageNum, numPages, \"contract ID: %x\", id)\n\n\tfor _, v := range expectedEmptyMemPages {\n\t\tpage, exist := ReadAccountContractPage(tree, id, uint64(v))\n\t\tassert.False(t, exist)\n\t\tassert.Empty(t, page)\n\t}\n\n\tfor _, v := range expectedNotEmptyMemPages {\n\t\tpage, exist := ReadAccountContractPage(tree, id, uint64(v))\n\t\tassert.True(t, exist)\n\t\tassert.NotEmpty(t, page)\n\t\tassert.Len(t, page, PageSize)\n\t}\n}", "func (_TokensNetwork *TokensNetworkTransactor) Deposit(opts *bind.TransactOpts, token common.Address, participant common.Address, partner common.Address, amount *big.Int, settle_timeout uint64) (*types.Transaction, error) {\n\treturn _TokensNetwork.contract.Transact(opts, \"deposit\", token, participant, partner, amount, settle_timeout)\n}" ]
[ "0.5865928", "0.5765526", "0.5755803", "0.5730663", "0.57218504", "0.57081556", "0.57029444", "0.56777817", "0.5657796", "0.56503814", "0.55861264", "0.5580167", "0.5546772", "0.551311", "0.55073225", "0.54944414", "0.54910946", "0.54830885", "0.5471482", "0.54598916", "0.54572177", "0.54562366", "0.54514265", "0.54249805", "0.5408172", "0.53955036", "0.53890747", "0.538719", "0.5346112", "0.53286916", "0.5323975", "0.5319566", "0.53176117", "0.5313887", "0.53132087", "0.53117496", "0.53117454", "0.5311577", "0.53014535", "0.5299257", "0.52899647", "0.5277609", "0.5269228", "0.5267382", "0.5258762", "0.52497333", "0.52491933", "0.52470636", "0.5240346", "0.5240263", "0.5239636", "0.5238357", "0.52355766", "0.5219752", "0.5209091", "0.52083683", "0.5203757", "0.52027625", "0.5200503", "0.5197249", "0.5196017", "0.519593", "0.51793814", "0.5176929", "0.5172376", "0.5166111", "0.5161764", "0.5158309", "0.51582134", "0.5154724", "0.5150216", "0.51500154", "0.51488173", "0.514809", "0.51472384", "0.5142247", "0.5140507", "0.5138314", "0.5131498", "0.5128413", "0.5127532", "0.51261485", "0.51238066", "0.51198417", "0.5103781", "0.51034653", "0.51033455", "0.5091458", "0.5087914", "0.508595", "0.508513", "0.5072521", "0.5070458", "0.50701404", "0.5057685", "0.50553346", "0.50500304", "0.50486004", "0.5045503", "0.504471" ]
0.8275546
0
Deprecated: Use Type.Descriptor instead.
func (Type) EnumDescriptor() ([]byte, []int) { return file_price_price_proto_rawDescGZIP(), []int{0} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GetTypeDescriptor(typeName string) (protoreflect.Descriptor, error) {\n\treturn dm.GetTypeDescriptor(typeName)\n}", "func (*TypeInformation) Descriptor() ([]byte, []int) {\n\treturn file_authzed_api_v0_namespace_proto_rawDescGZIP(), []int{3}\n}", "func (*Type) Descriptor() ([]byte, []int) {\n\treturn file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1}\n}", "func (*WithWellKnownTypes) Descriptor() ([]byte, []int) {\n\treturn file_testing_proto_rawDescGZIP(), []int{1}\n}", "func (*Type_ListType) Descriptor() ([]byte, []int) {\n\treturn file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 0}\n}", "func Type_Size__t_() TypeDescriptor {\n return type_Size__t_{}\n}", "func (*EntityType) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_aiplatform_v1beta1_entity_type_proto_rawDescGZIP(), []int{0}\n}", "func (*Pokemon_Type_Type) Descriptor() ([]byte, []int) {\n\treturn file_proto_pokemon_proto_rawDescGZIP(), []int{1, 0, 0}\n}", "func (Schema_Type) EnumDescriptor() ([]byte, []int) {\n\treturn file_google_cloud_dataplex_v1_metadata_proto_rawDescGZIP(), []int{13, 0}\n}", "func (*DeviceType) Descriptor() ([]byte, []int) {\n\treturn file_register_registerpb_register_proto_rawDescGZIP(), []int{3}\n}", "func (*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}", "func (*TypeDefinitionRegistrationOptions) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{132}\n}", "func (*KnownTypes) Descriptor() ([]byte, []int) {\n\treturn file_jsonpb_proto_test2_proto_rawDescGZIP(), []int{8}\n}", "func (*TypeCount) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{13}\n}", "func (*GetPersonTypeRequest) Descriptor() ([]byte, []int) {\n\treturn file_testsvc_test_v1_enums_proto_rawDescGZIP(), []int{1}\n}", "func (*SystemTypeExtension) Descriptor() ([]byte, []int) {\n\treturn file_ml_metadata_proto_metadata_store_proto_rawDescGZIP(), []int{0}\n}", "func (*AnalysisMessageWeakSchema_ArgType) Descriptor() ([]byte, []int) {\n\treturn file_analysis_v1alpha1_message_proto_rawDescGZIP(), []int{1, 0}\n}", "func (*AllFieldTypes) Descriptor() ([]byte, []int) {\n\treturn file_examples_documents_example_proto_rawDescGZIP(), []int{1}\n}", "func (Entity_Type) EnumDescriptor() ([]byte, []int) {\n\treturn file_google_cloud_dataplex_v1_metadata_proto_rawDescGZIP(), []int{11, 0}\n}", "func (*ComplexType) Descriptor() ([]byte, []int) {\n\treturn file_testing_proto_rawDescGZIP(), []int{4}\n}", "func (*TypeDefinitionOptions) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{131}\n}", "func (*FindingTypeStats) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_websecurityscanner_v1alpha_finding_type_stats_proto_rawDescGZIP(), []int{0}\n}", "func (*TypeDefinitionRequest) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{133}\n}", "func (Type_WellKnownType) EnumDescriptor() ([]byte, []int) {\n\treturn file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 1}\n}", "func (*Type_FunctionType) Descriptor() ([]byte, []int) {\n\treturn file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 2}\n}", "func (Type) EnumDescriptor() ([]byte, []int) {\n\treturn file_dynamic_sampling_proto_rawDescGZIP(), []int{0}\n}", "func (Type) EnumDescriptor() ([]byte, []int) {\n\treturn file_mtpc_proto_rawDescGZIP(), []int{1}\n}", "func (*UrlType) 
Descriptor() ([]byte, []int) {\n\treturn file_ur_proto_rawDescGZIP(), []int{0}\n}", "func (*KindTag) Descriptor() ([]byte, []int) {\n\treturn file_kindTagService_proto_rawDescGZIP(), []int{1}\n}", "func (StandardArtifacts_Types) EnumDescriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{40, 0}\n}", "func (FieldInfo_Type) EnumDescriptor() ([]byte, []int) {\n\treturn file_TSPArchiveMessages_proto_rawDescGZIP(), []int{2, 0}\n}", "func (*Decl) Descriptor() ([]byte, []int) {\n\treturn file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{2}\n}", "func (Service_Type) EnumDescriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_mdb_clickhouse_v1_cluster_proto_rawDescGZIP(), []int{7, 0}\n}", "func (*NestedType) Descriptor() ([]byte, []int) {\n\treturn file_testing_proto_rawDescGZIP(), []int{3}\n}", "func (*InnerType) Descriptor() ([]byte, []int) {\n\treturn file_testing_proto_rawDescGZIP(), []int{2}\n}", "func (Type) EnumDescriptor() ([]byte, []int) {\n\treturn file_github_com_getamis_alice_crypto_tss_addshare_message_proto_rawDescGZIP(), []int{0}\n}", "func NewDescriptor() *Descriptor {\n\treturn &Descriptor{}\n}", "func (*ArtifactType) Descriptor() ([]byte, []int) {\n\treturn file_ml_metadata_proto_metadata_store_proto_rawDescGZIP(), []int{3}\n}", "func (InstanceConfig_Type) EnumDescriptor() ([]byte, []int) {\n\treturn file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{1, 0}\n}", "func (*MySQLType) Descriptor() ([]byte, []int) {\n\treturn file_mySQLOptions_proto_rawDescGZIP(), []int{0}\n}", "func (Type) EnumDescriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{0}\n}", "func TestDistSQLTypeResolver_GetTypeDescriptor_WrongType(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\tdefer log.Scope(t).Close(t)\n\n\tctx := context.Background()\n\ttc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{})\n\tdefer tc.Stopper().Stop(ctx)\n\n\ts := tc.Server(0)\n\n\ttdb := sqlutils.MakeSQLRunner(tc.ServerConn(0))\n\ttdb.Exec(t, `CREATE TABLE t()`)\n\tvar id descpb.ID\n\ttdb.QueryRow(t, \"SELECT $1::regclass::int\", \"t\").Scan(&id)\n\n\terr := descs.Txn(\n\t\tctx,\n\t\ts.ClusterSettings(),\n\t\ts.LeaseManager().(*lease.Manager),\n\t\ts.InternalExecutor().(sqlutil.InternalExecutor),\n\t\ts.DB(),\n\t\tfunc(ctx context.Context, txn *kv.Txn, descriptors *descs.Collection) error {\n\t\t\ttr := descs.NewDistSQLTypeResolver(descriptors, txn)\n\t\t\t_, _, err := tr.GetTypeDescriptor(ctx, id)\n\t\t\treturn err\n\t\t})\n\trequire.Regexp(t, `descriptor \\d+ is a relation not a type`, err)\n\trequire.Equal(t, pgcode.WrongObjectType, pgerror.GetPGCode(err))\n}", "func isDescriptor(T reflect.Type) bool {\n\treturn T.AssignableTo(descriptorType) && descriptorType.AssignableTo(T)\n}", "func (SubscriberQuotaUpdate_Type) EnumDescriptor() ([]byte, []int) {\n\treturn file_pipelined_proto_rawDescGZIP(), []int{14, 0}\n}", "func (x *fastReflection_Metadata) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_Metadata\n}", "func (StandardPTransforms_DeprecatedPrimitives) EnumDescriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{4, 1}\n}", "func (d *Descriptor) Kind() Kind {\n\treturn d.kind\n}", "func (Host_Type) EnumDescriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_mdb_clickhouse_v1_cluster_proto_rawDescGZIP(), []int{6, 0}\n}", "func (*NoneArtifactStructType) Descriptor() ([]byte, []int) {\n\treturn 
file_ml_metadata_proto_metadata_store_proto_rawDescGZIP(), []int{17}\n}", "func (Type) EnumDescriptor() ([]byte, []int) {\n\treturn file_protos_sortType_proto_rawDescGZIP(), []int{0}\n}", "func (*Instant) Descriptor() ([]byte, []int) {\n\treturn file_proto_google_fhir_proto_stu3_datatypes_proto_rawDescGZIP(), []int{7}\n}", "func (x *fastReflection_ModuleOptions) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_ModuleOptions\n}", "func (PatientProviderType) EnumDescriptor() ([]byte, []int) {\n\treturn file_heyrenee_v1_messages_patient_provider_proto_rawDescGZIP(), []int{1}\n}", "func (ControllerPlanningType) EnumDescriptor() ([]byte, []int) {\n\treturn file_alameda_api_v1alpha1_datahub_plannings_types_proto_rawDescGZIP(), []int{1}\n}", "func (MetadataUpdateType) EnumDescriptor() ([]byte, []int) {\n\treturn file_inotify_proto_rawDescGZIP(), []int{2}\n}", "func (Type) EnumDescriptor() ([]byte, []int) {\n\treturn file_crypto_proto_rawDescGZIP(), []int{0}\n}", "func (MetricsType) EnumDescriptor() ([]byte, []int) {\n\treturn file_proto_enums_provider_proto_rawDescGZIP(), []int{1}\n}", "func (Signature_Scheme) EnumDescriptor() ([]byte, []int) {\n\treturn file_spacemesh_v1_types_proto_rawDescGZIP(), []int{10, 0}\n}", "func (*Signature) Descriptor() ([]byte, []int) {\n\treturn file_spacemesh_v1_types_proto_rawDescGZIP(), []int{10}\n}", "func (*ControllerPlanningSpec) Descriptor() ([]byte, []int) {\n\treturn file_alameda_api_v1alpha1_datahub_plannings_types_proto_rawDescGZIP(), []int{0}\n}", "func (Span_Link_Type) EnumDescriptor() ([]byte, []int) {\n\treturn file_google_devtools_cloudtrace_v2_trace_proto_rawDescGZIP(), []int{0, 3, 0}\n}", "func (PlanningType) EnumDescriptor() ([]byte, []int) {\n\treturn file_alameda_api_v1alpha1_datahub_plannings_types_proto_rawDescGZIP(), []int{0}\n}", "func (*Type_AbstractType) Descriptor() ([]byte, []int) {\n\treturn file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 3}\n}", "func (ProbeConf_Type) EnumDescriptor() ([]byte, []int) {\n\treturn file_github_com_cloudprober_cloudprober_probes_udplistener_proto_config_proto_rawDescGZIP(), []int{0, 0}\n}", "func (*ArtifactStructType) Descriptor() ([]byte, []int) {\n\treturn file_ml_metadata_proto_metadata_store_proto_rawDescGZIP(), []int{13}\n}", "func (*AllSupportedTypes) Descriptor() ([]byte, []int) {\n\treturn file_testing_proto_rawDescGZIP(), []int{0}\n}", "func (Event_Type) EnumDescriptor() ([]byte, []int) {\n\treturn file_ml_metadata_proto_metadata_store_proto_rawDescGZIP(), []int{4, 0}\n}", "func (*PropertyTypes) Descriptor() ([]byte, []int) {\n\treturn file_v1_property_values_proto_rawDescGZIP(), []int{0}\n}", "func (*GetPersonTypeResponse) Descriptor() ([]byte, []int) {\n\treturn file_testsvc_test_v1_enums_proto_rawDescGZIP(), []int{2}\n}", "func (Diagnostic_Kind) EnumDescriptor() ([]byte, []int) {\n\treturn file_google_api_servicemanagement_v1_resources_proto_rawDescGZIP(), []int{2, 0}\n}", "func (*Type_MapType) Descriptor() ([]byte, []int) {\n\treturn file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 1}\n}", "func (DeviceType) EnumDescriptor() ([]byte, []int) {\n\treturn file_machinery_proto_rawDescGZIP(), []int{0}\n}", "func (*GetModelTypeRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{95}\n}", "func (*GetAvailableGPUTypesRequest) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{12}\n}", "func (*TimeOnly) Descriptor() ([]byte, []int) 
{\n\treturn file_types_types_proto_rawDescGZIP(), []int{4}\n}", "func (NotifyType) EnumDescriptor() ([]byte, []int) {\n\treturn file_Notify_proto_rawDescGZIP(), []int{0}\n}", "func (*KindTagWhere) Descriptor() ([]byte, []int) {\n\treturn file_kindTagService_proto_rawDescGZIP(), []int{0}\n}", "func (o MetricDescriptorOutput) Type() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v MetricDescriptor) *string { return v.Type }).(pulumi.StringPtrOutput)\n}", "func (Scheme_Registered) EnumDescriptor() ([]byte, []int) {\n\treturn file_http_types_proto_rawDescGZIP(), []int{1, 0}\n}", "func (TopicType) EnumDescriptor() ([]byte, []int) {\n\treturn file_toit_model_pubsub_topic_proto_rawDescGZIP(), []int{0}\n}", "func (SensorType) EnumDescriptor() ([]byte, []int) {\n\treturn file_fk_atlas_proto_rawDescGZIP(), []int{0}\n}", "func (*ListArtifactStructType) Descriptor() ([]byte, []int) {\n\treturn file_ml_metadata_proto_metadata_store_proto_rawDescGZIP(), []int{16}\n}", "func (*ListModelTypesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{96}\n}", "func (*SortType) Descriptor() ([]byte, []int) {\n\treturn file_protos_sortType_proto_rawDescGZIP(), []int{0}\n}", "func (IdpType) EnumDescriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{37}\n}", "func (Scheme_Registered) EnumDescriptor() ([]byte, []int) {\n\treturn file_viz_proto_rawDescGZIP(), []int{11, 0}\n}", "func (Type) EnumDescriptor() ([]byte, []int) {\n\treturn file_model_music_proto_rawDescGZIP(), []int{0}\n}", "func (PermissionType) EnumDescriptor() ([]byte, []int) {\n\treturn file_pkg_common_permissions_proto_rawDescGZIP(), []int{1}\n}", "func (*NodeType) Descriptor() ([]byte, []int) {\n\treturn file_e2sm_mho_go_v2_e2sm_v2_proto_rawDescGZIP(), []int{14}\n}", "func (*AnyArtifactStructType) Descriptor() ([]byte, []int) {\n\treturn file_ml_metadata_proto_metadata_store_proto_rawDescGZIP(), []int{18}\n}", "func (*TypedValue) Descriptor() ([]byte, []int) {\n\treturn file_proto_gnmi_gnmi_proto_rawDescGZIP(), []int{2}\n}", "func (*TypeDefinitionResponse) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{26}\n}", "func (*Fqdn) Descriptor() ([]byte, []int) {\n\treturn file_wgtwo_common_v0_types_proto_rawDescGZIP(), []int{5}\n}", "func (ms MetricDescriptor) Type() MetricType {\n\treturn MetricType((*ms.orig).Type)\n}", "func (SecondFactorType) EnumDescriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{40}\n}", "func (*AllType) Descriptor() ([]byte, []int) {\n\treturn file_all_proto_rawDescGZIP(), []int{0}\n}", "func (*Pokemon_Type) Descriptor() ([]byte, []int) {\n\treturn file_proto_pokemon_proto_rawDescGZIP(), []int{1, 0}\n}", "func (*Scheme) Descriptor() ([]byte, []int) {\n\treturn file_http_types_proto_rawDescGZIP(), []int{1}\n}", "func (*RicStyleType) Descriptor() ([]byte, []int) {\n\treturn file_e2sm_mho_go_v2_e2sm_v2_proto_rawDescGZIP(), []int{21}\n}", "func (*TupleArtifactStructType) Descriptor() ([]byte, []int) {\n\treturn file_ml_metadata_proto_metadata_store_proto_rawDescGZIP(), []int{19}\n}" ]
[ "0.6832599", "0.6623278", "0.6592824", "0.61390984", "0.6112982", "0.6104237", "0.60554916", "0.6050091", "0.6049072", "0.6022466", "0.60134995", "0.60118586", "0.59987587", "0.59904647", "0.59875125", "0.59790707", "0.5977629", "0.59523624", "0.5936686", "0.5931284", "0.59295624", "0.59271145", "0.59267867", "0.59189934", "0.5918581", "0.5903734", "0.5897914", "0.5888334", "0.58770335", "0.5857432", "0.58410794", "0.58365077", "0.58361524", "0.5833148", "0.5832459", "0.5831446", "0.58286893", "0.58276576", "0.5821912", "0.58185494", "0.58149934", "0.58149266", "0.5810858", "0.5802826", "0.5800594", "0.58005697", "0.5798254", "0.57975096", "0.5796316", "0.5791436", "0.5788524", "0.57820237", "0.5779558", "0.57778", "0.57703614", "0.5767842", "0.57670605", "0.57639945", "0.5762774", "0.57607245", "0.5754533", "0.5754305", "0.57529134", "0.5744824", "0.57404864", "0.573522", "0.5721722", "0.5712892", "0.5711554", "0.5703138", "0.57022893", "0.5702187", "0.57010204", "0.56991345", "0.56989974", "0.5698224", "0.569535", "0.56941533", "0.5688482", "0.5686076", "0.5685382", "0.56851053", "0.56787694", "0.56773156", "0.5672099", "0.56707925", "0.56686103", "0.5667422", "0.56601954", "0.5654826", "0.5654634", "0.5653438", "0.5652131", "0.5645114", "0.5642945", "0.5641455", "0.56412715", "0.5638508", "0.5630084", "0.56234574" ]
0.5776995
54
Deprecated: Use Price.ProtoReflect.Descriptor instead.
func (*Price) Descriptor() ([]byte, []int) { return file_price_price_proto_rawDescGZIP(), []int{0} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (*Price) Descriptor() ([]byte, []int) {\n\treturn file_proto_supply_proto_rawDescGZIP(), []int{0}\n}", "func (*PriceInfo) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_retail_v2_common_proto_rawDescGZIP(), []int{6}\n}", "func (*PricingPodPriceRequest) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{8}\n}", "func (*PricingNodePriceRequest) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{6}\n}", "func (*PricingPodPriceResponse) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{9}\n}", "func (*ModifyPriceLevel) Descriptor() ([]byte, []int) {\n\treturn file_openfeed_proto_rawDescGZIP(), []int{19}\n}", "func (*PricingNodePriceResponse) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{7}\n}", "func (*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}", "func (*OrderPricePair) Descriptor() ([]byte, []int) {\n\treturn file_witness_proto_rawDescGZIP(), []int{10}\n}", "func (*GetDownLinkPriceRequest) Descriptor() ([]byte, []int) {\n\treturn file_wallet_proto_rawDescGZIP(), []int{25}\n}", "func (*ProductPriceRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_Ultimate_Super_WebDev_Corp_gateway_services_widget_widget_proto_rawDescGZIP(), []int{1}\n}", "func (*GasPriceRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{21}\n}", "func (*Money) Descriptor() ([]byte, []int) {\n\treturn file_proto_google_fhir_proto_stu3_datatypes_proto_rawDescGZIP(), []int{32}\n}", "func (*PlanChange_Removed) Descriptor() ([]byte, []int) {\n\treturn edgelq_limits_proto_v1alpha2_plan_change_proto_rawDescGZIP(), []int{0, 3}\n}", "func (*AddPriceLevel) Descriptor() ([]byte, []int) {\n\treturn file_openfeed_proto_rawDescGZIP(), []int{17}\n}", "func (*PlanChange_Modified) Descriptor() ([]byte, []int) {\n\treturn edgelq_limits_proto_v1alpha2_plan_change_proto_rawDescGZIP(), []int{0, 1}\n}", "func (*DeletePriceLevel) Descriptor() ([]byte, []int) {\n\treturn file_openfeed_proto_rawDescGZIP(), []int{18}\n}", "func (*GetDownLinkPriceResponse) Descriptor() ([]byte, []int) {\n\treturn file_wallet_proto_rawDescGZIP(), []int{26}\n}", "func (Type) EnumDescriptor() ([]byte, []int) {\n\treturn file_price_price_proto_rawDescGZIP(), []int{0}\n}", "func (*Instance) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{28}\n}", "func (*KindSellTime) Descriptor() ([]byte, []int) {\n\treturn file_kindSellTimeService_proto_rawDescGZIP(), []int{0}\n}", "func (*PlanChange) Descriptor() ([]byte, []int) {\n\treturn edgelq_limits_proto_v1alpha2_plan_change_proto_rawDescGZIP(), []int{0}\n}", "func (*PriceInfo_PriceRange) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_retail_v2_common_proto_rawDescGZIP(), []int{6, 0}\n}", "func ProtoFromMethodDescriptor(d protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto {\n\ttype canProto interface {\n\t\tMethodDescriptorProto() *descriptorpb.MethodDescriptorProto\n\t}\n\tif res, ok := d.(canProto); ok {\n\t\treturn res.MethodDescriptorProto()\n\t}\n\tif res, ok := d.(DescriptorProtoWrapper); ok {\n\t\tif md, ok := 
res.AsProto().(*descriptorpb.MethodDescriptorProto); ok {\n\t\t\treturn md\n\t\t}\n\t}\n\treturn protodesc.ToMethodDescriptorProto(d)\n}", "func (*VolumeAtPrice) Descriptor() ([]byte, []int) {\n\treturn file_openfeed_proto_rawDescGZIP(), []int{54}\n}", "func (*Performance) Descriptor() ([]byte, []int) {\n\treturn file_commissionService_proto_rawDescGZIP(), []int{2}\n}", "func (*SymbolMarginRate) Descriptor() ([]byte, []int) {\n\treturn file_symbol_margin_rate_proto_rawDescGZIP(), []int{0}\n}", "func (*MetricsServiceRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{18}\n}", "func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto {\n\tp := &descriptorpb.DescriptorProto{\n\t\tName: proto.String(string(message.Name())),\n\t\tOptions: proto.Clone(message.Options()).(*descriptorpb.MessageOptions),\n\t}\n\tfor i, fields := 0, message.Fields(); i < fields.Len(); i++ {\n\t\tp.Field = append(p.Field, ToFieldDescriptorProto(fields.Get(i)))\n\t}\n\tfor i, exts := 0, message.Extensions(); i < exts.Len(); i++ {\n\t\tp.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i)))\n\t}\n\tfor i, messages := 0, message.Messages(); i < messages.Len(); i++ {\n\t\tp.NestedType = append(p.NestedType, ToDescriptorProto(messages.Get(i)))\n\t}\n\tfor i, enums := 0, message.Enums(); i < enums.Len(); i++ {\n\t\tp.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i)))\n\t}\n\tfor i, xranges := 0, message.ExtensionRanges(); i < xranges.Len(); i++ {\n\t\txrange := xranges.Get(i)\n\t\tp.ExtensionRange = append(p.ExtensionRange, &descriptorpb.DescriptorProto_ExtensionRange{\n\t\t\tStart: proto.Int32(int32(xrange[0])),\n\t\t\tEnd: proto.Int32(int32(xrange[1])),\n\t\t\tOptions: proto.Clone(message.ExtensionRangeOptions(i)).(*descriptorpb.ExtensionRangeOptions),\n\t\t})\n\t}\n\tfor i, oneofs := 0, message.Oneofs(); i < oneofs.Len(); i++ {\n\t\tp.OneofDecl = append(p.OneofDecl, ToOneofDescriptorProto(oneofs.Get(i)))\n\t}\n\tfor i, ranges := 0, message.ReservedRanges(); i < ranges.Len(); i++ {\n\t\trrange := ranges.Get(i)\n\t\tp.ReservedRange = append(p.ReservedRange, &descriptorpb.DescriptorProto_ReservedRange{\n\t\t\tStart: proto.Int32(int32(rrange[0])),\n\t\t\tEnd: proto.Int32(int32(rrange[1])),\n\t\t})\n\t}\n\tfor i, names := 0, message.ReservedNames(); i < names.Len(); i++ {\n\t\tp.ReservedName = append(p.ReservedName, string(names.Get(i)))\n\t}\n\treturn p\n}", "func (*ProductMetadata) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v2_services_reach_plan_service_proto_rawDescGZIP(), []int{5}\n}", "func (*NetRateTalker) Descriptor() ([]byte, []int) {\n\treturn file_pkg_smgrpc_smgrpc_proto_rawDescGZIP(), []int{2}\n}", "func (*Money) Descriptor() ([]byte, []int) {\n\treturn file_protos_person_proto_rawDescGZIP(), []int{2}\n}", "func (*Decl_FunctionDecl_Overload) Descriptor() ([]byte, []int) {\n\treturn file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{2, 1, 0}\n}", "func (*PriceCalendar) Descriptor() ([]byte, []int) {\n\treturn file_fare_proto_rawDescGZIP(), []int{11}\n}", "func ProtoFromDescriptor(d protoreflect.Descriptor) proto.Message {\n\tswitch d := d.(type) {\n\tcase protoreflect.FileDescriptor:\n\t\treturn ProtoFromFileDescriptor(d)\n\tcase protoreflect.MessageDescriptor:\n\t\treturn ProtoFromMessageDescriptor(d)\n\tcase protoreflect.FieldDescriptor:\n\t\treturn ProtoFromFieldDescriptor(d)\n\tcase protoreflect.OneofDescriptor:\n\t\treturn 
ProtoFromOneofDescriptor(d)\n\tcase protoreflect.EnumDescriptor:\n\t\treturn ProtoFromEnumDescriptor(d)\n\tcase protoreflect.EnumValueDescriptor:\n\t\treturn ProtoFromEnumValueDescriptor(d)\n\tcase protoreflect.ServiceDescriptor:\n\t\treturn ProtoFromServiceDescriptor(d)\n\tcase protoreflect.MethodDescriptor:\n\t\treturn ProtoFromMethodDescriptor(d)\n\tdefault:\n\t\t// WTF??\n\t\tif res, ok := d.(DescriptorProtoWrapper); ok {\n\t\t\treturn res.AsProto()\n\t\t}\n\t\treturn nil\n\t}\n}", "func (*AddPeerRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{8}\n}", "func (*PlanChange_Added) Descriptor() ([]byte, []int) {\n\treturn edgelq_limits_proto_v1alpha2_plan_change_proto_rawDescGZIP(), []int{0, 0}\n}", "func (*PricingRequest) Descriptor() ([]byte, []int) {\n\treturn file_fare_proto_rawDescGZIP(), []int{9}\n}", "func (*Decimal) Descriptor() ([]byte, []int) {\n\treturn file_proto_google_fhir_proto_stu3_datatypes_proto_rawDescGZIP(), []int{5}\n}", "func (*GetVersionRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{9}\n}", "func (*MoneyRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_swap_swap_proto_rawDescGZIP(), []int{0}\n}", "func NewDescriptor(spotPriceAPI api.SpotPriceHistoryDescriber) *descriptor {\n\treturn &descriptor{\n\t\tbucket: make(spotPriceBucket),\n\t\tapi: spotPriceAPI,\n\t}\n}", "func (*GetMXCpriceRequest) Descriptor() ([]byte, []int) {\n\treturn file_wallet_proto_rawDescGZIP(), []int{27}\n}", "func (*Type) Descriptor() ([]byte, []int) {\n\treturn file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1}\n}", "func (*Decimal) Descriptor() ([]byte, []int) {\n\treturn file_Harmony_proto_rawDescGZIP(), []int{5}\n}", "func (*ProductsListResponse_Discount) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_proto_productslist_products_list_proto_rawDescGZIP(), []int{1, 0}\n}", "func (*GasPriceResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{49}\n}", "func (x *fastReflection_MsgCommunityPoolSpend) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_MsgCommunityPoolSpend\n}", "func (*AddPeerResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{30}\n}", "func (*Spend) Descriptor() ([]byte, []int) {\n\treturn file_recordwants_proto_rawDescGZIP(), []int{3}\n}", "func (x *fastReflection_Supply) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_Supply\n}", "func (*FeedbackMetrics) Descriptor() ([]byte, []int) {\n\treturn file_ssn_dataservice_v1_dataservice_proto_rawDescGZIP(), []int{12}\n}", "func (*TelemetryParams) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{62}\n}", "func (*CalculateDealPriceRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_mindexd_pb_mindexd_proto_rawDescGZIP(), []int{13}\n}", "func (*TradeRequest_Ticker) Descriptor() ([]byte, []int) {\n\treturn file_grpcoin_proto_rawDescGZIP(), []int{8, 0}\n}", "func (*DistributionChange_Modified) Descriptor() ([]byte, []int) {\n\treturn edgelq_applications_proto_v1alpha2_distribution_change_proto_rawDescGZIP(), []int{0, 1}\n}", "func (*DistributionChange_Removed) Descriptor() ([]byte, []int) {\n\treturn edgelq_applications_proto_v1alpha2_distribution_change_proto_rawDescGZIP(), []int{0, 3}\n}", "func 
(*PlanChange_Current) Descriptor() ([]byte, []int) {\n\treturn edgelq_limits_proto_v1alpha2_plan_change_proto_rawDescGZIP(), []int{0, 2}\n}", "func (*OrderDetail_ShopMoney) Descriptor() ([]byte, []int) {\n\treturn file_order_proto_rawDescGZIP(), []int{1, 19}\n}", "func (*TradeData) Descriptor() ([]byte, []int) {\n\treturn file_stock_service_proto_rawDescGZIP(), []int{2}\n}", "func (*ProvisioningPolicyChange_Modified) Descriptor() ([]byte, []int) {\n\treturn edgelq_devices_proto_v1alpha_provisioning_policy_change_proto_rawDescGZIP(), []int{0, 1}\n}", "func (PriceLevel) EnumDescriptor() ([]byte, []int) {\n\treturn file_google_maps_places_v1_place_proto_rawDescGZIP(), []int{0}\n}", "func (*CancelPlanResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{23}\n}", "func (*ReachCurve) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v2_services_reach_plan_service_proto_rawDescGZIP(), []int{16}\n}", "func (*Chart) Descriptor() ([]byte, []int) {\n\treturn file_helm_api_proto_rawDescGZIP(), []int{20}\n}", "func (*Police) Descriptor() ([]byte, []int) {\n\treturn file_proto_product_product_proto_rawDescGZIP(), []int{2}\n}", "func (Order_PriceMode) EnumDescriptor() ([]byte, []int) {\n\treturn file_waves_order_proto_rawDescGZIP(), []int{1, 1}\n}", "func (*ComplexType) Descriptor() ([]byte, []int) {\n\treturn file_testing_proto_rawDescGZIP(), []int{4}\n}", "func (*GenerateProductMixIdeasRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v2_services_reach_plan_service_proto_rawDescGZIP(), []int{7}\n}", "func (*ProvisioningPolicyChange_Removed) Descriptor() ([]byte, []int) {\n\treturn edgelq_devices_proto_v1alpha_provisioning_policy_change_proto_rawDescGZIP(), []int{0, 3}\n}", "func (*CalculateDealPriceResponse) Descriptor() ([]byte, []int) {\n\treturn file_api_mindexd_pb_mindexd_proto_rawDescGZIP(), []int{14}\n}", "func (*Decl) Descriptor() ([]byte, []int) {\n\treturn file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{2}\n}", "func (*PortfolioPosition_Ticker) Descriptor() ([]byte, []int) {\n\treturn file_grpcoin_proto_rawDescGZIP(), []int{7, 0}\n}", "func (*CSOEconItemDropRateBonus) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_common_proto_rawDescGZIP(), []int{78}\n}", "func (x *fastReflection_MsgUpdateParams) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_MsgUpdateParams\n}", "func (*Trade) Descriptor() ([]byte, []int) {\n\treturn file_openfeed_proto_rawDescGZIP(), []int{25}\n}", "func (*GetMXCpriceResponse) Descriptor() ([]byte, []int) {\n\treturn file_wallet_proto_rawDescGZIP(), []int{28}\n}", "func (*Forecast) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v2_services_reach_plan_service_proto_rawDescGZIP(), []int{18}\n}", "func (*OrderDetail_PriceSet) Descriptor() ([]byte, []int) {\n\treturn file_order_proto_rawDescGZIP(), []int{1, 2}\n}", "func (*UpgradeRuntimeRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_notebooks_v1_managed_service_proto_rawDescGZIP(), []int{9}\n}", "func (*TraceProto) Descriptor() ([]byte, []int) {\n\treturn file_internal_tracing_extended_extended_trace_proto_rawDescGZIP(), []int{0}\n}", "func (*ChartInfo) Descriptor() ([]byte, []int) {\n\treturn file_helm_api_proto_rawDescGZIP(), []int{24}\n}", "func (*Listen) Descriptor() ([]byte, []int) {\n\treturn file_pkg_smgrpc_smgrpc_proto_rawDescGZIP(), []int{4}\n}", "func (*PriceCalendarRequest) Descriptor() ([]byte, []int) {\n\treturn 
file_fare_proto_rawDescGZIP(), []int{12}\n}", "func (*DistributionChange_Current) Descriptor() ([]byte, []int) {\n\treturn edgelq_applications_proto_v1alpha2_distribution_change_proto_rawDescGZIP(), []int{0, 2}\n}", "func (*TradeMessage) Descriptor() ([]byte, []int) {\n\treturn file_trade_proto_rawDescGZIP(), []int{0}\n}", "func (*Preferences) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v2_services_reach_plan_service_proto_rawDescGZIP(), []int{8}\n}", "func (*Amount) Descriptor() ([]byte, []int) {\n\treturn file_spacemesh_v1_types_proto_rawDescGZIP(), []int{2}\n}", "func (*VectorClock) Descriptor() ([]byte, []int) {\n\treturn file_pkg_proto_l3_proto_rawDescGZIP(), []int{3}\n}", "func (*Bonus) Descriptor() ([]byte, []int) {\n\treturn file_services_core_protobuf_servers_proto_rawDescGZIP(), []int{27}\n}", "func (*MyProto) Descriptor() ([]byte, []int) {\n\treturn file_my_proto_proto_rawDescGZIP(), []int{0}\n}", "func (*OnTargetAudienceMetrics) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v2_services_reach_plan_service_proto_rawDescGZIP(), []int{19}\n}", "func (*RecordSpend) Descriptor() ([]byte, []int) {\n\treturn file_recordwants_proto_rawDescGZIP(), []int{1}\n}", "func (*MaximizeConversions) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v14_common_bidding_proto_rawDescGZIP(), []int{6}\n}", "func (*OrderDetail_PresentmentMoney) Descriptor() ([]byte, []int) {\n\treturn file_order_proto_rawDescGZIP(), []int{1, 20}\n}", "func (*GetPeerInfoRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{6}\n}", "func (*DistributionChange) Descriptor() ([]byte, []int) {\n\treturn edgelq_applications_proto_v1alpha2_distribution_change_proto_rawDescGZIP(), []int{0}\n}", "func (*Plan) Descriptor() ([]byte, []int) {\n\treturn file_deliveryapi_pb_service_proto_rawDescGZIP(), []int{2}\n}", "func (*PrivateApiCF) Descriptor() ([]byte, []int) {\n\treturn file_pkg_kascfg_kascfg_proto_rawDescGZIP(), []int{24}\n}", "func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}" ]
[ "0.7140815", "0.69504553", "0.6876126", "0.6736322", "0.6634963", "0.6624022", "0.65697724", "0.6557077", "0.6546061", "0.6503353", "0.64754117", "0.64043206", "0.6386197", "0.63846004", "0.63826203", "0.63817465", "0.63527876", "0.63320667", "0.6331542", "0.63248783", "0.6304258", "0.6304156", "0.6303828", "0.62835616", "0.62816226", "0.6269427", "0.62608427", "0.6250564", "0.6244573", "0.62343735", "0.6232282", "0.6228927", "0.62244475", "0.6224406", "0.62176275", "0.62143713", "0.6212387", "0.6208411", "0.6195525", "0.61775476", "0.61758053", "0.6172106", "0.6168861", "0.6167394", "0.6163431", "0.6151116", "0.6149939", "0.6147416", "0.6146915", "0.614618", "0.61378527", "0.6134164", "0.6132807", "0.6130586", "0.61224866", "0.61141515", "0.6112712", "0.610901", "0.61066496", "0.6104543", "0.60988295", "0.60960394", "0.6087704", "0.6080907", "0.60806036", "0.6074359", "0.6071701", "0.60692173", "0.60689217", "0.6057819", "0.60505855", "0.6049551", "0.6047693", "0.6046525", "0.6041951", "0.60374904", "0.60336655", "0.6032579", "0.60289", "0.6027157", "0.6026564", "0.6025027", "0.60131705", "0.60121727", "0.6003951", "0.6003855", "0.60036397", "0.6002519", "0.6002074", "0.5999375", "0.59986246", "0.59945637", "0.5990965", "0.5988475", "0.5985194", "0.5984823", "0.5984606", "0.5983416", "0.5983321", "0.59830385" ]
0.723794
0
Deprecated: Use SearchParams.ProtoReflect.Descriptor instead.
func (*SearchParams) Descriptor() ([]byte, []int) { return file_price_price_proto_rawDescGZIP(), []int{1} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (*GetSearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{112}\n}", "func (*ConversationSearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_box_search_proto_rawDescGZIP(), []int{14}\n}", "func (*ConversationRelatedSearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_box_search_proto_rawDescGZIP(), []int{12}\n}", "func (*DeleteSearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{116}\n}", "func (*SearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_index_proto_rawDescGZIP(), []int{20}\n}", "func (*SearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_agency_web_proto_rawDescGZIP(), []int{12}\n}", "func (*MessageSearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_box_search_proto_rawDescGZIP(), []int{16}\n}", "func (*SearchGoogleAdsFieldsRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v13_services_google_ads_field_service_proto_rawDescGZIP(), []int{1}\n}", "func (*GetServiceRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{6}\n}", "func (x *fastReflection_Params) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_Params\n}", "func (*SearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_golang_pkg_proto_movies_movies_proto_rawDescGZIP(), []int{3}\n}", "func (*SearchReq) Descriptor() ([]byte, []int) {\n\treturn file_helm_api_proto_rawDescGZIP(), []int{26}\n}", "func (*SearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_pokemon_proto_rawDescGZIP(), []int{0}\n}", "func (*PostAnnotationsSearchesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{117}\n}", "func (*GetAnnotationSearchMetricsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{123}\n}", "func (x *fastReflection_MsgUpdateParams) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_MsgUpdateParams\n}", "func ProtoFromDescriptor(d protoreflect.Descriptor) proto.Message {\n\tswitch d := d.(type) {\n\tcase protoreflect.FileDescriptor:\n\t\treturn ProtoFromFileDescriptor(d)\n\tcase protoreflect.MessageDescriptor:\n\t\treturn ProtoFromMessageDescriptor(d)\n\tcase protoreflect.FieldDescriptor:\n\t\treturn ProtoFromFieldDescriptor(d)\n\tcase protoreflect.OneofDescriptor:\n\t\treturn ProtoFromOneofDescriptor(d)\n\tcase protoreflect.EnumDescriptor:\n\t\treturn ProtoFromEnumDescriptor(d)\n\tcase protoreflect.EnumValueDescriptor:\n\t\treturn ProtoFromEnumValueDescriptor(d)\n\tcase protoreflect.ServiceDescriptor:\n\t\treturn ProtoFromServiceDescriptor(d)\n\tcase protoreflect.MethodDescriptor:\n\t\treturn ProtoFromMethodDescriptor(d)\n\tdefault:\n\t\t// WTF??\n\t\tif res, ok := d.(DescriptorProtoWrapper); ok {\n\t\t\treturn res.AsProto()\n\t\t}\n\t\treturn nil\n\t}\n}", "func (*ListSearchesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{113}\n}", "func (x *fastReflection_QueryParamsRequest) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_QueryParamsRequest\n}", "func (*Search) Descriptor() ([]byte, []int) {\n\treturn file_graphsrv_proto_rawDescGZIP(), []int{3}\n}", "func (*ListAnnotationSearchMetricsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{124}\n}", "func (*SearchInfoRequest) Descriptor() 
([]byte, []int) {\n\treturn file_elastic_proto_rawDescGZIP(), []int{7}\n}", "func (*PostModelsSearchesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{83}\n}", "func (*PostAnnotationSearchMetricsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{122}\n}", "func (*SearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_movie_movie_proto_rawDescGZIP(), []int{0}\n}", "func (*ProjectMemberSearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{93}\n}", "func (*SearchTrainRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_apis_tra_v1alpha1_tra_proto_rawDescGZIP(), []int{3}\n}", "func (*SearchAnnotationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_genomics_v1_annotations_proto_rawDescGZIP(), []int{17}\n}", "func (*FindPersonRequest) Descriptor() ([]byte, []int) {\n\treturn file_protos_face_recognition_service_proto_rawDescGZIP(), []int{2}\n}", "func (*FindPeopleRequest) Descriptor() ([]byte, []int) {\n\treturn file_people_proto_rawDescGZIP(), []int{2}\n}", "func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}", "func (*ProjectMemberSearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{96}\n}", "func (*RecogniseRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_videoservice_proto_rawDescGZIP(), []int{2}\n}", "func (*GlobalSearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_box_search_proto_rawDescGZIP(), []int{10}\n}", "func (*PostSearchesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{114}\n}", "func (*ProjectSearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{76}\n}", "func (*ExternalIDPSearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{159}\n}", "func (*ProjectSearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{73}\n}", "func (*DeleteAnnotationSearchMetricsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{118}\n}", "func (*GrantedProjectSearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{111}\n}", "func (*ListServicesRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{4}\n}", "func (*GrantedProjectSearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{114}\n}", "func (*NearbyRequest) Descriptor() ([]byte, []int) {\n\treturn file_internal_services_search_proto_search_proto_rawDescGZIP(), []int{0}\n}", "func ProtoFromMethodDescriptor(d protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto {\n\ttype canProto interface {\n\t\tMethodDescriptorProto() *descriptorpb.MethodDescriptorProto\n\t}\n\tif res, ok := d.(canProto); ok {\n\t\treturn res.MethodDescriptorProto()\n\t}\n\tif res, ok := d.(DescriptorProtoWrapper); ok {\n\t\tif md, ok := res.AsProto().(*descriptorpb.MethodDescriptorProto); ok {\n\t\t\treturn md\n\t\t}\n\t}\n\treturn protodesc.ToMethodDescriptorProto(d)\n}", "func (*SearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_app_proto_rawDescGZIP(), []int{0}\n}", "func (*Verdict) Descriptor() ([]byte, []int) {\n\treturn 
file_google_maps_addressvalidation_v1_address_validation_service_proto_rawDescGZIP(), []int{5}\n}", "func (*SearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_Prod_proto_rawDescGZIP(), []int{0}\n}", "func (*RefreshRequest) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{16}\n}", "func (*ConversationCalendarSearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_box_search_proto_rawDescGZIP(), []int{18}\n}", "func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto {\n\tp := &descriptorpb.DescriptorProto{\n\t\tName: proto.String(string(message.Name())),\n\t\tOptions: proto.Clone(message.Options()).(*descriptorpb.MessageOptions),\n\t}\n\tfor i, fields := 0, message.Fields(); i < fields.Len(); i++ {\n\t\tp.Field = append(p.Field, ToFieldDescriptorProto(fields.Get(i)))\n\t}\n\tfor i, exts := 0, message.Extensions(); i < exts.Len(); i++ {\n\t\tp.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i)))\n\t}\n\tfor i, messages := 0, message.Messages(); i < messages.Len(); i++ {\n\t\tp.NestedType = append(p.NestedType, ToDescriptorProto(messages.Get(i)))\n\t}\n\tfor i, enums := 0, message.Enums(); i < enums.Len(); i++ {\n\t\tp.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i)))\n\t}\n\tfor i, xranges := 0, message.ExtensionRanges(); i < xranges.Len(); i++ {\n\t\txrange := xranges.Get(i)\n\t\tp.ExtensionRange = append(p.ExtensionRange, &descriptorpb.DescriptorProto_ExtensionRange{\n\t\t\tStart: proto.Int32(int32(xrange[0])),\n\t\t\tEnd: proto.Int32(int32(xrange[1])),\n\t\t\tOptions: proto.Clone(message.ExtensionRangeOptions(i)).(*descriptorpb.ExtensionRangeOptions),\n\t\t})\n\t}\n\tfor i, oneofs := 0, message.Oneofs(); i < oneofs.Len(); i++ {\n\t\tp.OneofDecl = append(p.OneofDecl, ToOneofDescriptorProto(oneofs.Get(i)))\n\t}\n\tfor i, ranges := 0, message.ReservedRanges(); i < ranges.Len(); i++ {\n\t\trrange := ranges.Get(i)\n\t\tp.ReservedRange = append(p.ReservedRange, &descriptorpb.DescriptorProto_ReservedRange{\n\t\t\tStart: proto.Int32(int32(rrange[0])),\n\t\t\tEnd: proto.Int32(int32(rrange[1])),\n\t\t})\n\t}\n\tfor i, names := 0, message.ReservedNames(); i < names.Len(); i++ {\n\t\tp.ReservedName = append(p.ReservedName, string(names.Get(i)))\n\t}\n\treturn p\n}", "func (*IdpProviderSearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{158}\n}", "func (*ContractQueryRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{22}\n}", "func (*GetPeerInfoRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{6}\n}", "func (*FetchMatchesRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_backend_proto_rawDescGZIP(), []int{1}\n}", "func (*LookupRequest) Descriptor() ([]byte, []int) {\n\treturn file_authzed_api_v0_acl_service_proto_rawDescGZIP(), []int{10}\n}", "func (*PatchAnnotationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{4}\n}", "func (*AddPeerRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{8}\n}", "func (*GenerateMessageRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_ai_generativelanguage_v1beta2_discuss_service_proto_rawDescGZIP(), []int{0}\n}", "func 
(*PostConceptsSearchesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{32}\n}", "func (*Search) Descriptor() ([]byte, []int) {\n\treturn file_src_search_proto_rawDescGZIP(), []int{1}\n}", "func (*ValidatorUpdate) Descriptor() ([]byte, []int) {\n\treturn file_tm_replay_proto_rawDescGZIP(), []int{9}\n}", "func (x *fastReflection_MsgUpdateParamsResponse) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_MsgUpdateParamsResponse\n}", "func (*OrgDomainSearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{61}\n}", "func (*OrgDomainSearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{58}\n}", "func (*ModifyRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{10}\n}", "func (*SearchServicesRequests) Descriptor() ([]byte, []int) {\n\treturn file_api_protobuf_spec_connection_resource_v1_proto_rawDescGZIP(), []int{10}\n}", "func (*DescribeRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{4}\n}", "func (*Search) Descriptor() ([]byte, []int) {\n\treturn file_searchMovie_proto_rawDescGZIP(), []int{2}\n}", "func (*Example) Descriptor() ([]byte, []int) {\n\treturn file_google_ai_generativelanguage_v1beta2_discuss_service_proto_rawDescGZIP(), []int{4}\n}", "func (*ListMatchesRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{62}\n}", "func (*SearchInstancesRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_rawDescGZIP(), []int{11}\n}", "func (*OrgMemberSearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{67}\n}", "func (*IdpSearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{148}\n}", "func (x *fastReflection_ServiceCommandDescriptor) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_ServiceCommandDescriptor\n}", "func (*LanguageDetectorRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_language_proto_rawDescGZIP(), []int{1}\n}", "func (x *fastReflection_AddressStringToBytesRequest) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_AddressStringToBytesRequest\n}", "func (x *fastReflection_FlagOptions) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_FlagOptions\n}", "func (*OrgMemberSearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{70}\n}", "func (*Request) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_newfindmaxpb_newfindmaxpb_proto_rawDescGZIP(), []int{0}\n}", "func (*ConversationSearchResponse) Descriptor() ([]byte, []int) {\n\treturn file_box_search_proto_rawDescGZIP(), []int{15}\n}", "func (*SearchLinksRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_datacatalog_lineage_v1_lineage_proto_rawDescGZIP(), []int{23}\n}", "func (*MessageSearchResponse) Descriptor() ([]byte, []int) {\n\treturn file_box_search_proto_rawDescGZIP(), []int{17}\n}", "func (*InstanceSearchParams) Descriptor() ([]byte, []int) {\n\treturn file_application_proto_rawDescGZIP(), []int{21}\n}", "func (*UpdateDomainMappingRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{40}\n}", "func (*Finding) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_websecurityscanner_v1beta_finding_proto_rawDescGZIP(), []int{0}\n}", "func 
(*CMsgSearchResults) Descriptor() ([]byte, []int) {\n\treturn file_steam_htmlmessages_proto_rawDescGZIP(), []int{65}\n}", "func (*SearchResponse) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_index_proto_rawDescGZIP(), []int{21}\n}", "func (*ProvideValidationFeedbackRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_maps_addressvalidation_v1_address_validation_service_proto_rawDescGZIP(), []int{2}\n}", "func (x *fastReflection_QueryAccountInfoRequest) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_QueryAccountInfoRequest\n}", "func (*ListRequest) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{0}\n}", "func (*Message) Descriptor() ([]byte, []int) {\n\treturn file_google_ai_generativelanguage_v1beta2_discuss_service_proto_rawDescGZIP(), []int{2}\n}", "func (*DeleteRequest) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{10}\n}", "func (*SearchRes) Descriptor() ([]byte, []int) {\n\treturn file_helm_api_proto_rawDescGZIP(), []int{25}\n}", "func (*ListCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{164}\n}", "func (*CMsgClientToGCPlayerStatsRequest) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{143}\n}", "func (*UpdateRequest) Descriptor() ([]byte, []int) {\n\treturn file_teams_v1_teams_proto_rawDescGZIP(), []int{5}\n}", "func (*ProjectRoleSearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{89}\n}", "func (*DeleteCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{162}\n}", "func (*FindWebhookCallRequest) Descriptor() ([]byte, []int) {\n\treturn file_events_Event_proto_rawDescGZIP(), []int{9}\n}", "func (x *fastReflection_AddressBytesToStringRequest) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_AddressBytesToStringRequest\n}" ]
[ "0.6757402", "0.66963845", "0.667019", "0.6669961", "0.6665845", "0.66342306", "0.66099745", "0.6583955", "0.658169", "0.657634", "0.6543787", "0.6539887", "0.6536683", "0.6519112", "0.6497361", "0.6452814", "0.6445318", "0.64435625", "0.64422023", "0.6427908", "0.64192164", "0.6415538", "0.64122313", "0.6405412", "0.6396914", "0.6387444", "0.63870454", "0.6385379", "0.63844764", "0.6382685", "0.6375887", "0.63753706", "0.6367967", "0.63650006", "0.6357498", "0.6355661", "0.63530874", "0.6348461", "0.6344864", "0.6343079", "0.63374263", "0.6329927", "0.6322922", "0.6319801", "0.6315986", "0.63026917", "0.6300737", "0.628937", "0.62856257", "0.6284585", "0.6266558", "0.6261589", "0.62479496", "0.6247072", "0.62451553", "0.62412524", "0.62401706", "0.6238949", "0.62303305", "0.62302166", "0.6224745", "0.62240416", "0.62213767", "0.62208927", "0.621732", "0.62147874", "0.6214765", "0.6211932", "0.6209727", "0.62095106", "0.6193744", "0.61922693", "0.619027", "0.61869913", "0.6180664", "0.6180609", "0.6177217", "0.617715", "0.6175722", "0.617568", "0.6172871", "0.61702096", "0.6165702", "0.6165163", "0.6164568", "0.6160689", "0.6159444", "0.6156503", "0.61548704", "0.615467", "0.6149142", "0.61463463", "0.61418575", "0.6141669", "0.61409736", "0.61389154", "0.6133618", "0.61311394", "0.61290926", "0.61274034" ]
0.61617607
85
NewInt returns a new integer value.
func NewInt(i int64) T { return big.NewInt(i) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewInt(x int64) *Int {}", "func NewInt(i int) Int { return &i }", "func NewInt(input int) *int {\n\treturn &input\n}", "func NewInt(value int) *Int {\n\treturn &Int{\n\t\tvalue: value,\n\t}\n}", "func NewInt() *Int {\n\treturn &Int{}\n}", "func NewInt(x int64) *big.Int", "func NewInt(i int64) *Value {\n\treturn &Value{i, Int}\n}", "func NewInt(v int32) Int {\n\treturn Int{v, true}\n}", "func NewInt(x int) *Numeric {\n\tvar r Numeric\n\treturn r.SetInt(x)\n}", "func NewInt(n int) (Int, error) {\n\tvar pn Int\n\tpn.PyObject = C.PyInt_FromLong(C.long(n))\n\tif pn.PyObject == nil {\n\t\treturn pn, errors.Wrap(GetError(), \"error converting to Python int\")\n\t}\n\treturn pn, nil\n}", "func NewInt(t token.Token, value int) *IntLiteral {\n\treturn &IntLiteral{\n\t\tToken: t,\n\t\tTypeOf: NewIntType(),\n\t\tValue: value,\n\t}\n}", "func NewInt(x int64) *big.Int {\n\treturn big.NewInt(x)\n}", "func NewIntValue(ref *int) *IntValue {\n\treturn (*IntValue)(ref)\n}", "func Int(value int) *int {\n\treturn New(value).(*int)\n}", "func Int(v int) *int {\n\tp := new(int)\n\t*p = v\n\treturn p\n}", "func NewFromInt(i int) *VInteger {\n\tr := new(VInteger)\n\tr.value = i\n\treturn r\n}", "func (c IntCodec) New() unsafe.Pointer {\n\treturn unsafe.Pointer(new(int))\n}", "func NewInt(start int, incr uint, end int, width uint) Sequence {\n\tstep := int(incr)\n\tif end < start {\n\t\tstep = -step\n\t}\n\n\tseq := &intSequence{data: make(chan string), step: step, end: end, width: width, widthExceededMutex: sync.RWMutex{}}\n\n\tgo seq.push(start)\n\n\treturn seq\n}", "func newInteger(value int) cell {\n\treturn cell(value)<<16 | cell(IntegerType)\n}", "func NewInt(i int64, valid bool) NullInt {\n\treturn NullInt{\n\t\tNullInt64: sql.NullInt64{\n\t\t\tInt64: i,\n\t\t\tValid: valid,\n\t\t},\n\t}\n}", "func NewIntFromString(s string) (res Int, ok bool) {\n\ti, ok := newIntegerFromString(s)\n\tif !ok {\n\t\treturn\n\t}\n\t// Check overflow\n\tif i.BitLen() > 255 {\n\t\tok = false\n\t\treturn\n\t}\n\treturn Int{i}, true\n}", "func NewNumberInt(i int64) *Number {\n\treturn NewNumberBig(big.NewInt(i))\n}", "func New(s string) *VInteger {\n\tr := new(VInteger)\n\tr.value, _ = strconv.Atoi(s)\n\treturn r\n}", "func NewInt() *Tree {\n\ttree := &Tree{\n\t\tdatatype: containers.IntContainer(0),\n\t}\n\t// The below handling is required to achieve method overriding.\n\t// Refer: https://stackoverflow.com/questions/38123911/golang-method-override\n\ttree.TreeOperations = interface{}(tree).(binarytrees.TreeOperations)\n\treturn tree\n}", "func NewInt(usePointers bool) FormalParam {\n\n\t// Get the type beforehand to derive a random value for it\n\tname := \"fpInt\" + strings.Title(utils.RandomName())\n\tintegralType := \"int\"\n\tisSigned := utils.RandomBool()\n\tvalue := GetIntegralValue(integralType, isSigned, name)\n\tif usePointers {\n\t\tusePointers = utils.RandomBool()\n\t}\n\n\treturn IntegralFormalParam{Name: name,\n\t\tType: integralType,\n\t\tIsSigned: isSigned,\n\t\tValue: value,\n\t\tIsPointer: usePointers}\n}", "func Int(n int) *int {\n\treturn &n\n}", "func Int(name string, value int) *int {\n\tp := new(int)\n\tIntVar(p, name, value)\n\treturn p\n}", "func NewIntNode(t *lexer.Token) (*IntNode, error) {\n\ti := &IntNode{token: t}\n\tnum, err := strconv.ParseInt(purgeNumericStrings(t.Value), 0, 64)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ti.value = num\n\n\treturn i, nil\n}", "func Int(name string, value int, usage string) *int {\n\tp := new(int);\n\tIntVar(p, name, value, usage);\n\treturn p;\n}", 
"func NewIntegerValue(value int) IntegerValue {\n\treturn IntegerValue(value)\n}", "func NewIntegerValue(value int) IntegerValue {\n\treturn IntegerValue(value)\n}", "func (encoder *encoder) DecodeIntNew(p interface{}) (coeffs []int64) {\n\tcoeffs = make([]int64, encoder.ringQ.N)\n\tencoder.DecodeInt(p, coeffs)\n\treturn\n}", "func Int(v int) *int {\n\treturn &v\n}", "func Int(v int) *int {\n\treturn &v\n}", "func Int(v int) *int {\n\treturn &v\n}", "func Int(v int) *int {\n\treturn &v\n}", "func Int(v int) *int {\n\treturn &v\n}", "func Int(v int) *int {\n\treturn &v\n}", "func Int(v int) *int {\n\treturn &v\n}", "func Int(v int) *int {\n\treturn &v\n}", "func NewInt32(x int32) *Numeric {\n\tvar r Numeric\n\treturn r.SetInt32(x)\n}", "func (c Int32Codec) New() unsafe.Pointer {\n\treturn unsafe.Pointer(new(int32))\n}", "func Int(i int) *int {\n\treturn &i\n}", "func Int(i int) *int {\n\treturn &i\n}", "func Int(i int) *int {\n\treturn &i\n}", "func NewInt32() *Int32 {\n\treturn &Int32{}\n}", "func NewInt32(store *store.Store, cfgPath string) *Int32 {\n\tf := &Int32{}\n\tf.init(store, f.mapCfgValue, cfgPath)\n\treturn f\n}", "func NewInt32(input int32) *int32 {\n\treturn &input\n}", "func Int() int {\n\treturn 111\n}", "func NewIntFromString(s string) *big.Int {\n\tv, ok := new(big.Int).SetString(s, 10) //nolint:gomnd\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"Bad base 10 string %s\", s))\n\t}\n\treturn v\n}", "func NewInt8(x int8) *Numeric {\n\tvar r Numeric\n\treturn r.SetInt8(x)\n}", "func NewInt32(val int32) *int32 {\n\tp := new(int32)\n\t*p = val\n\treturn p\n}", "func NewIntegerType() *IntegerType {\n\tout := &IntegerType{\n\t\texamples: raml.NewIntegerExampleMap(0),\n\t}\n\n\tout.examples.SerializeOrdered(false)\n\n\tout.ExtendedDataType = NewExtendedDataType(rmeta.TypeInteger, out)\n\n\treturn out\n}", "func NewInt64(i int64) Int64 {\n\treturn Int64(i)\n}", "func Int(v int) *int { return &v }", "func Int(v int) *int { return &v }", "func Int(v int) *int { return &v }", "func Int(v int) *int { return &v }", "func (c *Constructor[_]) Int(name string, value int, help string) *int {\n\tp := new(int)\n\tc.IntVar(p, name, value, help)\n\treturn p\n}", "func NewFromInt(value int64) Decimal {\n\treturn Decimal{\n\t\tvalue: big.NewInt(value),\n\t\texp: 0,\n\t}\n}", "func (c Int64Codec) New() unsafe.Pointer {\n\treturn unsafe.Pointer(new(int64))\n}", "func NewPointInt(x, y int) Point {\n\treturn Point{int8(x), int8(y)}\n}", "func I() *big.Int { return new(big.Int) }", "func NewInt8(value int8) *Int8 {\n\treturn &Int8{\n\t\tvalue: value,\n\t}\n}", "func NewInt32(value int32) *Int32 {\n\treturn &Int32{\n\t\tvalue: value,\n\t}\n}", "func NewInt8(i int8, valid bool) Int8 {\n\treturn Int8{\n\t\tNullInt8: NullInt8{\n\t\t\tInt8: i,\n\t\t\tValid: valid,\n\t\t},\n\t}\n}", "func Make_Tv_Int(val int) *vector_tile.Tile_Value {\n\ttv := new(vector_tile.Tile_Value)\n\tt := int64(val)\n\ttv.SintValue = &t\n\treturn tv\n}", "func Make_Tv_Int(val int) *vector_tile.Tile_Value {\n\ttv := new(vector_tile.Tile_Value)\n\tt := int64(val)\n\ttv.SintValue = &t\n\treturn tv\n}", "func NewInt64(i int64) Int64 {\n\treturn Int64{Int64: i, Valid: true}\n}", "func NewIntList() *IntList {\n // -111 for no good reason. 
FIXME(david)?\n f := newIntListNode(nil, nil, -111)\n l := newIntListNode(f, nil, -111)\n f.next = l\n return &IntList{ first: f,\n last: l,\n }\n}", "func NewInt64() *Int64 {\n\treturn &Int64{}\n}", "func NewNumberID(v int64) ID { return ID{number: v} }", "func (t Target) Int() *big.Int {\n\treturn new(big.Int).SetBytes(t[:])\n}", "func Int(i int) *int { return &i }", "func NewIntegerLiteral(s string) Expression {\n\tval, err := strconv.ParseInt(s, 10, 0)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn &integerLiteral{value: &integer{value: val}}\n}", "func NewIntSetting(name string, description string, fallback int) *IntSetting {\n\treturn &IntSetting{\n\t\tBaseSetting: &BaseSetting{\n\t\t\tNameValue: name,\n\t\t\tDescriptionValue: description,\n\t\t},\n\t\tIntValue: &fallback,\n\t}\n}", "func NewFromInt32(x int32) Type {\n\treturn (Type(x) << int32Size)\n}", "func newProtobufFieldInt() *protobufFieldInt {\n\treturn &protobufFieldInt{\n\t\tgenericProtobufField: newGenericProtobufField(numberField),\n\t}\n}", "func IntValue(n int64) Value {\n\treturn Value{iface: n}\n}", "func IntValue(i int) Value {\n\ti2 := Int(i)\n\treturn Value{IntValue: &i2}\n}", "func I() *big.Int { return new(big.Int) }", "func NewIntCode(prog []int, in io.Reader, out io.Writer) *IntCode {\n\t// please don't mutate my input\n\tb := make([]int, len(prog))\n\tcopy(b, prog)\n\t// ok thanks\n\tres := IntCode{\n\t\tprog: b,\n\t\tpos: 0,\n\t\tin: os.Stdin,\n\t\tout: os.Stdout,\n\t}\n\treturn &res\n}", "func newIntListNode(prevNode, nextNode *IntListNode, val int) *IntListNode {\n return &IntListNode{ next: nextNode,\n prev: prevNode,\n value: val,\n }\n}", "func NewFromIntSlice(items []int) *SliceOfInt {\n\tslicy := &SliceOfInt{items}\n\treturn slicy\n}", "func NewIntHandler() *TypeHandler {\n\tTypeHandler := TypeHandler{\n\t\tKind: reflect.Int,\n\t\tType: \"int\",\n\t}\n\n\tTypeHandler.GetDefaultFaker = func() reflect.Value {\n\t\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n\t\treturn reflect.ValueOf(r.Int())\n\t}\n\n\treturn &TypeHandler\n}", "func NewInt64(value int64) *Int64 {\n\treturn &Int64{\n\t\tvalue: value,\n\t}\n}", "func Int(i *int) Parser {\n\treturn &intParser{destination: i}\n}", "func NewNthInt(stream IntStream) *NthInt {\n\treturn &NthInt{stream: stream}\n}", "func NewInt64(inp int64) B64 {\n\tvar resp = []B64{\n\t\tnewInt64(inp),\n\t}\n\treturn resp[0]\n}", "func NewIntArray(ints []int32) (*Obj) {\n return &Obj{nil, ints, nil}\n}", "func (c Int8Codec) New() unsafe.Pointer {\n\treturn unsafe.Pointer(new(int8))\n}", "func NewAttributeValueInt(v int64) AttributeValue {\n\torig := &otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_IntValue{IntValue: v}}\n\treturn AttributeValue{orig: orig}\n}", "func Int() int {\n\treturn int(Int64n(int64(1000)))\n}", "func NewBigInt(v string, base int) *big.Int {\n b := big.NewInt(0)\n b.SetString(v, base)\n return b\n}", "func Int(flag string, value int, description string) *int {\n\tvar v int\n\tIntVar(&v, flag, value, description)\n\treturn &v\n}", "func NewIntegerValue(val interface{}) (*QueryAst, error) {\n\tintVal, err := strconv.ParseInt(string(val.(*token.Token).Lit), 10, 64)\n\tast := new(QueryAst)\n\tast.value = intVal\n\treturn ast, err\n}", "func NewInt64() *Int64 {\n\treturn &Int64{\n\t\tDegree: 0,\n\t\tc: map[uint64]int64{0: 0},\n\t}\n}", "func newLeafListInt(values []*big.Int, width Width) *TypedLeafListInt {\n\tbytes := make([]byte, 0)\n\ttypeOpts := make([]int32, 0)\n\ttypeOpts = append(typeOpts, int32(width))\n\tfor _, v := range values 
{\n\t\tvar isNegative int32 = isPositiveTypeOpt\n\t\tif v.Sign() < 0 {\n\t\t\tisNegative = isNegativeTypeOpt\n\t\t}\n\t\ttypeOpts = append(typeOpts, int32(len(v.Bytes())))\n\t\ttypeOpts = append(typeOpts, isNegative)\n\t\tbytes = append(bytes, v.Bytes()...)\n\t}\n\ttypedLeafListInt := TypedLeafListInt{\n\t\tBytes: bytes,\n\t\tType: ValueType_LEAFLIST_INT,\n\t\tTypeOpts: typeOpts,\n\t}\n\treturn &typedLeafListInt\n}", "func NewIntFile(i *int) *File {\n\treturn NewFile(&intElement{Data: i})\n}", "func NewInt64(val int64) *int64 {\n\tp := new(int64)\n\t*p = val\n\treturn p\n}" ]
[ "0.85864913", "0.8524492", "0.8404486", "0.8297176", "0.8228805", "0.81092215", "0.80563784", "0.80309343", "0.80285656", "0.7866015", "0.7390446", "0.737557", "0.73207617", "0.72236663", "0.71351486", "0.7033165", "0.6993391", "0.6844066", "0.67741734", "0.6760597", "0.66253567", "0.65911955", "0.655377", "0.65039396", "0.6474109", "0.6412423", "0.6351758", "0.63368213", "0.63200915", "0.63052183", "0.63052183", "0.6264277", "0.62400275", "0.62400275", "0.62400275", "0.62400275", "0.62400275", "0.62400275", "0.62400275", "0.62400275", "0.62281936", "0.62204945", "0.61785865", "0.61785865", "0.61785865", "0.61561334", "0.61431885", "0.61410695", "0.6132066", "0.61161757", "0.5991942", "0.5976709", "0.5967298", "0.5963341", "0.59621793", "0.59621793", "0.59621793", "0.59621793", "0.5926105", "0.59224826", "0.5899448", "0.58696544", "0.583774", "0.5829424", "0.5827778", "0.5811976", "0.58119625", "0.58119625", "0.5810961", "0.5803579", "0.58032095", "0.57878774", "0.5765699", "0.57627636", "0.57571113", "0.5741985", "0.5730342", "0.5718259", "0.57057714", "0.56948245", "0.5688831", "0.5669783", "0.56670916", "0.56659573", "0.5624446", "0.56209", "0.5614971", "0.5609607", "0.5604859", "0.5575495", "0.5574733", "0.5573049", "0.556705", "0.5558011", "0.55529135", "0.5530322", "0.5520706", "0.55165225", "0.55090094", "0.54856193" ]
0.7451826
10
NewFloat returns a new floating point value.
func NewFloat(f float64) T { return big.NewFloat(f) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewFloat(x float64) *Float {}", "func NewFloat(x float64) *Float { return new(Float).SetFloat64(x) }", "func NewFloat(v float64) Float {\n\treturn Float{v, true}\n}", "func NewFloat(f float64) *Value {\n\treturn &Value{f, Float}\n}", "func NewFloat(x float64) *big.Float", "func (tp Float) NewFloat(data float32) Float {\n\treturn Float(data)\n}", "func NewFloat(i float64) Float { return &i }", "func NewFloat(f float64, valid bool) Float {\n\treturn Float{\n\t\tNullFloat64: sql.NullFloat64{\n\t\t\tFloat64: f,\n\t\t\tValid: valid,\n\t\t},\n\t}\n}", "func newFloat(prec int64) (*types.T, error) {\n\tif prec < 1 {\n\t\treturn nil, errFloatPrecAtLeast1\n\t}\n\tif prec <= 24 {\n\t\treturn types.Float4, nil\n\t}\n\tif prec <= 54 {\n\t\treturn types.Float, nil\n\t}\n\treturn nil, errFloatPrecMax54\n}", "func NewFloat(t token.Token, value float64) *FloatLiteral {\n\treturn &FloatLiteral{\n\t\tToken: t,\n\t\tTypeOf: NewFloatType(),\n\t\tValue: value,\n\t}\n}", "func NewFloat(typ *types.FloatType, x float64) *ConstFloat {\n\tif math.IsNaN(x) {\n\t\t// TODO: store sign of NaN?\n\t\treturn &ConstFloat{Typ: typ, NaN: true}\n\t}\n\treturn &ConstFloat{Typ: typ, X: big.NewFloat(x)}\n}", "func newFloat(value *big.Float) *TypedFloat {\n\tbytes, _ := value.GobEncode()\n\ttypedFloat := TypedFloat{\n\t\tBytes: bytes,\n\t\tType: ValueType_FLOAT,\n\t}\n\treturn &typedFloat\n}", "func NewFloatValue(value float64) FloatValue {\n\treturn FloatValue(value)\n}", "func NewFloatValue(value float64) FloatValue {\n\treturn FloatValue(value)\n}", "func Float(value float64) *float64 {\n\treturn New(value).(*float64)\n}", "func Float(name string, value float, usage string) *float {\n\tp := new(float);\n\tFloatVar(p, name, value, usage);\n\treturn p;\n}", "func Float(v float64) *float64 {\n\treturn &v\n}", "func Float(f float64) *float64 {\n\treturn &f\n}", "func (c *Constructor[_]) Float(name string, value float64, help string) *float64 {\n\tp := new(float64)\n\tc.FloatVar(p, name, value, help)\n\treturn p\n}", "func NewFloatValue(e expr.Expr, ts time.Time, val float64) Sequence {\n\treturn NewValue(e, ts, expr.FloatParams(val), nil)\n}", "func NewNumberFloat(f float64) *Number {\n\trat := big.Rat{}\n\trat.SetFloat64(f)\n\tif rat.IsInt() {\n\t\treturn NewNumberBig(rat.Num())\n\t}\n\treturn &Number{isInteger: false, floating: f}\n}", "func NewF(f float64) Fixed {\n\tif math.IsNaN(f) {\n\t\treturn Fixed{fp: nan}\n\t}\n\tif f >= MAX || f <= -MAX {\n\t\treturn NaN\n\t}\n\tround := .5\n\tif f < 0 {\n\t\tround = -0.5\n\t}\n\n\treturn Fixed{fp: int64(f*float64(scale) + round)}\n}", "func (a *DatumAlloc) NewDFloat(v tree.DFloat) *tree.DFloat {\n\tif a.AllocSize == 0 {\n\t\ta.AllocSize = defaultDatumAllocSize\n\t}\n\tbuf := &a.dfloatAlloc\n\tif len(*buf) == 0 {\n\t\t*buf = make([]tree.DFloat, a.AllocSize)\n\t}\n\tr := &(*buf)[0]\n\t*r = v\n\t*buf = (*buf)[1:]\n\treturn r\n}", "func NewFloat64(a, b, c float64) *Float64 {\n\tx := new(Float64)\n\tx[0] = a\n\tx[1] = b\n\tx[2] = c\n\treturn x\n}", "func (f Fixed) Float() float64 {\n\tif f.IsNaN() {\n\t\treturn math.NaN()\n\t}\n\treturn float64(f.fp) / float64(scale)\n}", "func (f Fixed) Float() float64 {\n\tif f.IsNaN() {\n\t\treturn math.NaN()\n\t}\n\treturn float64(f.fp) / float64(scale)\n}", "func (s *Slider) Float(f *Float) *Slider {\n\ts.float = f\n\treturn s\n}", "func (n *Number) Float() float64 {\n\treturn n.floating\n}", "func Float(flag string, value float64, description string) *float64 {\n\tvar v float64\n\tFloatVar(&v, flag, value, description)\n\treturn &v\n}", "func 
NewFloat64(a, b float64) *Float64 {\n\tz := new(Float64)\n\tz.SetPair(a, b)\n\treturn z\n}", "func NewFromFloat(value float64) Decimal {\n\treturn Decimal{val: decimal.NewFromFloat(value)}\n}", "func FloatFrom(f float64) Float {\n\treturn NewFloat(f, true)\n}", "func NewFloatField(defaults Defaults) FloatField {\n\tfield := FloatField{}\n\tfor fieldName, value := range defaults {\n\t\tswitch fieldName {\n\t\tcase \"Required\":\n\t\t\tif v, ok := value.(bool); ok {\n\t\t\t\tfield.Required = v\n\t\t\t}\n\t\t}\n\t}\n return field\n}", "func NewFloat(m map[string]float64) ByteMap {\n\treturn Build(func(cb func(string, interface{})) {\n\t\tfor key, value := range m {\n\t\t\tcb(key, value)\n\t\t}\n\t}, func(key string) interface{} {\n\t\treturn m[key]\n\t}, false)\n}", "func NewFloatImage(r image.Rectangle) *FloatImage {\n\tw, h := r.Dx(), r.Dy()\n\tpix := make([]ColorValue, 4*w*h)\n\treturn &FloatImage{pix, 4 * w, r}\n}", "func (x *Big) Float(z *big.Float) *big.Float {\n\tif debug {\n\t\tx.validate()\n\t}\n\n\tif z == nil {\n\t\tz = new(big.Float)\n\t}\n\n\tswitch x.form {\n\tcase finite, finite | signbit:\n\t\tif x.isZero() {\n\t\t\tz.SetUint64(0)\n\t\t} else {\n\t\t\tz.SetRat(x.Rat(nil))\n\t\t}\n\tcase pinf, ninf:\n\t\tz.SetInf(x.form == pinf)\n\tdefault: // snan, qnan, ssnan, sqnan:\n\t\tz.SetUint64(0)\n\t}\n\treturn z\n}", "func MakeFloat(in *float32) *google_protobuf.FloatValue {\n\tif in == nil {\n\t\treturn nil\n\t}\n\n\treturn &google_protobuf.FloatValue{\n\t\tValue: *in,\n\t}\n}", "func NewFromFloat(val float64) Decimal {\n\treturn decimal.NewFromFloat(val)\n}", "func AddFloat(x, y *big.Float) *big.Float {\n\treturn new(big.Float).Add(x, y)\n}", "func (e Entry) Float(k string, v float64) Entry {\n\te.enc.FloatKey(k, v)\n\treturn e\n}", "func NewFromFloat(value float64) Decimal {\n\tif value == 0 {\n\t\treturn New(0, 0)\n\t}\n\treturn newFromFloat(value, math.Float64bits(value), &float64info)\n}", "func NewFloatImage(width, height int) *FloatImage {\n\tarea := width * height\n\treturn &FloatImage{\n\t\tIp: [3][]float32{make([]float32, area), make([]float32, area), make([]float32, area)},\n\t\tWidth: width,\n\t\tHeight: height,\n\t}\n}", "func NewBigFloat(f float64) *big.Float {\n\tr := big.NewFloat(f)\n\tr.SetPrec(CurrentPrecision)\n\treturn r\n}", "func Float() Scalar {\n\treturn floatTypeInstance\n}", "func NewFloatArray(p uint32, u int32) *FloatArray {\n\treturn &FloatArray{p, u}\n}", "func (v Value) Float(bitSize int) (float64, error) {\n\tif v.typ != Number {\n\t\treturn 0, v.newError(\"%s is not a number\", v.Raw())\n\t}\n\tf, err := strconv.ParseFloat(v.Raw(), bitSize)\n\tif err != nil {\n\t\treturn 0, v.newError(\"%v\", err)\n\t}\n\treturn f, nil\n}", "func NewFloat64() *Float64 {\n\tself := Float64{}\n\tself.SetDefaults()\n\treturn &self\n}", "func (v *Value) Float() (*encoding.Float, error) {\n\tif v.vfloat != nil {\n\t\treturn v.vfloat, nil\n\t}\n\n\tvfloat := encoding.NewFloat()\n\terr := vfloat.UnmarshalBinary(v.Raw)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tv.vfloat = vfloat\n\treturn vfloat, nil\n}", "func newBigFloat(n uint64) *big.Float {\n\ttmp := new(big.Float).SetUint64(n)\n\ttmp.SetPrec(ENCODER_DECODER_PREC)\n\treturn tmp\n}", "func NewFloatCmd(args ...interface{}) *redis.FloatCmd {\n\treturn redis.NewFloatCmd(args...)\n}", "func (c *TRandomFloat) NextFloat(min float32, max float32) float32 {\n\tif max-min <= 0 {\n\t\treturn min\n\t}\n\n\treturn min + rand.Float32()*(max-min)\n}", "func NewFromFloat(f float64) Fixed {\n\tif math.IsNaN(f) {\n\t\treturn 
Fixed{fp: nan}\n\t}\n\tif f >= max || f < 0 {\n\t\tpanic(errOverflow)\n\t}\n\tintPart := decimal.NewFromFloat(f).Mul(decimal.NewFromFloat(float64(scale))).IntPart()\n\treturn Fixed{fp: uint64(intPart)}\n}", "func FloatFromPtr(f *float64) Float {\n\tif f == nil {\n\t\treturn NewFloat(0, false)\n\t}\n\treturn NewFloat(*f, true)\n}", "func (z *Float) Copy(x *Float) *Float {}", "func (obj *Value) SetFloat(v float32) {\n\tobj.Candy().Guify(\"g_value_set_float\", obj, v)\n}", "func (c *C) Float() Type {\n\treturn FloatT(4)\n}", "func (v *missingValue) GetFloat() (float64, bool) {\n\treturn 0.0, false\n}", "func (v *missingValue) SetFloat(value float64) bool {\n\treturn false\n}", "func NewFiniteField(order int64) FiniteField {\n\treturn FiniteField{order}\n}", "func createPointer() *float64 {\n\tmyFloat := 98.5\n\treturn &myFloat\n}", "func (d *DSP) ParameterFloat(index C.int, value *C.float, valuestr *C.char, valuestrlen C.int) error {\n\t//FMOD_RESULT F_API FMOD_DSP_GetParameterFloat(FMOD_DSP *dsp, int index, float *value, char *valuestr, int valuestrlen);\n\treturn ErrNoImpl\n}", "func Make_Tv_Float(val float64) *vector_tile.Tile_Value {\n\ttv := new(vector_tile.Tile_Value)\n\tt := float64(val)\n\ttv.DoubleValue = &t\n\treturn tv\n}", "func Make_Tv_Float(val float64) *vector_tile.Tile_Value {\n\ttv := new(vector_tile.Tile_Value)\n\tt := float64(val)\n\ttv.DoubleValue = &t\n\treturn tv\n}", "func (c *Context) Float(v float64) *AST {\n\t//TODO: test if this could work\n\treturn &AST{\n\t\trawCtx: c.raw,\n\t\trawAST: C.Z3_mk_real(c.raw, C.int(v), C.int(1)),\n\t}\n}", "func (c *Configurator) Float64F(name string, value float64, usage string) *float64 {\n\tp := new(float64)\n\n\tc.Float64VarF(p, name, value, usage)\n\n\treturn p\n}", "func (p *Parser) parseFloatLiteral() asti.ExpressionI {\n\tflo := &ast.FloatLiteral{Token: p.curToken}\n\tvalue, err := strconv.ParseFloat(p.curToken.Literal, 64)\n\tif err != nil {\n\t\tp.AddError(\"could not parse %q as float\", p.curToken.Literal)\n\t\treturn nil\n\t}\n\tflo.Value = value\n\treturn flo\n}", "func newLeafListFloat(values []float32) *TypedLeafListFloat {\n\tbytes := make([]byte, 0)\n\tfor _, f := range values {\n\t\tbuf := make([]byte, 8)\n\t\tbinary.LittleEndian.PutUint64(buf, math.Float64bits(float64(f)))\n\t\tbytes = append(bytes, buf...)\n\t}\n\ttypedLeafListFloat := TypedLeafListFloat{\n\t\tBytes: bytes,\n\t\tType: ValueType_LEAFLIST_FLOAT,\n\t}\n\treturn &typedLeafListFloat\n}", "func NewFromBits(a, b uint64) Float {\n\treturn Float{a: a, b: b}\n}", "func NewFromBits(a, b uint64) Float {\n\treturn Float{a: a, b: b}\n}", "func (v Value) Float() float64 {\n\tpanic(message)\n}", "func Float(val string) (out *big.Float, err error) {\n\tvalue, ret := new(big.Float).SetString(val)\n\tif !ret {\n\t\terr = fmt.Errorf(\"invalid va\")\n\t\treturn\n\t}\n\treturn value, err\n}", "func (f Number) Float(context.Context) float64 {\n\treturn float64(f)\n}", "func (v *Value) SetFloat(val float32) {\n\tC.g_value_set_float(v.Native(), C.gfloat(val))\n}", "func NewFromFloat64(x float64) (f Float, exact bool) {\n\tintRep := math.Float64bits(x)\n\tsign := intRep&0x8000000000000000 != 0\n\texp := intRep & 0x7FF0000000000000 >> 52\n\tmant := intRep & 0xFFFFFFFFFFFFF\n\tleftMant := mant & 0xFFFFFFFFFFFF0 >> 4\n\tvar a uint64\n\tb := mant & 0xF << 60\n\n\tswitch exp {\n\t// 0b11111111\n\tcase 0x7FF:\n\t\t// NaN or Inf\n\t\tif mant == 0 {\n\t\t\t// +-Inf\n\t\t\ta = 0x7FFF000000000000\n\t\t\tif sign {\n\t\t\t\ta = 0xFFFF000000000000\n\t\t\t}\n\t\t\treturn Float{a: a, b: 
b}, true\n\t\t}\n\t\t// +-NaN\n\n\t\ta = 0\n\t\tif sign {\n\t\t\ta = 0x8000000000000000\n\t\t}\n\t\ta = a | 0x7FFF000000000000\n\n\t\tnewMant := leftMant\n\t\ta |= newMant\n\n\t\treturn Float{a: a, b: b}, true\n\t\t// 0b00000000\n\tcase 0x0:\n\t\tif mant == 0 {\n\t\t\t// +-Zero\n\t\t\tvar a uint64\n\t\t\tif sign {\n\t\t\t\ta = 0x8000000000000000\n\t\t\t}\n\t\t\treturn Float{a: a, b: b}, true\n\t\t}\n\t}\n\n\tif sign {\n\t\ta = 0x8000000000000000\n\t}\n\n\tnewExp := (exp - 1023 + 16383) << 48\n\ta |= newExp\n\n\ta |= leftMant\n\n\treturn Float{a: a, b: b}, true\n}", "func SetFloatField(env *C.JNIEnv, obj C.jobject, fieldID C.jfieldID, val C.jfloat) {\n\tC._GoJniSetFloatField(env, obj, fieldID, val)\n}", "func NewNumber(f float64) *Number {\n\treturn &Number{num: f}\n}", "func NewFloatSorter[F Float]() zermelo.Sorter[F] {\n\treturn newFloatSorter[F]()\n}", "func (v Base) Float() float64 {\n\treturn float64(v)\n}", "func NumberFromFloat(f float64, precision int8) *Number {\n\treturn &Number{\n\t\tvalue: toFixed(f, precision),\n\t\tprecision: precision,\n\t}\n}", "func (o *FloatObject) AsFloat() (float64) {\n return o.Value\n}", "func NewFFilled(size int, f func() float64) Vec {\n\treturn New(size).FFill(f)\n}", "func (p *parser) parseFloat(annotations []Symbol) Float {\n\treturn Float{annotations: annotations, isSet: true, text: p.next().Val}\n}", "func (z *Big) SetFloat(x *big.Float) *Big {\n\tif x.IsInf() {\n\t\tif x.Signbit() {\n\t\t\tz.form = ninf\n\t\t} else {\n\t\t\tz.form = pinf\n\t\t}\n\t\treturn z\n\t}\n\n\tneg := x.Signbit()\n\tif x.Sign() == 0 {\n\t\tif neg {\n\t\t\tz.form |= signbit\n\t\t}\n\t\tz.compact = 0\n\t\tz.precision = 1\n\t\treturn z\n\t}\n\n\tz.exp = 0\n\tx0 := new(big.Float).Copy(x).SetPrec(big.MaxPrec)\n\tx0.Abs(x0)\n\tif !x.IsInt() {\n\t\tfor !x0.IsInt() {\n\t\t\tx0.Mul(x0, c.TenFloat)\n\t\t\tz.exp--\n\t\t}\n\t}\n\n\tif mant, acc := x0.Uint64(); acc == big.Exact {\n\t\tz.compact = mant\n\t\tz.precision = arith.Length(mant)\n\t} else {\n\t\tz.compact = c.Inflated\n\t\tx0.Int(&z.unscaled)\n\t\tz.precision = arith.BigLength(&z.unscaled)\n\t}\n\tz.form = finite\n\tif neg {\n\t\tz.form |= signbit\n\t}\n\treturn z\n}", "func FloatValue(f float64) Value {\n\tf2 := Float(f)\n\treturn Value{FloatValue: &f2}\n}", "func NewFloat2(x float64, prec uint) *Float {\n\tf := new(Float)\n\tf.prec = prec\n\tf.SetFloat64(x)\n\treturn f\n}", "func (recv *Value) SetFloat(vFloat float32) {\n\tc_v_float := (C.gfloat)(vFloat)\n\n\tC.g_value_set_float((*C.GValue)(recv.native), c_v_float)\n\n\treturn\n}", "func FloatCopy(z *big.Float, x *big.Float,) *big.Float", "func (ja *Array) AddFloat(v float64) {\n\tja.Values = append(ja.Values, &floatValue{\n\t\tvalue: v,\n\t})\n}", "func ParseFloat(s string, base int, prec uint, mode RoundingMode) (f *Float, b int, err error) {}", "func NewFromBits(bits uint16) Float {\n\treturn Float{bits: bits}\n}", "func (a ASTNode) Float() float64 {\n\tif a.t != tval {\n\t\tpanic(ConfErr{a.pos, errors.New(\"Not a basic value\")})\n\t}\n\tv, err := strconv.ParseFloat(a.val.(string), 64)\n\tif err != nil {\n\t\tpanic(ConfErr{a.pos, err})\n\t}\n\treturn v\n}", "func (c *TRandomFloat) UpdateFloat(value float32, interval float32) float32 {\n\tif interval <= 0 {\n\t\tinterval = 0.1 * value\n\t}\n\tminValue := value - interval\n\tmaxValue := value + interval\n\treturn c.NextFloat(minValue, maxValue)\n}", "func RandomNumberFloat(min, max Number) *Number {\n\trand.Seed(time.Now().UTC().UnixNano())\n\treturn NewNumber(min.AsFloat64() + 
rand.Float64()*(max.AsFloat64()-min.AsFloat64()))\n}", "func (o *FakeObject) Float() float64 { return o.Value.(float64) }", "func (f FormField) Float() float64 {\n\tif result, err := strconv.ParseFloat(f.Value, 64); err == nil {\n\t\treturn result\n\t}\n\treturn 0.0\n}", "func (c *CellValue) SetFloat(v float32) {\n\tc.IntVal = 0\n\tc.FloatVal = v\n\tc.StringVal = \"\"\n}", "func NewFloatGray(w, h int) *FloatGray {\n pix := make([]FloatGrayColor, w*h)\n return &FloatGray{pix, w, image.Rectangle{image.ZP, image.Point{w, h}}}\n}", "func (v *Value) Float() float64 {\n\treturn (float64)(C.value_get_double(v.value))\n}", "func NewFromFloat(value float64) Decimal {\n\tfloor := math.Floor(value)\n\n\t// fast path, where float is an int\n\tif floor == value && value <= math.MaxInt64 && value >= math.MinInt64 {\n\t\treturn NewDec(int64(value), 0)\n\t}\n\n\t// slow path: float is a decimal\n\t// HACK(vadim): do this the slow hacky way for now because the logic to\n\t// convert a base-2 float to base-10 properly is not trivial\n\tstr := strconv.FormatFloat(value, 'f', -1, 64)\n\tdec, err := NewFromString(str)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn dec\n}", "func (s *Smpval) SetFloat(f float64) bool {\n\tif s.flag == Float && s.val.CanSet() {\n\t\ts.val.SetFloat(f)\n\t\ts.f = s.val.Float()\n\t\treturn true\n\t}\n\treturn false\n}" ]
[ "0.8411921", "0.83798766", "0.8243816", "0.81184137", "0.80630434", "0.8013916", "0.77668375", "0.7746606", "0.74970555", "0.7403216", "0.7179319", "0.71489006", "0.6873199", "0.6873199", "0.6780973", "0.6665791", "0.6455097", "0.6424199", "0.636417", "0.6320907", "0.6287495", "0.6237109", "0.6158143", "0.61416835", "0.608246", "0.608246", "0.6043871", "0.60437125", "0.6016504", "0.6013062", "0.59661335", "0.5956103", "0.5954564", "0.59521705", "0.5895987", "0.58628356", "0.5843307", "0.5829937", "0.5825089", "0.58179873", "0.5791295", "0.5749928", "0.57445735", "0.5735663", "0.5724267", "0.57120186", "0.5703712", "0.57036406", "0.57030666", "0.56967545", "0.5693666", "0.5690539", "0.56828916", "0.5679658", "0.56576055", "0.564227", "0.56306285", "0.5628242", "0.56223094", "0.5613402", "0.5602902", "0.55973905", "0.55973905", "0.55958444", "0.55947155", "0.5588107", "0.55800945", "0.5573062", "0.5573062", "0.5567659", "0.55647427", "0.55624247", "0.5562118", "0.5560384", "0.5554191", "0.5552284", "0.5549066", "0.5539056", "0.5536999", "0.5523927", "0.551951", "0.55184555", "0.5507379", "0.5504909", "0.55018026", "0.5498141", "0.5496435", "0.54951924", "0.549479", "0.549279", "0.5490408", "0.54743135", "0.5459166", "0.5458209", "0.54574317", "0.54514104", "0.5439537", "0.54225904", "0.5407319", "0.5401117" ]
0.74249166
9
Lookup looks up the provided key in map m. The caller must provide the key's digest which is used as a hash.
func (m Map) Lookup(d digest.Digest, key T) T { if _, ok := m.tab[d]; !ok { return nil } entry := *m.tab[d] for entry != nil && Less(entry.Key, key) { entry = entry.Next } if entry == nil || !Equal(entry.Key, key) { return nil } return entry.Value }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *HashRing) Lookup(key string) (string, bool) {\n\tstrs := r.LookupN(key, 1)\n\tif len(strs) == 0 {\n\t\treturn \"\", false\n\t}\n\treturn strs[0], true\n}", "func Get(m Map, key Key) (val Value, ok bool) {\n\thash := key.Hash()\n\tval, ok = m.find(0, hash, key)\n\treturn val, ok\n}", "func (kvStore *KVStore) Lookup(hash KademliaID) (output []byte, err error) {\n kvStore.mutex.Lock()\n if val, ok := kvStore.mapping[hash]; ok {\n output = val.data\n } else {\n err = NotFoundError\n }\n kvStore.mutex.Unlock()\n return\n}", "func (dc *DigestCache) Lookup(hash []byte) []byte {\n\tif r, ok := dc.Records[string(hash)]; ok {\n\t\treturn r\n\t}\n\treturn nil\n}", "func (r *ring) Lookup(\n\tkey string,\n) (HostInfo, error) {\n\taddr, found := r.ring().Lookup(key)\n\tif !found {\n\t\tselect {\n\t\tcase r.refreshChan <- &ChangedEvent{}:\n\t\tdefault:\n\t\t}\n\t\treturn HostInfo{}, ErrInsufficientHosts\n\t}\n\tr.members.RLock()\n\tdefer r.members.RUnlock()\n\thost, ok := r.members.keys[addr]\n\tif !ok {\n\t\treturn HostInfo{}, fmt.Errorf(\"host not found in member keys, host: %q\", addr)\n\t}\n\treturn host, nil\n}", "func Lookup() {\n\tTEXT(\"Lookup\", NOSPLIT, \"func(keyset []byte, key []byte) int\")\n\tDoc(\"Lookup searches for a key in a set of keys, returning its index if \",\n\t\t\"found. If the key cannot be found, the number of keys is returned.\")\n\n\t// Load inputs.\n\tkeyset := Load(Param(\"keyset\").Base(), GP64())\n\tcount := Load(Param(\"keyset\").Len(), GP64())\n\tSHRQ(Imm(4), count)\n\tkeyPtr := Load(Param(\"key\").Base(), GP64())\n\tkeyLen := Load(Param(\"key\").Len(), GP64())\n\tkeyCap := Load(Param(\"key\").Cap(), GP64())\n\n\t// None of the keys are larger than maxLength.\n\tCMPQ(keyLen, Imm(maxLength))\n\tJA(LabelRef(\"not_found\"))\n\n\t// We're going to be unconditionally loading 16 bytes from the input key\n\t// so first check if it's safe to do so (cap >= 16). 
If not, defer to\n\t// safe_load for additional checks.\n\tCMPQ(keyCap, Imm(maxLength))\n\tJB(LabelRef(\"safe_load\"))\n\n\t// Load the input key and pad with zeroes to 16 bytes.\n\tLabel(\"load\")\n\tkey := XMM()\n\tVMOVUPS(Mem{Base: keyPtr}, key)\n\tLabel(\"prepare\")\n\tzeroes := XMM()\n\tVPXOR(zeroes, zeroes, zeroes)\n\tones := XMM()\n\tVPCMPEQB(ones, ones, ones)\n\tvar blendBytes [maxLength * 2]byte\n\tfor j := 0; j < maxLength; j++ {\n\t\tblendBytes[j] = 0xFF\n\t}\n\tblendMasks := ConstBytes(\"blend_masks\", blendBytes[:])\n\tblendMasksPtr := GP64()\n\tLEAQ(blendMasks.Offset(maxLength), blendMasksPtr)\n\tSUBQ(keyLen, blendMasksPtr)\n\tblend := XMM()\n\tVMOVUPS(Mem{Base: blendMasksPtr}, blend)\n\tVPBLENDVB(blend, key, zeroes, key)\n\n\t// Zero out i so we can use it as the loop increment.\n\ti := GP64()\n\tXORQ(i, i)\n\n\t// Round the key count down to the nearest multiple of unroll to determine\n\t// how many iterations of the big loop we'll need.\n\ttruncatedCount := GP64()\n\tMOVQ(count, truncatedCount)\n\tshift := uint64(math.Log2(float64(unroll)))\n\tSHRQ(Imm(shift), truncatedCount)\n\tSHLQ(Imm(shift), truncatedCount)\n\n\t// Loop over multiple keys in the big loop.\n\tLabel(\"bigloop\")\n\tCMPQ(i, truncatedCount)\n\tJE(LabelRef(\"loop\"))\n\n\tx := []VecPhysical{X8, X9, X10, X11, X12, X13, X14, X15}\n\tfor n := 0; n < unroll; n++ {\n\t\tVPCMPEQB(Mem{Base: keyset, Disp: maxLength * n}, key, x[n])\n\t\tVPTEST(ones, x[n])\n\t\tvar target string\n\t\tif n == 0 {\n\t\t\ttarget = \"done\"\n\t\t} else {\n\t\t\ttarget = fmt.Sprintf(\"found%d\", n)\n\t\t}\n\t\tJCS(LabelRef(target))\n\t}\n\n\t// Advance and loop again.\n\tADDQ(Imm(unroll), i)\n\tADDQ(Imm(unroll*maxLength), keyset)\n\tJMP(LabelRef(\"bigloop\"))\n\n\t// Loop over the remaining keys.\n\tLabel(\"loop\")\n\tCMPQ(i, count)\n\tJE(LabelRef(\"done\"))\n\n\t// Try to match against the input key.\n\tmatch := XMM()\n\tVPCMPEQB(Mem{Base: keyset}, key, match)\n\tVPTEST(ones, match)\n\tJCS(LabelRef(\"done\"))\n\n\t// Advance and loop again.\n\tLabel(\"next\")\n\tINCQ(i)\n\tADDQ(Imm(maxLength), keyset)\n\tJMP(LabelRef(\"loop\"))\n\tJMP(LabelRef(\"done\"))\n\n\t// Return the loop increment, or the count if the key wasn't found. If we're\n\t// here from a jump within the big loop, the loop increment needs\n\t// correcting first.\n\tfor j := unroll - 1; j > 0; j-- {\n\t\tLabel(fmt.Sprintf(\"found%d\", j))\n\t\tINCQ(i)\n\t}\n\tLabel(\"done\")\n\tStore(i, ReturnIndex(0))\n\tRET()\n\tLabel(\"not_found\")\n\tStore(count, ReturnIndex(0))\n\tRET()\n\n\t// If the input key is near a page boundary, we must change the way we load\n\t// it to avoid a fault. We instead want to load the 16 bytes up to and\n\t// including the key, then shuffle the key forward in the register. E.g. 
for\n\t// key \"foo\" we would load the 13 bytes prior to the key along with \"foo\"\n\t// and then move the last 3 bytes forward so the first 3 bytes are equal\n\t// to \"foo\".\n\tLabel(\"safe_load\")\n\tpageOffset := GP64()\n\tMOVQ(keyPtr, pageOffset)\n\tANDQ(U32(pageSize-1), pageOffset)\n\tCMPQ(pageOffset, U32(pageSize-maxLength))\n\tJBE(LabelRef(\"load\")) // Not near a page boundary.\n\toffset := GP64()\n\tMOVQ(^U64(0)-maxLength+1, offset)\n\tADDQ(keyLen, offset)\n\tVMOVUPS(Mem{Base: keyPtr, Index: offset, Scale: 1}, key)\n\tvar shuffleBytes [maxLength * 2]byte\n\tfor j := 0; j < maxLength; j++ {\n\t\tshuffleBytes[j] = byte(j)\n\t\tshuffleBytes[j+maxLength] = byte(j)\n\t}\n\tshuffleMasks := ConstBytes(\"shuffle_masks\", shuffleBytes[:])\n\tshuffleMasksPtr := GP64()\n\tLEAQ(shuffleMasks.Offset(maxLength), shuffleMasksPtr)\n\tSUBQ(keyLen, shuffleMasksPtr)\n\tshuffle := XMM()\n\tVMOVUPS(Mem{Base: shuffleMasksPtr}, shuffle)\n\tVPSHUFB(shuffle, key, key)\n\tJMP(LabelRef(\"prepare\"))\n}", "func (m *DHTModule) Lookup(key string) string {\n\n\tif m.IsAttached() {\n\t\tval, err := m.Client.DHT().Lookup(key)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn base64.StdEncoding.EncodeToString([]byte(val))\n\t}\n\n\tctx, cn := context.WithTimeout(context.Background(), 60*time.Second)\n\tdefer cn()\n\tbz, err := m.dht.Lookup(ctx, dht2.MakeKey(key))\n\tif err != nil {\n\t\tpanic(errors.ReqErr(500, StatusCodeServerErr, \"key\", err.Error()))\n\t}\n\treturn base64.StdEncoding.EncodeToString(bz)\n}", "func (c *Cache) Lookup(buildid int64) (string, error) {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\n\tif hash, ok := c.hashes[buildid]; !ok {\n\t\treturn \"\", fmt.Errorf(\"BuildId not found in cache: %d\", buildid)\n\t} else {\n\t\treturn hash, nil\n\t}\n}", "func (rp *Ringpop) Lookup(key string) string {\n\tstartTime := time.Now()\n\n\tdest, ok := rp.ring.Lookup(key)\n\n\trp.emit(LookupEvent{key, time.Now().Sub(startTime)})\n\n\tif !ok {\n\t\trp.log.WithField(\"key\", key).Warn(\"could not find destination for key\")\n\t\treturn rp.WhoAmI()\n\t}\n\n\treturn dest\n}", "func (kp *KeyPool) Lookup(keyid [signkeys.KeyIDSize]byte) (*signkeys.PublicKey, error) {\n\tkp.mapMutex.RLock()\n\tdefer kp.mapMutex.RUnlock()\n\tkey, err := kp.lookup(keyid)\n\tif err == ErrNotFound && kp.FetchKeyCallBack != nil {\n\t\t// Use fetchkey callback\n\t\tfetchedKeyMarshalled, err := kp.FetchKeyCallBack(keyid[:])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfetchedKey, err := new(signkeys.PublicKey).Unmarshal(fetchedKeyMarshalled)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tkeyidFetch, err := kp.loadKey(fetchedKey)\n\t\tif err != nil && err != ErrExists {\n\t\t\treturn nil, ErrNotFound\n\t\t}\n\t\tif *keyidFetch == keyid {\n\t\t\treturn fetchedKey, nil\n\t\t}\n\t}\n\treturn key, err\n}", "func (m Map) Lookup(index MalType) (MalType, bool) {\n\treturn m.Imm.Get(index)\n}", "func (h *hashtable) Lookup(key LRUItem) *CacheItem {\n\tkeyHash := key.Hash()\n\tbucketIndex := int(keyHash) % h.bucketcount\n\tfor node := h.buckets[bucketIndex]; node != nil; node = node.chain {\n\t\tif node.hash == keyHash {\n\t\t\tif node.data.Equals(key) {\n\t\t\t\treturn node\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (fi *FsCache) lookup(key *fs.FsFile) (*fs.FsData, bool) {\n\tfdata, ok := fi.dict[key.String()]\n\treturn fdata, ok\n}", "func (ms *MemoizeSigner) lookup(msg []byte) ([]byte, bool) {\n\tms.RLock()\n\tdefer ms.RUnlock()\n\tsig, exists := ms.memory[msgDigest(msg)]\n\treturn sig, exists\n}", "func (fs 
*fsMutable) lookup(p fuseops.InodeID, c string) (le lookupEntry, found bool, lk []byte) {\n\treturn lookup(p, c, fs.lookupTree)\n}", "func (fs *fsMutable) lookup(p fuseops.InodeID, c string) (le lookupEntry, found bool, lk []byte) {\n\treturn lookup(p, c, fs.lookupTree)\n}", "func (filter *BloomFilter) Lookup(msg []byte) (bool, error) {\n\tfor _, v := range filter.keys {\n\t\tval := filter.hashFunc(msg, v)\n\t\tif x, _ := filter.bitMap.GetPosition(val % filter.m); x != 1 {\n\t\t\treturn false, nil\n\t\t}\n\t}\n\treturn true, nil\n}", "func (m *Map) Get(key string) string {\n\tif m.IsEmpty() {\n\t\treturn \"\"\n\t}\n\n\thash := int(m.hash([]byte(key)))\n\n\t// Binary search for appropriate replica.\n\tidx := sort.Search(len(m.keys), func(i int) bool { return m.keys[i] >= hash })\n\n\t// Means we have cycled back to the first replica.\n\tif idx == len(m.keys) {\n\t\tidx = 0\n\t}\n\n\treturn m.hashMap[m.keys[idx]]\n}", "func (a Address) Lookup(k string) (any, error) {\n\tswitch k {\n\tcase \"port\":\n\t\tt, err := net.ResolveTCPAddr(\"tcp\", a.String())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn t.Port, nil\n\tcase \"ip\":\n\t\tt, err := net.ResolveTCPAddr(\"tcp\", a.String())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn t.IP.String(), nil\n\t}\n\treturn nil, ErrKeyNotFound{Key: k}\n}", "func (rad *Radix) Lookup(key string) interface{} {\n\trad.lock.Lock()\n\tdefer rad.lock.Unlock()\n\tif x, ok := rad.root.lookup([]rune(key)); ok {\n\t\treturn x.value\n\t}\n\treturn nil\n}", "func (kp *KeyPool) lookup(keyid [signkeys.KeyIDSize]byte) (*signkeys.PublicKey, error) {\n\tif d, ok := kp.keys[keyid]; ok {\n\t\tif d.Expire > times.Now() {\n\t\t\treturn d, nil\n\t\t}\n\t\treturn nil, ErrExpired\n\t}\n\treturn nil, ErrNotFound\n}", "func (manager *KeysManager) Lookup(keyID string) (*jose.JSONWebKey, error) {\n\tvar jwk *jose.JSONWebKey\n\tjwk, exists := manager.KeyMap[keyID]\n\t// If no key is found, refresh the stored key set.\n\tif !exists {\n\t\tif err := manager.Refresh(); err != nil {\n\t\t\treturn jwk, err\n\t\t}\n\t\tjwk, exists = manager.KeyMap[keyID]\n\t\t// If still no key is found, return an error.\n\t\tif !exists {\n\t\t\treturn nil, missingKey(keyID)\n\t\t}\n\t}\n\treturn jwk, nil\n}", "func (h *hash) Get(k string) reference.I {\n\tif h == nil {\n\t\treturn nil\n\t}\n\n\th.RLock()\n\tdefer h.RUnlock()\n\n\treturn h.m[k]\n}", "func lookup(p fuseops.InodeID, c string, lookupTree *iradix.Tree) (le lookupEntry, found bool, lk []byte) {\n\tlk = formLookupKey(p, c)\n\tval, found := lookupTree.Get(lk)\n\tif found {\n\t\tle = val.(lookupEntry)\n\t\treturn le, found, lk\n\t}\n\treturn lookupEntry{}, found, lk\n}", "func lookup(p fuseops.InodeID, c string, lookupTree *iradix.Tree) (le lookupEntry, found bool, lk []byte) {\n\tlk = formLookupKey(p, c)\n\tval, found := lookupTree.Get(lk)\n\tif found {\n\t\tle = val.(lookupEntry)\n\t\treturn le, found, lk\n\t}\n\treturn lookupEntry{}, found, lk\n}", "func (n *NoOP) Lookup(key []byte) []byte {\n\treturn make([]byte, 1)\n}", "func (m *Hashmap) Get(key int) (val string, found bool) {\n\tidx := hash(key) % m.size\n\tcurr := m.buckets[idx]\n\tfor curr.key != key {\n\t\t// If the key doesn't exist in the hashmap\n\t\tif curr.next == nil {\n\t\t\treturn \"\", false\n\t\t}\n\t\tcurr = *curr.next\n\t}\n\treturn curr.val, true\n}", "func (idx *Tree) Lookup(key []byte) (value uint64, found bool) {\n\tid := idx.allocatorQueue.get()\n\tvalue, found = idx.allocators[id].Lookup(key)\n\tidx.allocatorQueue.put(id)\n\treturn\n}", 
"func (sm *ConcurrentStateMachine) Lookup(query []byte) ([]byte, error) {\n\treturn sm.sm.Lookup(query)\n}", "func (m *Map) Get(key string) string {\n\tif m.IsEmpty() {\n\t\treturn \"\"\n\t}\n\thash := m.hash([]byte(key))\n\tn := node{hash: hash, key: key}\n\titer := floor(&m.nodes.Tree, &n)\n\tif iter == m.nodes.End() {\n\t\titer = m.nodes.Begin()\n\t}\n\treturn iter.Node().Key.(*node).key\n}", "func (t *Table) Lookup(s string) (n uint32, ok bool) {\n\ti0 := int(murmurSeed(0).hash(s)) & t.level0Mask\n\tseed := t.level0[i0]\n\ti1 := int(murmurSeed(seed).hash(s)) & t.level1Mask\n\tn = t.level1[i1]\n\treturn n, s == t.keys[int(n)]\n}", "func (this *LruMap) Get(key string) (rv interface{}, ok bool) {\n\treturn this.GetByHash(HashString(key))\n}", "func Lookup(node *Node, key int) bool {\n\tif node == nil {\n\t\treturn false\n\t} else {\n\t\tif node.Key == key {\n\t\t\treturn true\n\t\t} else {\n\t\t\tif node.Key > key {\n\t\t\t\treturn Lookup(node.Left, key)\n\t\t\t} else {\n\t\t\t\treturn Lookup(node.Right, key)\n\t\t\t}\n\t\t}\n\t}\n}", "func Lookup(v interface{}) (string, error) {\n\tvar mac net.HardwareAddr\n\tswitch v.(type) {\n\tcase string:\n\t\tvar err error\n\t\tmac, err = net.ParseMAC(v.(string))\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\tcase net.HardwareAddr:\n\t\tmac = v.(net.HardwareAddr)\n\tdefault:\n\t\treturn \"\", errCannotResolveType\n\t}\n\n\tprefix := mac[:3].String()\n\tif val, ok := mapping[strings.ToLower(prefix)]; ok {\n\t\treturn val, nil\n\t}\n\n\treturn \"\", nil\n}", "func (idx *LearnedIndex) Lookup(key float64) (offsets []int, err error) {\n\tguess, lower, upper := idx.GuessIndex(key)\n\ti := 0\n\t// k, o, err := store.Get(guess_i)\n\t// st, err := store.STExtract(guess_i+1, upper+1)\n\n\tif key > idx.ST.Keys[guess] {\n\t\tsubKeys := idx.ST.Keys[guess+1 : upper+1]\n\t\ti = sort.SearchFloat64s(subKeys, key) + guess + 1\n\t} else if key <= idx.ST.Keys[guess] {\n\t\tsubKeys := idx.ST.Keys[lower : guess+1]\n\t\ti = sort.SearchFloat64s(subKeys, key) + lower\n\t}\n\n\t// iterate to get all equal keys\n\tfor ; i < upper+1; i++ {\n\t\tif idx.ST.Keys[i] == key {\n\t\t\toffsets = append(offsets, idx.ST.Offsets[i])\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif len(offsets) == 0 {\n\t\terr = fmt.Errorf(\"The following key <%f> is not found in the index\", key)\n\t}\n\n\treturn offsets, err\n}", "func (fi *FsCache) Lookup(key *fs.FsFile) (*fs.FsData, bool) {\n\tfi.l.RLock()\n\tdefer fi.l.RUnlock()\n\tfdata, ok := fi.lookup(key)\n\treturn fdata, ok\n}", "func (r propertyTypeRegistry) Lookup(module, key string) bool {\n\t_, found := r[module][key]\n\treturn found\n}", "func (a *netAPI) HashLookup(ctx context.Context, hashPrefix []byte,\n\tthreatTypes []pb.ThreatType) (*pb.SearchHashesResponse, error) {\n\tresp := new(pb.SearchHashesResponse)\n\tu := *a.url // Make a copy of URL\n\t// Add fields from SearchHashesRequest to URL request\n\tq := u.Query()\n\tq.Set(hashPrefixString, base64.StdEncoding.EncodeToString(hashPrefix))\n\tfor _, threatType := range threatTypes {\n\t\tq.Add(threatTypesString, threatType.String())\n\t}\n\tu.RawQuery = q.Encode()\n\tu.Path = findHashPath\n\treturn resp, a.doRequest(ctx, u.String(), resp)\n}", "func (c *LRUCache) Get(key interface{}) (interface{}, error) {\n\tif v, ok := c.lookupTable[key]; ok {\n\t\treturn v, nil\n\t}\n\treturn nil, fmt.Errorf(\"%s Not found\", key)\n}", "func (w *WindowedMap) Lookup(uid UID) (interface{}, bool) {\n\tw.ExpireOldEntries()\n\titem, ok := w.uidMap[uid]\n\tif !ok {\n\t\treturn nil, 
false\n\t}\n\tw.Put(uid, item.value)\n\treturn item.value, true\n}", "func (r *Ring) Lookup(n int, key []byte) ([]*Vnode, error) {\n\t// Ensure that n is sane\n\tif n > r.config.NumSuccessors {\n\t\treturn nil, fmt.Errorf(\"Cannot ask for more successors than NumSuccessors!\")\n\t}\n\n\t// Hash the key\n\th := r.config.HashFunc()\n\th.Write(key)\n\tkey_hash := h.Sum(nil)\n\n\t// Find the nearest local vnode\n\tnearest := r.nearestVnode(key_hash)\n\n\t// Use the nearest node for the lookup\n\tsuccessors, err := nearest.FindSuccessors(n, key_hash)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Trim the nil successors\n\tfor successors[len(successors)-1] == nil {\n\t\tsuccessors = successors[:len(successors)-1]\n\t}\n\treturn successors, nil\n}", "func (c *Config) Lookup(key string) (any, error) {\n\t// check thet key is valid, meaning it starts with one of\n\t// the fields of the config struct\n\tif !c.isValidKey(key) {\n\t\treturn nil, nil\n\t}\n\tval, err := lookupByType(key, reflect.ValueOf(c))\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"lookup: error on key '%s'\", key)\n\t}\n\treturn val, nil\n}", "func (h *Hash) Retrieve(key string) (int, error) {\n\n\t// For the specified key, identify what bucket in\n\t// the slice we need to store the key/value inside of.\n\tidx := h.hashKey(key)\n\n\t// Iterate over the entries for the specified bucket.\n\tfor _, entry := range h.buckets[idx] {\n\n\t\t// Compare the keys and if there is a match return\n\t\t// the value associated with the key.\n\t\tif entry.key == key {\n\t\t\treturn entry.value, nil\n\t\t}\n\t}\n\n\t// The key was not found so return the error.\n\treturn 0, fmt.Errorf(\"%q not found\", key)\n}", "func (local *Node) Lookup(key string) (nodes []RemoteNode, err error) {\n\t// TODO: students should implement this\n\treturn\n}", "func Lookup(k byte, x *[16]byte) int32", "func (c *Cache) lookupResult(\n\tpodName, nodeName, predicateKey string,\n\tequivalenceHash uint64,\n) (value predicateResult, ok bool) {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\tglog.V(5).Infof(\"Cache lookup: node=%s,predicate=%s,pod=%s\", nodeName, predicateKey, podName)\n\tvalue, ok = c.cache[nodeName][predicateKey][equivalenceHash]\n\treturn value, ok\n}", "func (k *Keychain) MatchKey(s Signed) ([]byte, error) {\n\tk.m.RLock()\n\tif len(k.keys) == 0 {\n\t\tk.m.RUnlock()\n\t\treturn nil, ErrNoKeys\n\t}\n\tfor _, key := range k.keys {\n\t\tif ok, err := IsAuthentic(s, key); err != nil {\n\t\t\tk.m.RUnlock()\n\t\t\treturn nil, err\n\t\t} else if ok {\n\t\t\tk.m.RUnlock()\n\t\t\treturn key, nil\n\t\t}\n\t}\n\tk.m.RUnlock()\n\treturn nil, ErrNoMatchingKey\n}", "func (sm *RegularStateMachine) Lookup(query []byte) ([]byte, error) {\n\treturn sm.sm.Lookup(query), nil\n}", "func (l *RandomAccessGroupLookup) Lookup(key flux.GroupKey) (interface{}, bool) {\n\tid := l.idForKey(key)\n\te, ok := l.index[string(id)]\n\tif !ok || e.Deleted {\n\t\treturn nil, false\n\t}\n\treturn e.Value, true\n}", "func (this *Map) Get(key interface{}) interface{} {\n\tnode := this.tree.FindNode(key)\n\tif node != nil {\n\t\treturn node.Value()\n\t}\n\treturn nil\n}", "func (d *Driver) Lookup(id string) ([]byte, error) {\n\td.lockfile.Lock()\n\tdefer d.lockfile.Unlock()\n\n\tsecretData, err := d.getAllData()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif data, ok := secretData[id]; ok {\n\t\treturn data, nil\n\t}\n\treturn nil, fmt.Errorf(\"%s: %w\", id, errNoSecretData)\n}", "func (t *middNode) Lookup(searchKey string) HandlerFuncs {\r\n\tsearchKey = 
strings.Split(searchKey, \" \")[0]\r\n\tif searchKey[len(searchKey) - 1] == '*' {\r\n\t\tsearchKey = searchKey[:len(searchKey) - 1]\r\n\t}\r\n\treturn t.recursiveLoopup(searchKey)\r\n}", "func (l *GroupLookup) Lookup(key flux.GroupKey) (interface{}, bool) {\n\tif key == nil || len(l.groups) == 0 {\n\t\treturn nil, false\n\t}\n\n\tgroup := l.lookupGroup(key)\n\tif group == -1 {\n\t\treturn nil, false\n\t}\n\n\ti := l.groups[group].Index(key)\n\tif i != -1 {\n\t\treturn l.groups[group].At(i), true\n\t}\n\treturn nil, false\n}", "func (m *hashRing) Get(key string) *destination {\n\tif m.IsEmpty() {\n\t\treturn nil\n\t}\n\n\thash := int(m.hash([]byte(key)))\n\n\t// Binary search for appropriate replica.\n\tidx := sort.Search(len(m.keys), func(i int) bool { return m.keys[i] >= hash })\n\n\t// Means we have cycled back to the first replica.\n\tif idx == len(m.keys) {\n\t\tidx = 0\n\t}\n\n\treturn m.hashMap[m.keys[idx]]\n}", "func resolve(f logrus.FieldMap, key string) string {\n\tfor k, v := range f {\n\t\tif string(k) == key {\n\t\t\treturn v\n\t\t}\n\t}\n\n\treturn key\n}", "func (e entry) lookup(name interface{}) (interface{}, bool) {\n\tif res, ok := e[name]; ok {\n\t\treturn res, true\n\t}\n\treturn nil, false\n}", "func (s *ConcurrentStateMachine) Lookup(query interface{}) (interface{}, error) {\n\treturn s.sm.Lookup(query)\n}", "func (d Dir) Lookup(path string) (file reflow.File, ok bool) {\n\tfile, ok = d.contents[path]\n\treturn file, ok\n}", "func (h *HashMap) Get(key Key) (value interface{}, err error) {\n\ti := h.HashFunc(h.BlockSize, key)\n\tentry := h.Buckets[i]\n\tfor entry != nil {\n\t\tif entry.Key == key {\n\t\t\treturn entry.Value, nil\n\t\t}\n\t\tentry = entry.NextEntry\n\t}\n\treturn nil, fmt.Errorf(\"key %s not found\", key)\n}", "func (r *Resolver) lookupFunc(key string) func() (interface{}, error) {\n\tif len(key) == 0 {\n\t\tpanic(\"lookupFunc with empty key\")\n\t}\n\tresolver := net.DefaultResolver\n\tif r.Resolver != nil {\n\t\tresolver = r.Resolver\n\t}\n\tswitch key[0] {\n\tcase 'h':\n\t\treturn func() (interface{}, error) {\n\t\t\tctx, cancel := r.getCtx()\n\t\t\tdefer cancel()\n\t\t\treturn resolver.LookupHost(ctx, key[1:])\n\t\t}\n\tcase 'r':\n\t\treturn func() (interface{}, error) {\n\t\t\tctx, cancel := r.getCtx()\n\t\t\tdefer cancel()\n\t\t\treturn resolver.LookupAddr(ctx, key[1:])\n\t\t}\n\tcase 'm':\n\t\treturn func() (interface{}, error) {\n\t\t\tctx, cancel := r.getCtx()\n\t\t\tdefer cancel()\n\t\t\treturn resolver.LookupMX(ctx, key[1:])\n\t\t}\n\tdefault:\n\t\tpanic(\"lookupFunc invalid key type: \" + key)\n\t}\n}", "func MKeyExists[M ~map[T]U, T comparable, U any](obj M, key T, a ...any) (val U) {\n\tvar ok bool\n\tval, ok = obj[key]\n\n\tif !ok {\n\t\tdefMsg := fmt.Sprintf(assertionMsg+\": key '%v' doesn't exist\", key)\n\t\tDefault().reportAssertionFault(defMsg, a...)\n\t}\n\treturn val\n}", "func (this *MyHashMap) Get(key int) int {\n\thashKey := key % m\n\tfor i := 0; i < len(this.arr[hashKey]); i++ {\n\t\tif this.arr[hashKey][i].Key == key {\n\t\t\treturn this.arr[hashKey][i].Value\n\t\t}\n\t}\n\treturn -1\n}", "func (l *localStore) Get(key string) ([]byte, error) {\n\tb, ok := l.m[key]\n\tif !ok {\n\t\treturn []byte{}, ErrKeyNotFound\n\t}\n\treturn b, nil\n}", "func (hm HashMap) Get(key string) (string, error) {\n\tl := hm.findList(key)\n\tentry, _ := hm.findEntry(key, l)\n\tif entry != nil {\n\t\treturn entry.value, nil\n\t}\n\treturn \"\", fmt.Errorf(\"No entry with key %s\", key)\n}", "func (this *MultiMap) Get(key interface{}) interface{} 
{\n\tnode := this.tree.FindNode(key)\n\tif node != nil {\n\t\treturn node.Value()\n\t}\n\treturn nil\n}", "func (c *compiler) mapLookup(m *LLVMValue, key Value, insert bool) (elem *LLVMValue, notnull *LLVMValue) {\n\tmapType := m.Type().Underlying().(*types.Map)\n\tmaplookup := c.NamedFunction(\"runtime.maplookup\", \"func f(t, m, k uintptr, insert bool) uintptr\")\n\tptrType := c.target.IntPtrType()\n\targs := make([]llvm.Value, 4)\n\targs[0] = llvm.ConstPtrToInt(c.types.ToRuntime(mapType), ptrType)\n\targs[1] = c.builder.CreatePtrToInt(m.LLVMValue(), ptrType, \"\")\n\tif insert {\n\t\targs[3] = llvm.ConstAllOnes(llvm.Int1Type())\n\t} else {\n\t\targs[3] = llvm.ConstNull(llvm.Int1Type())\n\t}\n\n\tif lv, islv := key.(*LLVMValue); islv && lv.pointer != nil {\n\t\targs[2] = c.builder.CreatePtrToInt(lv.pointer.LLVMValue(), ptrType, \"\")\n\t}\n\tif args[2].IsNil() {\n\t\tstackval := c.builder.CreateAlloca(c.types.ToLLVM(key.Type()), \"\")\n\t\tc.builder.CreateStore(key.LLVMValue(), stackval)\n\t\targs[2] = c.builder.CreatePtrToInt(stackval, ptrType, \"\")\n\t}\n\n\teltPtrType := types.NewPointer(mapType.Elem())\n\tllvmtyp := c.types.ToLLVM(eltPtrType)\n\tzeroglobal := llvm.AddGlobal(c.module.Module, llvmtyp.ElementType(), \"\")\n\tzeroglobal.SetInitializer(llvm.ConstNull(llvmtyp.ElementType()))\n\tresult := c.builder.CreateCall(maplookup, args, \"\")\n\tresult = c.builder.CreateIntToPtr(result, llvmtyp, \"\")\n\tnotnull_ := c.builder.CreateIsNotNull(result, \"\")\n\tresult = c.builder.CreateSelect(notnull_, result, zeroglobal, \"\")\n\tvalue := c.NewValue(result, eltPtrType)\n\treturn value.makePointee(), c.NewValue(notnull_, types.Typ[types.Bool])\n}", "func (d *gossipDiscoveryImpl) Lookup(PKIID common.PKIidType) *NetworkMember {\n\tif bytes.Equal(PKIID, d.self.PKIid) {\n\t\treturn &d.self\n\t}\n\td.lock.RLock()\n\tdefer d.lock.RUnlock()\n\treturn copyNetworkMember(d.id2Member[string(PKIID)])\n}", "func (m *Map) Get(key string) (Item, error) {\n\tm.store.RLock()\n\tdefer m.store.RUnlock()\n\tif m.keeper.drained {\n\t\treturn zeroItem, ErrDrained\n\t}\n\tif pqi := m.store.kv[key]; pqi != nil {\n\t\titem := *pqi.item\n\t\treturn item, nil\n\t}\n\treturn zeroItem, ErrNotExist\n}", "func (db *Overlay) FetchDirBlockInfoByKeyMR(hash interfaces.IHash) (interfaces.IDirBlockInfo, error) {\n\tblock, err := db.FetchBlock(DIRBLOCKINFO_UNCONFIRMED, hash, dbInfo.NewDirBlockInfo())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif block == nil {\n\t\tblock, err = db.FetchBlock(DIRBLOCKINFO, hash, dbInfo.NewDirBlockInfo())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif block == nil {\n\t\t\treturn nil, nil\n\t\t}\n\t}\n\treturn block.(interfaces.IDirBlockInfo), nil\n}", "func (server *LogEx) GetEntryByEncodedHash(key log.EncodedHash, logEntry *log.LogEntry) error {\n return server.p2p.GetEntryByEncodedHash(server, key, logEntry)\n}", "func (lmem *lockedMemRepo) Get(name string) (types.KeyInfo, error) {\n\tif err := lmem.checkToken(); err != nil {\n\t\treturn types.KeyInfo{}, err\n\t}\n\tlmem.RLock()\n\tdefer lmem.RUnlock()\n\n\tkey, ok := lmem.mem.keystore[name]\n\tif !ok {\n\t\treturn types.KeyInfo{}, xerrors.Errorf(\"getting key '%s': %w\", name, types.ErrKeyInfoNotFound)\n\t}\n\treturn key, nil\n}", "func (s *flowMap) Put(k, v *flow.Flow) {\n\td := k.Digest()\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tif _, ok := s.m[d]; ok {\n\t\treturn\n\t}\n\ts.m[d] = v\n}", "func (t MemTree) Lookup(n *Node, s string) (nres *Node, part, match string) {\n\tdebugf(\"Lookup: NODE<%+v> for '%s'\\n\", *n, 
s)\n\tif s == \"\" {\n\t\treturn n, \"\", \"\"\n\t}\n\tfor _, c := range n.Children {\n\t\tdebugf(\"\\tchild %s\", c.Edgename)\n\t\tmatch = matchprefix(c.Edgename, s)\n\t\tif match == \"\" {\n\t\t\tdebug(\" does not match\")\n\t\t\tcontinue\n\t\t} else {\n\t\t\tdebug(\" matches\", len(match), \"characters ->\", match)\n\t\t\tif len(match) < len(c.Edgename) {\n\t\t\t\treturn c, \"\", match\n\t\t\t}\n\t\t\tvar m string\n\t\t\tnres, part, m = t.Lookup(c, s[len(match):])\n\t\t\tmatch += m\n\t\t\tif part == \"\" {\n\t\t\t\tpart = m\n\t\t\t}\n\t\t\t// for a partial match\n\t\t\tif nres == nil {\n\t\t\t\treturn c, part, match\n\t\t\t}\n\t\t\treturn nres, part, match\n\t\t}\n\t}\n\treturn nil, \"\", \"\"\n}", "func (r *renderer) lookup(name string, ln, col int, key []string) (reflect.Value, error) {\n\tv := lookupKeysStack(key, r.stack)\n\tif !v.IsValid() && r.template.ContextErrorsEnabled {\n\t\treturn v, fmt.Errorf(\"%s:%d:%d: cannot find value %s in context\", name, ln, col, strings.Join(key, \".\"))\n\t}\n\treturn v, nil\n}", "func (x *Index) Lookup(s []byte, n int) (result []int)", "func (c *Cache) Lookup(key string, factory func() interface{}) chan interface{} {\n\tc.lock.Lock()\n\trchan := make(chan interface{}, 1) // Return results.\n\t// Check if we know the key.\n\tif e, ok := c.cache[key]; ok {\n\t\te.lock.Lock() // Switch lock to element mutex\n\t\tdefer e.lock.Unlock()\n\t\tc.lock.Unlock()\n\n\t\tif e.result != nil { // We have a result.\n\t\t\trchan <- e.result\n\t\t\tclose(rchan)\n\t\t} else {\n\t\t\te.futures = append(e.futures, rchan)\n\t\t}\n\t} else {\n\t\tt := &cacheObject{\n\t\t\tfutures: []chan interface{}{rchan},\n\t\t\tlock: new(sync.Mutex),\n\t\t}\n\t\tc.cache[key] = t\n\t\tc.lock.Unlock()\n\t\t// Call factory\n\t\tgo func() {\n\t\t\tvalue := factory()\n\t\t\tc.register(key, value)\n\t\t}()\n\t}\n\treturn rchan\n}", "func (cm CMap) FindBy(key interface{}, mf MatchF) *CMapEntry {\n\tfor _, entry := range cm.Entries {\n\t\tif mf(key, entry.Key) {\n\t\t\treturn &entry\n\t\t}\n\t}\n\treturn nil\n}", "func (f *Forward) Lookup(state request.Request, name string, typ uint16) (*dns.Msg, error) {\n\tif f == nil {\n\t\treturn nil, ErrNoForward\n\t}\n\n\treq := new(dns.Msg)\n\treq.SetQuestion(name, typ)\n\tstate.SizeAndDo(req)\n\n\tstate2 := request.Request{W: state.W, Req: req}\n\n\treturn f.Forward(state2)\n}", "func (x *Index) Lookup(s []byte, n int) (result []int) {}", "func LookupEqualFold[T any | string](m map[string]T, key string) (T, bool) {\n\tif v, found := m[key]; found {\n\t\treturn v, true\n\t}\n\tfor k, v := range m {\n\t\tif strings.EqualFold(k, key) {\n\t\t\treturn v, true\n\t\t}\n\t}\n\tvar s T\n\treturn s, false\n}", "func (c *rPathCacheContainer) lookup(cPath string) ([]byte, string) {\n\tc.Lock()\n\tdefer c.Unlock()\n\tif cPath == c.cPath {\n\t\t// hit\n\t\treturn c.dirIV, c.pPath\n\t}\n\t// miss\n\treturn nil, \"\"\n}", "func (i StringHashMap[T, V]) Get(key T) (V, bool) {\n\thash := key.Hash()\n\tval, ok := i.hashToVal[hash]\n\treturn val, ok\n}", "func (hm *MyHashMap) Get(key int) int {\n\tl := len(hm.HMap)\n\tidx := key % l\n\tcur := hm.HMap[idx]\n\tfor cur != nil {\n\t\tif cur.Key == key {\n\t\t\treturn cur.Val\n\t\t}\n\t\tcur = cur.Next\n\t}\n\treturn -1\n}", "func (h *hashMap) Get(strKey string) (string, error) {\n\tkey := GetHash(strKey)\n\n\tbucket := h.getBucket(key)\n\n\titem := bucket.get(Regularkey(key))\n\n\tif item == nil {\n\t\treturn \"\", errors.New(\"ghost: no such key\")\n\t}\n\n\treturn item.Val, nil\n}", "func (f *NaiveMap) Get(key string) 
interface{} {\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\n\t// Goes through keys list to find key match\n\tfor i, _ := range f.keys {\n\t\tif f.keys[i] == key {\n\t\t\treturn f.values[i]\n\t\t}\n\t}\n\treturn nil\n}", "func (lh *LHash) Find(key []byte) (*client.ObjectRef, error) {\n\tres, _, err := lh.Conn.RunTransaction(func(txn *client.Txn) (interface{}, error) {\n\t\terr := lh.populate()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tbucket, err := lh.newBucket(lh.refs[lh.root.BucketIndex(lh.hash(key))])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn bucket.find(key)\n\t})\n\tif err == nil {\n\t\treturn res.(*client.ObjectRef), nil\n\t} else {\n\t\treturn nil, err\n\t}\n}", "func LookupMap(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo2.Error) {\n\tlog.Debugf(\"[LookupMap] IN PARAM: %+v %+v\", *in, *param)\n\tif !in.Contains(param) {\n\t\treturn pongo2.AsValue(\"\"), nil\n\t}\n\tii := in.Interface()\n\tlog.Debugf(\"[LookupMap] Lookup value: %v %s\", ii, param.String())\n\tswitch ii.(type) {\n\tcase map[string]interface{}:\n\t\tvv := reflect.ValueOf(ii).MapIndex(reflect.ValueOf(param.String()))\n\t\tlog.Debugf(\"Found value: %v\", vv.Interface())\n\t\treturn pongo2.AsValue(vv.Interface()), nil\n\tdefault:\n\t\treturn nil, &pongo2.Error{\n\t\t\tSender: \"filterLookupMap\",\n\t\t\tErrorMsg: fmt.Sprintf(\"Error looking up value - lookup error '%v' in '%v'\", param, in),\n\t\t}\n\t}\n}", "func (a *addrBook) Lookup(addr p2pcrypto.PublicKey) (*node.Info, error) {\n\ta.mtx.Lock()\n\td := a.lookup(addr)\n\ta.mtx.Unlock()\n\tif d == nil {\n\t\t// Todo: just return empty without error ?\n\t\treturn nil, ErrLookupFailed\n\t}\n\treturn d.na, nil\n}", "func (s *OnDiskStateMachine) Lookup(query interface{}) (interface{}, error) {\n\tif !s.opened {\n\t\tpanic(\"Lookup called when not opened\")\n\t}\n\treturn s.sm.Lookup(query)\n}", "func (da *DoubleArray) Lookup(key string) (int, bool) {\n\tnpos := 0\n\tdepth := 0\n\n\tfor ; depth < len(key); depth++ {\n\t\tif da.array[npos].base < 0 {\n\t\t\tbreak\n\t\t}\n\t\tcpos := da.array[npos].base ^ int(key[depth])\n\t\tif da.array[cpos].check != npos {\n\t\t\treturn 0, false\n\t\t}\n\t\tnpos = cpos\n\t}\n\n\tif da.array[npos].base >= 0 {\n\t\tcpos := da.array[npos].base // ^ int(terminator)\n\t\tif da.array[cpos].check != npos {\n\t\t\treturn 0, false\n\t\t}\n\t\treturn da.array[cpos].base, true\n\t}\n\n\ttpos := -da.array[npos].base\n\tfor ; depth < len(key); depth++ {\n\t\tif da.tail[tpos] != key[depth] {\n\t\t\treturn 0, false\n\t\t}\n\t\ttpos++\n\t}\n\n\tif da.tail[tpos] != terminator {\n\t\treturn 0, false\n\t}\n\treturn da.getValue(tpos + 1), true\n}", "func (k *Kubernetes) Lookup(ctx context.Context, state request.Request, name string, typ uint16) (*dns.Msg, error) {\n\treturn k.Upstream.Lookup(ctx, state, name, typ)\n}", "func (t *chaining) Search(key string) interface{} {\n\thash := t.hash(key)\n\tlist := t.values[hash]\n\tif list == nil || list.Len == 0 {\n\t\treturn nil\n\t}\n\thead := list.Start().Prev\n\tfor head != list.End() {\n\t\thead = head.Next\n\t\tpair := head.Value.(*pair)\n\t\tif pair.key == key {\n\t\t\treturn pair.value\n\t\t}\n\t}\n\treturn nil\n}", "func Lookup(opts []map[string]interface{}, key string) (interface{}, bool) {\n\tif len(opts) == 0 {\n\t\treturn nil, false\n\t}\n\tv, ok := opts[0][key]\n\treturn v, ok\n}", "func (s *RegularStateMachine) Lookup(query interface{}) (interface{}, error) {\n\treturn s.sm.Lookup(query)\n}", "func (m Map) Find(k Entry) interface{} {\n\te := 
m.set.find(k)\n\n\tif e == nil {\n\t\treturn nil\n\t}\n\n\treturn e.(keyValue).value\n}", "func (this *LruMap) GetByBytes(key []byte) (rv interface{}, ok bool) {\n\treturn this.GetByHash(HashBytes(key))\n}", "func (registry *Registry) Lookup(handle string) *task.Task {\n\t// TODO: Refactor the interface here to explicitly add an `ok`\n\t// return value (in the style of reading a map[...]...)\n\t// to differentiate a present nil value return vs. a\n\t// not-present-at-all value.\n\tregistry.lock.RLock()\n\tdefer registry.lock.RUnlock()\n\n\tif t, exists := registry.db[handle]; exists {\n\t\treturn t\n\t}\n\n\treturn nil\n}", "func (dir HgmDir) Lookup(ctx context.Context, name string) (fs.Node, error) {\n\tlocalDirent := dir.localDir + name // dirs are ending with a slash -> just append the name\n\ta := fuse.Attr{}\n\td := HgmDir{hgmFs: dir.hgmFs, localDir: localDirent}\n\terr := d.Attr(ctx, &a)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif (a.Mode & os.ModeType) == os.ModeDir {\n\t\treturn HgmDir{hgmFs: dir.hgmFs, localDir: localDirent + \"/\"}, nil\n\t}\n\n\treturn &HgmFile{hgmFs: dir.hgmFs, localFile: localDirent, fileSize: a.Size}, nil\n}", "func (m *Map) Get(key string) string {\n\tentries, _ := m.entries.Load().(map[string]string)\n\tif entries == nil {\n\t\treturn \"\"\n\t}\n\treturn entries[key]\n}", "func (m *Cmap) Load(key interface{}) (value interface{}, ok bool) {\n\thash := ehash(key)\n\t_, b := m.getInodeAndBucket(hash)\n\treturn b.tryLoad(key)\n}" ]
[ "0.61509585", "0.61406755", "0.6085308", "0.58752364", "0.581275", "0.5706347", "0.56950957", "0.56889385", "0.56715703", "0.56559145", "0.5624322", "0.5610662", "0.55261457", "0.5507308", "0.54677755", "0.54677755", "0.546431", "0.54311925", "0.5379146", "0.53141755", "0.52929825", "0.52577317", "0.52353674", "0.523482", "0.523482", "0.5220728", "0.5146866", "0.5134752", "0.51287", "0.5128644", "0.51279193", "0.51119024", "0.5096941", "0.50612366", "0.503864", "0.50212777", "0.50202066", "0.49657938", "0.4961869", "0.49561414", "0.49542475", "0.4948697", "0.49417707", "0.4940845", "0.4938142", "0.49322146", "0.49300778", "0.49253702", "0.49132812", "0.4912673", "0.49113446", "0.49103686", "0.49089834", "0.48992637", "0.4895491", "0.48914465", "0.48708227", "0.48707652", "0.48697895", "0.48670474", "0.48578423", "0.4856735", "0.48496237", "0.48456597", "0.48429707", "0.4833702", "0.4829362", "0.48281443", "0.48215917", "0.48201725", "0.48066464", "0.48060217", "0.47899002", "0.4786911", "0.47852316", "0.47543555", "0.47329667", "0.47310382", "0.4729767", "0.47292298", "0.47246677", "0.47161674", "0.4707693", "0.47019637", "0.46985778", "0.4690439", "0.46840084", "0.4680117", "0.46767786", "0.4669885", "0.46678075", "0.466622", "0.46628582", "0.46437678", "0.46325696", "0.4632543", "0.46311152", "0.4628634", "0.4623873", "0.46208426" ]
0.660354
0
Insert inserts the provided key-value pair into the map, overriding any previous definition of the key. The caller must provide the digest which is used as a hash.
func (m *Map) Insert(d digest.Digest, key, value T) { if m.tab[d] == nil { entry := &mapEntry{Key: key, Value: value} if m.tab == nil { m.tab = make(map[digest.Digest]**mapEntry) } m.n++ m.tab[d] = &entry return } entryp := m.tab[d] for *entryp != nil && Less((*entryp).Key, key) { entryp = &(*entryp).Next } if *entryp == nil || !Equal((*entryp).Key, key) { *entryp = &mapEntry{Key: key, Value: value, Next: *entryp} m.n++ } else { (*entryp).Value = value } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (t *chaining) Insert(key string, val interface{}) {\n\tif t.loadFactor() > t.maxLoad {\n\t\tt.tableDouble()\n\t}\n\thash := t.hash(key)\n\tif t.values[hash] == nil {\n\t\tt.values[hash] = list.New()\n\t}\n\tt.values[hash].Insert(&pair{key, val})\n\tt.len++\n}", "func (t *Table) Insert(key, value string) {\n\ti := t.hash(key)\n\n\tfor j, kv := range t.table[i] {\n\t\tif key == kv.Key {\n\t\t\t// Overwrite previous value for the same key.\n\t\t\tt.table[i][j].Value = value\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Add a new value to the table.\n\tt.table[i] = append(t.table[i], kv{\n\t\tKey: key,\n\t\tValue: value,\n\t})\n}", "func (am AttributeMap) Insert(k string, v AttributeValue) {\n\tif _, existing := am.Get(k); !existing {\n\t\t*am.orig = append(*am.orig, newAttributeKeyValue(k, v))\n\t}\n}", "func (am AttributeMap) Insert(k string, v AttributeValue) {\n\tif _, existing := am.Get(k); !existing {\n\t\t*am.orig = append(*am.orig, newAttributeKeyValue(k, v))\n\t}\n}", "func (d *Dtrie) Insert(key, value interface{}) *Dtrie {\n\troot := insert(d.root, &entry{d.hasher(key), key, value})\n\treturn &Dtrie{root, d.hasher}\n}", "func (this *Map) Insert(key, value interface{}) {\n\tnode := this.tree.FindNode(key)\n\tif node != nil {\n\t\tnode.SetValue(value)\n\t\treturn\n\t}\n\tthis.tree.Insert(key, value)\n}", "func (t *openAddressing) Insert(key string, value interface{}) {\n\tif t.loadFactor() > 0.5 {\n\t\tt.tableDouble()\n\t}\n\tround := 0\n\tfor round != len(t.values) {\n\t\thash := t.hash(key, round)\n\t\tif t.values[hash] == nil || t.values[hash].deleted {\n\t\t\tt.values[hash] = &deletablePair{\n\t\t\t\tpair: pair{key, value},\n\t\t\t\tdeleted: false,\n\t\t\t}\n\t\t\tt.len++\n\t\t\treturn\n\t\t}\n\t\tround++\n\t}\n}", "func (sm StringMap) Insert(k, v string) {\n\tif _, existing := sm.Get(k); !existing {\n\t\t*sm.orig = append(*sm.orig, newStringKeyValue(k, v))\n\t}\n}", "func (sm StringMap) Insert(k, v string) {\n\tif _, existing := sm.Get(k); !existing {\n\t\t*sm.orig = append(*sm.orig, NewStringKeyValue(k, v).orig)\n\t}\n}", "func (hm *HashMap) Insert(h common.Hash, u types.Unit) {\n\thm.lock.Lock()\n\tdefer hm.lock.Unlock()\n\thm.data[h] = u\n}", "func (self *MapUtil) Insert(k, v string) {\n\tself.m[k] = v\n}", "func (c *Cache) Insert(key string, value string) {\n\treg := &Registry{\n\t\ttime.Now(),\n\t\tvalue,\n\t}\n\n\tc.Lock.Lock()\n\tc.Map[key] = reg\n\tc.Lock.Unlock()\n}", "func (o *KeyValueOrdered) Insert(key Key, idx int, value Value) {\n\to.Remove(key)\n\to.m[key] = idx\n\to.shift(idx, len(o.s), 1)\n\to.s = append(append(append(make([]KeyValueCapsule, 0, len(o.s)+1), o.s[:idx]...), KeyValueCapsule{key, value}), o.s[idx:]...)\n}", "func Put(key string, value string){\n \n h := sha256.New()\n h.Write([]byte(value))\n sha := base64.URLEncoding.EncodeToString(h.Sum(nil))\n \n //fmt.Println(sha)\n var n Data \n \n n.val = value //storing key value in keyValue hash map\n n.hash = sha // storing key hash in keyHash hash map \n \n keyValue[key] = n\n}", "func (c *Cache) Insert(key string, val Codes) {\r\n\tc.MagicKeys[key] = val\r\n}", "func (m *OrderedMap[K,V]) Insert(k K, v V) {\n\tif m.Has(k) {\n\t\tm.mp[k].Value = Pair[K,V]{Key: k, Value: v}\n\t} else {\n\t\tpair := Pair[K,V]{Key: k, Value: v}\n\t\te := m.list.PushBack(pair)\n\t\tm.mp[k] = e\n\t}\n}", "func (s *flowMap) Put(k, v *flow.Flow) {\n\td := k.Digest()\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tif _, ok := s.m[d]; ok {\n\t\treturn\n\t}\n\ts.m[d] = v\n}", "func (d *DirectAddress) Insert(key int, value interface{}) {\n\tif err 
:= d.validateKey(key); err != nil {\n\t\treturn\n\t}\n\td.array[key-d.uMin] = value\n}", "func (t *HashTable) Insert(k string) {\n\tindex := t.hash(k)\n\tb := t.array[index]\n\tif _, exist := b.search(k); !exist {\n\t\tb.insert(k)\n\t}\n}", "func (fdb *fdbSlice) Insert(k Key, v Value) error {\n\n\tfdb.cmdCh <- kv{k: k, v: v}\n\treturn fdb.fatalDbErr\n\n}", "func (this *MultiMap) Insert(key, value interface{}) {\n\tthis.tree.Insert(key, value)\n}", "func (sm safeMap) Insert(key string, value interface{}) {\n\tsm <- commandData{action: INSERT, key: key, value: value}\n}", "func (m MultiValueMap) Insert(key, value string) MultiValueMap {\n\tsets.InsertOrNew(m, key, value)\n\treturn m\n}", "func (n *TreeNode) Insert(key string, value Entry) {\n\tn.mutex.Lock()\n\tn.files[key] = value\n\tn.mutex.Unlock()\n}", "func (i StringHashMap[T, V]) Add(key T, val V) {\n\thash := key.Hash()\n\ti.hashToKey[hash] = key\n\ti.hashToVal[hash] = val\n}", "func (c *OrderedMap) Insert(index int, key string, value interface{}) (interface{}, bool) {\n\toldValue, exists := c.Map[key]\n\tc.Map[key] = value\n\tif exists {\n\t\treturn oldValue, true\n\t}\n\tif index == len(c.Keys) {\n\t\tc.Keys = append(c.Keys, key)\n\t} else {\n\t\tc.Keys = append(c.Keys[:index+1], c.Keys[index:]...)\n\t\tc.Keys[index] = key\n\t}\n\treturn nil, false\n}", "func (m *OrderedMap) Add(key, value string) {\n\tm.Lock()\n\tdefer m.Unlock()\n\tm.add(key, value)\n}", "func (vm *ValueMap) Insert(name string, value llvm.Value) {\n\tvm.vmap[name] = value\n}", "func (table *ConcurrentHashMap) Insert(key KeyType, value ValueType) bool {\n\thashValue := table.mhash(key)\n\tshard := table.getShard(hashValue)\n\n\ttable.RWLocks[shard].Lock()\n\n\twasPairInserted := table.shards[shard].shardInsert(key, hashValue, value)\n\n\ttable.RWLocks[shard].Unlock()\n\n\treturn wasPairInserted\n}", "func (self *Cache) Insert(key interface{}, value interface{}, sizeBytes uint64) (canonicalValue interface{}, in_cache bool) {\n\tself.insertLock.Lock()\n\tdefer self.insertLock.Unlock()\n\n\t_, canonicalValue, in_cache = self.insert(key, value, sizeBytes)\n\treturn\n}", "func (m HMSketch) Insert(kvs map[string]string, value, count float64) HMSketch {\n\tm = m.insert(map[string]string{\"__global__\": \"__global__\"}, value, count)\n\tm = m.insert(kvs, value, count)\n\treturn m\n}", "func (rad *Radix) Insert(key string, value interface{}) error {\n\trad.lock.Lock()\n\tif value == nil {\n\t\treturn errors.New(\"undefined value\")\n\t}\n\tdefer rad.lock.Unlock()\n\treturn rad.root.insert([]rune(key), value)\n}", "func (m HMSketch) insert(kvs map[string]string, value, count float64) HMSketch {\n\thMux.Lock()\n\tfor key, val := range kvs {\n\t\tlocation := hash(key, val)\n\t\tif _, ok := m.Index[location]; !ok {\n\t\t\tm.Index[location] = m.Max\n\t\t\tm.Max++\n\t\t\tm.Registers = append(m.Registers, hist.New(m.Resolution))\n\t\t}\n\t\tm.Registers[m.Index[location]] = m.Registers[m.Index[location]].Insert(value, count)\n\t}\n\thMux.Unlock()\n\treturn m\n}", "func (m Map) Insert(k Entry, v interface{}) Map {\n\treturn Map{m.set.Insert(newKeyValue(k, v))}\n}", "func (shard *hashShard) shardInsert(key KeyType, hashValue IndexType, value ValueType) bool {\n\tshard.rehash()\n\tinitIdx := shard.searchExists(key, hashValue)\n\n\tif (*shard.data)[initIdx].isOccupied && (*shard.data)[initIdx].key == key {\n\t\treturn false\n\t}\n\n\tinsertIdx := shard.searchToInsert(key, hashValue)\n\tif !(*shard.data)[insertIdx].isOccupied {\n\t\tshard.occupied++\n\t}\n\n\t(*shard.data)[insertIdx] = 
hashElement{true, true, key, value}\n\tshard.size++\n\treturn true\n}", "func (m *Map) Insert(key, value interface{}) (inserted bool) {\n\tm.root, inserted = m.insert(m.root, key, value)\n\tm.root.red = false\n\tif inserted {\n\t\tm.length++\n\t}\n\treturn inserted\n}", "func (t *ArtTree) Insert(key []byte, value interface{}) {\n\tkey = ensureNullTerminatedKey(key)\n\tt.insertHelper(t.root, &t.root, key, value, 0)\n}", "func (bst *StringBinarySearchTree) Insert(key int, value string) {\n\tbst.lock.Lock()\n\tdefer bst.lock.Unlock()\n\tn := &Node{key, value, nil, nil}\n\tif bst.root == nil {\n\t\tbst.root = n\n\t} else {\n\t\tinsertNode(bst.root, n)\n\t}\n}", "func (bst *BST) Insert(key interface{}, v interface{}) {\n bst.root = insert(bst.root, bst.root, key, v)\n}", "func (tree *Trie) Insert(key string, value interface{}) {\n\tnode := tree.root\n\trunes := []rune(key)\n\tlevel := uint(1)\n\n\tfor _, r := range runes {\n\t\tif child, ok := node.children[r]; ok {\n\t\t\tnode = child\n\t\t} else {\n\t\t\tnew := NewNode(r, node)\n\t\t\tnode.children[r] = new\n\t\t\tnode = new\n\t\t}\n\t\tlevel++\n\t}\n\tnode.data = value\n\tnode.terminal = true\n}", "func (mpt *MerklePatriciaTrie) Insert(path Path, value Serializable) (Key, error) {\n\tif value == nil {\n\t\tLogger.Debug(\"Insert nil value, delete data on path:\",\n\t\t\tzap.String(\"path\", string(path)))\n\t\treturn mpt.Delete(path)\n\t}\n\teval := value.Encode()\n\tif eval == nil || len(eval) == 0 {\n\t\tLogger.Debug(\"Insert encoded nil value, delete data on path:\",\n\t\t\tzap.String(\"path\", string(path)))\n\t\treturn mpt.Delete(path)\n\t}\n\n\tvalueCopy := &SecureSerializableValue{eval}\n\tmpt.mutex.Lock()\n\tdefer mpt.mutex.Unlock()\n\tvar err error\n\tvar newRootHash Key\n\tif mpt.root == nil {\n\t\t_, newRootHash, err = mpt.insertLeaf(nil, valueCopy, Path(\"\"), path)\n\t} else {\n\t\t_, newRootHash, err = mpt.insert(valueCopy, mpt.root, Path(\"\"), path)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmpt.setRoot(newRootHash)\n\treturn newRootHash, nil\n}", "func (r *RedisClusterCache) Insert(key string, value []byte, ttl time.Duration) error {\n\tresp := r.cluster.Cmd(\"SET\", key, value, \"EX\", ttl.Seconds())\n\treturn resp.Err\n}", "func Insert(key []byte, value []byte) error {\n\tif db == nil {\n\t\treturn errors.New(\"database not initialized\")\n\t}\n\n\tif len(key) == 0 {\n\t\treturn errors.New(\"empty key provided\")\n\t}\n\n\treturn db.Put(key, value, nil)\n}", "func (tree *BST) Insert(key int, value interface{}) {\n\ttree.lock.Lock()\n\tdefer tree.lock.Unlock()\n\tnewNode := &Node{key, value, nil, nil}\n\n\tif tree.root == nil {\n\t\ttree.root = newNode\n\t} else {\n\t\tInsertNode(tree.root, newNode)\n\t}\n}", "func (hm *HashMap) Put(key, value string) {\n\tindex := hm.getIndex(key)\n\thm.rwMutex.Lock()\n\tdefer hm.rwMutex.Unlock()\n\thas, _ := hm.getNode(key)\n\tif has != nil {\n\t\thas.value = value\n\t\treturn\n\t}\n\thm.nodes[index].append(&node{key: key, value:value})\n}", "func (m RedisCache) Insert(key string, val *string, ttl int) {\n\terr := m.Rdb.Set(ctx, key, *val, time.Duration(ttl)*time.Minute).Err()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func (s *Store) Add(ctx context.Context, key interface{}, v json.Marshaler) error {\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\tdefault:\n\t}\n\n\tb, err := v.MarshalJSON()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\tdefault:\n\t}\n\n\tif _, ok := 
s.m[key]; ok {\n\t\treturn store.ErrKeyExists\n\t}\n\n\ts.m[key] = entry{data: b}\n\treturn nil\n}", "func (m *Uint64) Insert(key interface{}, val *atomics.Uint64) {\n\tm.m.Store(key, val)\n}", "func (hm *HashMap) Add(k Key, v interface{}) {\n\t// Double table size if 50% full\n\tif cap(hm.arr) <= hm.size*2 {\n\t\thm.resize(cap(hm.arr) * 2)\n\t}\n\n\th := hm.Hash(k)\n\n\t// Find empty slot\n\tfor hm.arr[h] != nil && hm.arr[h] != deleted && hm.arr[h].k != k {\n\t\th = (h + 1) % cap(hm.arr)\n\t}\n\n\tif hm.arr[h] == nil || hm.arr[h] == deleted {\n\t\thm.size++\n\t}\n\n\tnode := &mapNode{k, v}\n\thm.arr[h] = node\n}", "func (t *binarySearchTree) Insert(key int, value string) error {\n\tif t.root == nil {\n\t\tt.root = newBinarySearchTreeNode(key, value)\n\t\tt.history.append(t.root)\n\t\treturn nil\n\t}\n\tnode, err := insert(t.root, key, value)\n\tif err != nil {\n\t\treturn err\n\t}\n\tt.history.append(node)\n\treturn nil\n}", "func (ht *ValueHashtable) Put(k [HashSize]byte, v []byte) {\r\n\tht.lock.Lock()\r\n\tdefer ht.lock.Unlock()\r\n\tif ht.items == nil {\r\n\t\tht.items = make(map[[HashSize]byte][]byte)\r\n\t}\r\n\tht.items[k] = v\r\n}", "func (db *DB) Insert(key interface{}, value interface{}) error {\n\treturn db.bolt.Update(func(tx *bolt.Tx) error {\n\t\treturn db.InsertTx(tx, key, value)\n\t})\n}", "func (n *Node) InsertVal(req KVPair, done *bool) error {\n\t*done = false\n\tn.Data.lock.Lock()\n\tn.Data.Map[req.Key] = req.Val\n\tn.Data.lock.Unlock()\n\n\t_ = n.FixSuc()\n\tclient, err := rpc.Dial(\"tcp\", n.GetWorkingSuc().IP)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = client.Call(\"NetNode.PutValBackup\", req, done)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = client.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\t*done = true\n\treturn nil\n}", "func (am AttributeMap) InsertString(k string, v string) {\n\tif _, existing := am.Get(k); !existing {\n\t\t*am.orig = append(*am.orig, newAttributeKeyValueString(k, v))\n\t}\n}", "func (am AttributeMap) InsertString(k string, v string) {\n\tif _, existing := am.Get(k); !existing {\n\t\t*am.orig = append(*am.orig, newAttributeKeyValueString(k, v))\n\t}\n}", "func (hm *HashMap) Add(key []byte, val []byte) error {\n\t// check the whether hashMap if full\n\tif hm.Full() {\n\t\treturn fmt.Errorf(\"hashMap is full\")\n\t}\n\n\t// validate hashKey\n\tif err := hm.np.validate(key, val); err != nil {\n\t\treturn err\n\t}\n\n\t// 1. calculate the hash num\n\thashNum := hm.hashFunc(key) % uint64(hm.haSize)\n\n\t// 2. check if the key slice exist\n\tif hm.exist(hashNum, key) {\n\t\treturn nil\n\t}\n\n\t// 3. add the key into nodePool\n\thead := hm.ha[hashNum]\n\tnewHead, err := hm.np.add(head, key, val)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// 4. 
point to the new list head node\n\thm.ha[hashNum] = newHead\n\n\treturn nil\n}", "func (filter *BloomFilter) Insert(msg []byte) error {\n\tfor _, v := range filter.keys {\n\t\tval := filter.hashFunc(msg, v)\n\t\tfilter.bitMap.SetOne(val % filter.m)\n\t}\n\treturn nil\n}", "func (s *TreeMap) Insert(key interface{}, value interface{}) interface{} {\n\ts.CheckInit()\n\tif key == nil {\n\t\tlog.Panic(\"Nil key.\")\n\t}\n\t// Use kc from here on\n\tkc, ok := key.(collection.Comparer)\n\tif !ok {\n\t\tlog.Fatal(\"Key doesn't implement collection.Comparer.\")\n\t}\n\n\tif s.Threadsafe() {\n\t\ts.Lockb.Lock()\n\t\tdefer s.Lockb.Unlock()\n\t}\n\n\tcurrent := s.root\n\tvisited := worklist.NewStackUnsafe()\n\n\tfor current != nil && kc.Compare(current.K) != 0 {\n\t\tvisited.Push(current)\n\t\tdirection := direction(kc, current.K)\n\t\tcurrent = child(current, direction)\n\t}\n\n\tif current == nil {\n\t\tcurrent = newNode(kc, value, 0)\n\t\tfor !visited.Empty() {\n\t\t\tparent, _ := visited.Pop().(*node)\n\t\t\tdir1 := direction(kc, parent.K)\n\t\t\tparent.C[dir1] = current\n\t\t\tif balanced(current) {\n\t\t\t\tdir2 := tallestDir(current)\n\t\t\t\tparent = rotate(parent, dir1, dir2)\n\t\t\t}\n\t\t\tupdateHeight(parent)\n\t\t\tcurrent = parent\n\t\t}\n\t\ts.Sizeb += 1\n\t\ts.root = current\n\t} else if kc.Compare(current.K) == 0 {\n\t\told := current.V\n\t\tcurrent.V = value\n\t\treturn old\n\t}\n\treturn nil\n}", "func (r *RedisCli) Insert(jk string) error {\n\tk := r.Hash(jk)\n\terr := r.Client.Set(k, jk, time.Hour*24).Err()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Printf(\"Insertion into redis, KEY: %s\", k)\n\treturn nil\n}", "func (b *Builder) Insert(key []byte, val uint64) error {\n\t// ensure items are added in lexicographic order\n\tif bytes.Compare(key, b.last) < 0 {\n\t\treturn ErrOutOfOrder\n\t}\n\tif len(key) == 0 {\n\t\tb.len = 1\n\t\tb.unfinished.setRootOutput(val)\n\t\treturn nil\n\t}\n\n\tprefixLen, out := b.unfinished.findCommonPrefixAndSetOutput(key, val)\n\tb.len++\n\terr := b.compileFrom(prefixLen)\n\tif err != nil {\n\t\treturn err\n\t}\n\tb.copyLastKey(key)\n\tb.unfinished.addSuffix(key[prefixLen:], out)\n\n\treturn nil\n}", "func (sm *StackedMap) Put(key, value interface{}) {\n\ttop := sm.mapStack.top().(*level)\n\ttop.kvs[key] = value\n\ttop.journal = append(top.journal, &journalEntry{key: key, value: value})\n\n\t// records key revision for fast access\n\trev := len(sm.mapStack) - 1\n\tif revs, ok := sm.keyRevisionMap[key]; ok {\n\t\tif revs.top().(int) != rev {\n\t\t\trevs.push(rev)\n\t\t}\n\t} else {\n\t\tsm.keyRevisionMap[key] = &stack{rev}\n\t}\n}", "func (tree *Tree) Insert(key int, value string) {\n\ttree.Root = tree.Root.insert(key, value)\n}", "func (v *Value) insert(params ...interface{}) (interface{}, error) {\n\tif len(params) != 2 {\n\t\treturn nil, newParamLenErr(len(params), 2)\n\t}\n\n\tvar (\n\t\tok bool\n\t\tkey string\n\t\tvalue string\n\t)\n\n\tkey, ok = params[0].(string)\n\tif !ok {\n\t\treturn nil, newParamTypeErr(params[0], key)\n\t}\n\n\tvalue, ok = params[1].(string)\n\tif !ok {\n\t\treturn nil, newParamTypeErr(params[1], value)\n\t}\n\n\t(*v)[key] = value\n\treturn key, nil\n}", "func (m *Hashmap) Put(key int, val string) {\n\tif m.size*loadFactor <= m.entries {\n\t\tm.rehash()\n\t}\n\n\tidx := hash(key) % m.size\n\tvar curr *node = &m.buckets[idx]\n\t// If the key doesn't exist yet, traverse\n\t// the list until it reaches the end.\n\t// If the key does exist, traverse until\n\t// it reaches the key\n\tfor curr.next != nil && curr.key != 
key {\n\t\tcurr = curr.next\n\t}\n\tif curr.next == nil {\n\t\t// If it reached the end of the list (didn't encounter the key)\n\t\tcurr.key = key\n\t\tcurr.next = &node{0, \"\", nil}\n\t}\n\tcurr.val = val\n\tm.entries++\n}", "func (manager *KeysManager) Insert(key jose.JSONWebKey) {\n\tmanager.KeyMap[key.KeyID] = &key\n\tmanager.KeyList = append(manager.KeyList, &key)\n}", "func (i IntHashMap[T, V]) Add(key T, val V) {\n\thash := key.Hash()\n\ti.hashToKey[hash] = key\n\ti.hashToVal[hash] = val\n}", "func (c *KeyStringValueChanger) Add(k, v string) error {\n\tif c.err != nil {\n\t\treturn c.err\n\t}\n\t_, err := c.node.addChild(keyValue{k, v})\n\treturn err\n}", "func (db *DB) Put(key []byte, value []byte) error {\n\tif len(key) > MaxKeyLength {\n\t\treturn errKeyTooLarge\n\t}\n\tif len(value) > MaxValueLength {\n\t\treturn errValueTooLarge\n\t}\n\th := db.hash(key)\n\tdb.metrics.Puts.Add(1)\n\tdb.mu.Lock()\n\tdefer db.mu.Unlock()\n\n\tsegID, offset, err := db.datalog.put(key, value)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsl := slot{\n\t\thash: h,\n\t\tsegmentID: segID,\n\t\tkeySize: uint16(len(key)),\n\t\tvalueSize: uint32(len(value)),\n\t\toffset: offset,\n\t}\n\n\tif err := db.put(sl, key); err != nil {\n\t\treturn err\n\t}\n\n\tif db.syncWrites {\n\t\treturn db.sync()\n\t}\n\treturn nil\n}", "func (c *kvStatsCollector) Add(key sstable.InternalKey, value []byte) error {\n\tc.curSize += len(key.UserKey) + len(value)\n\tc.lastKey = append(c.lastKey[:0], key.UserKey...)\n\tif c.curSize >= c.bucketSize {\n\t\tc.addBucket()\n\t}\n\treturn nil\n}", "func (kv *KV) Add(k, v string) error {\n\treturn kv.db.Update(func(tx *bolt.Tx) error {\n\t\tb, err := tx.CreateBucketIfNotExists(kvBucket)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn b.Put([]byte(k), []byte(v))\n\t})\n}", "func (me *TrieNode) InsertOrUpdate(key *TrieKey, data interface{}) (newHead *TrieNode, err error) {\n\tif key == nil {\n\t\treturn nil, fmt.Errorf(\"cannot insert nil key\")\n\t}\n\treturn me.insert(&TrieNode{TrieKey: *key, Data: data}, true, true, 0)\n}", "func (p OrderedMap) Add(k interface{}, v interface{}) {\n\t_, e := p.m[k]\n\tif e {\n\t\tp.m[k] = v\n\t} else {\n\t\tp.keys = append(p.keys, k)\n\t\tp.m[k] = v\n\t}\n}", "func (t *Tree) Insert(key interface{}, value interface{}, issmaller Comparable) (new *TreeNode, err error) {\n\tif t == nil {\n\t\treturn nil, errors.Errorf(\"unable to insert into nil tree\")\n\t}\n\tt.lock.Lock()\n\tdefer t.lock.Unlock()\n\tt.root, new = t.root.insert(key, value, issmaller)\n\tt.Count++\n\treturn new, nil\n}", "func (me *TrieNode) Insert(key *TrieKey, data interface{}) (newHead *TrieNode, err error) {\n\tif key == nil {\n\t\treturn nil, fmt.Errorf(\"cannot insert nil key\")\n\t}\n\treturn me.insert(&TrieNode{TrieKey: *key, Data: data}, true, false, 0)\n}", "func (m *OMap) Add(k string, v interface{}) error {\n\tif _, ok := m.baseMap[k]; ok {\n\t\treturn ErrorKeyExisting\n\t}\n\tm.add(k, v)\n\treturn nil\n}", "func (d Database) Insert(key string, value string) error {\n\tif d.connection == nil {\n\t\treturn errors.New(\"connection not initialized\")\n\t}\n\t_, err := d.connection.Set(d.ctx, key, value, 0).Result()\n\treturn err\n}", "func (t *vhostTrie) Insert(key string, site *SiteConfig) {\n\thost, path := t.splitHostPath(key)\n\tif _, ok := t.edges[host]; !ok {\n\t\tt.edges[host] = newVHostTrie()\n\t}\n\tt.edges[host].insertPath(path, path, site)\n}", "func (ds *DataStore) Add(key uint64, value []byte) error {\n\tds.dataStoreLock.Lock()\n\tdefer 
ds.dataStoreLock.Unlock()\n\tif _, present := ds.kvSet[key]; present {\n\t\tds.kvTime[key] = time.Now().UnixNano()\n\t\treturn fmt.Errorf(\"Key %d already exists\", key)\n\t}\n\tds.kvSet[key] = value\n\treturn nil\n}", "func (q *dStarLiteQueue) insert(u *dStarLiteNode, k key) {\n\tu.key = k\n\theap.Push(q, u)\n}", "func (b *BigcacheCache) Insert(key string, value []byte, ttl time.Duration) error {\n\treturn b.cache.Set(key, value)\n}", "func (t tagSet) Insert(p tagPair) tagSet {\n\ti := t.Search(p.key)\n\tif i < len(t) && t[i].key == p.key {\n\t\tt[i].value = p.value\n\t\treturn t // exists\n\t}\n\t// append t to the end of the slice\n\tif i == len(t) {\n\t\treturn append(t, p)\n\t}\n\t// insert p\n\tt = append(t, tagPair{})\n\tcopy(t[i+1:], t[i:])\n\tt[i] = p\n\treturn t\n}", "func (m *MyMap) Add(k, v string) {\n\t(*m)[k] = v\n}", "func (h *hashDisk) Set(value []byte, fileIndex, fileOffset uint32) error {\n\tif bytes.Equal(value, h.emptyValue) {\n\t\treturn ErrInvalidKey\n\t}\n\tif h.totalEntries >= h.MaxSize {\n\t\treturn ErrNoSpace\n\t}\n\tnewEntry := true\n\t// Compute hash\n\tslot := hyperloglog.MurmurBytes(value) % h.entries\n\toffset := slot * h.entrySize\n\tfor { // Try to find an empty slot\n\t\tslotValue := h.m[offset : offset+keySize]\n\t\tif bytes.Equal(slotValue, value) {\n\t\t\t// Found same key, override. We could just return instead but it was found in\n\t\t\t// benchmarks that it hardly change anything at all so it's better to be able to override\n\t\t\tnewEntry = false\n\t\t\tbreak\n\t\t}\n\t\tif bytes.Equal(slotValue, h.emptyValue) {\n\t\t\t// Found empty slot\n\t\t\tbreak\n\t\t}\n\t\tslot = (slot + 1) % h.entries\n\t\toffset = slot * h.entrySize\n\t}\n\t// Insert\n\tindexes := make([]byte, 4+4)\n\tencoding.PutUint32(indexes[0:4], fileIndex)\n\tencoding.PutUint32(indexes[4:8], fileOffset)\n\tcopy(h.m[offset:offset+keySize], value)\n\tcopy(h.m[offset+keySize:offset+keySize+8], indexes)\n\tif newEntry {\n\t\th.totalEntries++\n\t}\n\treturn nil\n}", "func (mmap *stateSyncMap) add(key string, value interface{}) error {\n\treturn mmap.Add(key, value)\n}", "func (hm *MyHashMap) Put(key int, value int) {\n\tvar prev *Node\n\tindex := hm.HashFunc(key)\n\tentry := hm.Arrays[index]\n\tfor entry != nil && entry.Key != key {\n\t\tprev = entry\n\t\tentry = entry.Next\n\t}\n\n\tif entry == nil {\n\t\tif prev == nil {\n\t\t\tprev = hm.NewNode(key, value)\n\t\t\thm.Arrays[index] = prev\n\t\t} else {\n\t\t\tentry = hm.NewNode(key, value)\n\t\t\tprev.Next = entry\n\t\t}\n\t} else {\n\t\tentry.Value = value\n\t}\n\n}", "func (d Dictionary) Add(key, value string) error {\n\t_, err := d.Search(key)\n\tswitch err {\n\tcase ErrNotFound:\n\t\td[key] = value\n\tcase nil:\n\t\treturn ErrKeyExists\n\tdefault:\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s *Action) Insert(c *cli.Context) error {\n\tctx := ctxutil.WithGlobalFlags(c)\n\techo := c.Bool(\"echo\")\n\tmultiline := c.Bool(\"multiline\")\n\tforce := c.Bool(\"force\")\n\tappending := c.Bool(\"append\")\n\n\targs, kvps := parseArgs(c)\n\tname := args.Get(0)\n\tkey := args.Get(1)\n\n\tif name == \"\" {\n\t\treturn exit.Error(exit.NoName, nil, \"Usage: %s insert name\", s.Name)\n\t}\n\n\treturn s.insert(ctx, c, name, key, echo, multiline, force, appending, kvps)\n}", "func (dc *DigestCache) Add(hash []byte, text []byte) {\n\tdc.Records[string(hash)] = text\n}", "func (i *BTreeIndex) Insert(key string) {\r\n\ti.Lock()\r\n\tdefer i.Unlock()\r\n\tif i.BTree == nil || i.LessFunction == nil {\r\n\t\tpanic(\"uninitialized 
index\")\r\n\t}\r\n\ti.BTree.ReplaceOrInsert(btreeString{s: key, l: i.LessFunction})\r\n}", "func (c *KeyValueChanger) Add(k string, v interface{}) error {\n\tif c.err != nil {\n\t\treturn c.err\n\t}\n\t_, err := c.node.addChild(keyValue{k, v})\n\treturn err\n}", "func (tt *TtTable) Put(key position.Key, move Move, depth int8, value Value, valueType ValueType, mateThreat bool) {\n\n\t// if the size of the TT = 0 we\n\t// do not store anything\n\tif tt.maxNumberOfEntries == 0 {\n\t\treturn\n\t}\n\n\t// read the entries for this hash\n\tentryDataPtr := &tt.data[tt.hash(key)]\n\t// encode value into the move if it is a valid value (min < v < max)\n\tif value.IsValid() {\n\t\tmove = move.SetValue(value)\n\t} else {\n\t\ttt.log.Warningf(\"TT Put: Tried to store an invalid Value into the TT %s (%d)\", value.String(), int(value))\n\t}\n\n\ttt.Stats.numberOfPuts++\n\n\t// NewTtTable entry\n\tif entryDataPtr.Key == 0 {\n\t\ttt.numberOfEntries++\n\t\tentryDataPtr.Key = key\n\t\tentryDataPtr.Move = move\n\t\tentryDataPtr.Depth = depth\n\t\tentryDataPtr.Age = 1\n\t\tentryDataPtr.Type = valueType\n\t\tentryDataPtr.MateThreat = mateThreat\n\t\treturn\n\t}\n\n\t// Same hash but different position\n\tif entryDataPtr.Key != key {\n\t\ttt.Stats.numberOfCollisions++\n\t\t// overwrite if\n\t\t// - the new entry's depth is higher\n\t\t// - the new entry's depth is same and the previous entry is old (is aged)\n\t\tif depth > entryDataPtr.Depth ||\n\t\t\t(depth == entryDataPtr.Depth && entryDataPtr.Age > 1) {\n\t\t\ttt.Stats.numberOfOverwrites++\n\t\t\tentryDataPtr.Key = key\n\t\t\tentryDataPtr.Move = move\n\t\t\tentryDataPtr.Depth = depth\n\t\t\tentryDataPtr.Age = 1\n\t\t\tentryDataPtr.Type = valueType\n\t\t\tentryDataPtr.MateThreat = mateThreat\n\t\t}\n\t\treturn\n\t}\n\n\t// Same hash and same position -> update entry?\n\tif entryDataPtr.Key == key {\n\t\ttt.Stats.numberOfUpdates++\n\t\t// we always update as the stored moved can't be any good otherwise\n\t\t// we would have found this during the search in a previous probe\n\t\t// and we would not have come to store it again\n\t\tentryDataPtr.Key = key\n\t\tentryDataPtr.Move = move\n\t\tentryDataPtr.Depth = depth\n\t\tentryDataPtr.Age = 1\n\t\tentryDataPtr.Type = valueType\n\t\tentryDataPtr.MateThreat = mateThreat\n\t\treturn\n\t}\n}", "func (kv *KVStore) Put(key, value string) error {\n\tpayload := kvPayload{\n\t\tKey: key,\n\t\tValue: value,\n\t\tOp: OpPut,\n\t}\n\n\t_, err := kv.db.Add(&payload)\n\treturn err\n}", "func (w *WindowedMap) Put(uid UID, value interface{}) {\n\tif _, ok := w.uidMap[uid]; ok {\n\t\tw.Remove(uid)\n\t}\n\titem := &uidItem{uid, w.clock.Now().Add(w.lifetime), -1, value}\n\theap.Push(&w.uidList, item)\n\tw.uidMap[uid] = item\n}", "func (ring *HashRing) Put(key string, value interface{}) interface{} {\n\n\tring.Lock()\n\tdefer ring.Unlock()\n\n\tid := ring.hash([]byte(key))\n\n\tif old, ok := ring.values[id]; ok {\n\t\treturn old\n\t}\n\n\tring.circle = append(ring.circle, id)\n\n\tring.values[id] = value\n\n\tsort.Sort(ring.circle)\n\n\treturn nil\n}", "func (d *DB) Insert(keyVal *Pair) (bool, error) {\n\t// find insert position\n\tfound, cursor := recursiveSearch(keyVal.Key, d.root)\n\tif found && noDup { // found dup key\n\t\tkv := getKeyVal(cursor)\n\t\tlog.Printf(\"found dup key: %v -> %v\\n\", *keyVal, *kv)\n\t\tsetKeyVal(cursor, keyVal) // overwrite dup key\n\t\treturn true, nil\n\t}\n\tif cursor.Node == nil {\n\t\tlog.Panic(\"found invalid cursor!\")\n\t}\n\t// insert this kv pair first to make it really full;\n\tok, err := 
d.insertIntoNode(cursor, keyVal)\n\tif !ok {\n\t\tlog.Printf(\"failed insertIntoNode - cursor:%v, kv:%v\", cursor, keyVal)\n\t\treturn false, err\n\t}\n\treturn true, nil\n}", "func (mp *mirmap) insert(k voidptr, v voidptr) bool {\n\tmp.expand()\n\n\t// mp.keyalg.hash(&k, mp.keysz) // TODO compiler\n\tfnptr := mp.keyalg.hash\n\thash := fnptr(k, mp.keysz)\n\tidx := hash % usize(mp.cap_)\n\t// println(idx, hash, mp.cap_)\n\n\tvar onode *mapnode = mp.ptr[idx]\n\tfor onode != nil {\n\t\tif onode.hash == hash {\n\t\t\t// println(\"replace\", idx, k)\n\t\t\tmemcpy3(onode.val, v, mp.valsz)\n\t\t\treturn true\n\t\t}\n\t\tonode = onode.next\n\t}\n\n\tnode := &mapnode{}\n\t// node.key = k\n\t// node.val = v\n\tnode.key = malloc3(mp.keysz)\n\tmemcpy3(node.key, k, mp.keysz)\n\tnode.val = malloc3(mp.valsz)\n\tmemcpy3(node.val, v, mp.valsz)\n\tnode.hash = hash\n\tnode.next = mp.ptr[idx]\n\n\t// println(\"insert\", mp.len_, idx, mp.cap_, node)\n\tmp.ptr[idx] = node\n\tmp.len_++\n\n\treturn true\n}", "func Insert(node *Node, key int, value string) *Node {\n\tif node == nil {\n\t\treturn &Node{key, value, nil, nil}\n\t} else {\n\t\tif node.Key == key {\n\t\t\treturn node\n\t\t} else {\n\t\t\tif node.Key > key {\n\t\t\t\treturn &Node{node.Key,node.Value,Insert(node.Left, key, value), node.Right}\n\t\t\t} else { // node.Key < key\n\t\t\t\treturn &Node{node.Key,node.Value,node.Left,Insert(node.Right, key, value)}\n\t\t\t}\n\t\t}\n\t}\n}", "func (store *KVStore) add(key string, val []byte, isOrigin bool) {\n\tstore.ht[key] = val\n\tstore.isOrigin[key] = isOrigin\n}", "func (m *HashMap) Put(t TxDesc) error {\n\tm.lock.Lock()\n\tdefer m.lock.Unlock()\n\n\t// store tx\n\ttxID, err := t.tx.CalculateHash()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar k txHash\n\tcopy(k[:], txID)\n\n\t// Let's check if we're not inserting a double key. This is, is essence,\n\t// fine for the map, but having a duplicate entry in `sorted` can cause\n\t// a panic in CalculateRoot, in the block generator.\n\n\t_, ok := m.data[k]\n\tif ok {\n\t\treturn ErrAlreadyExists\n\t}\n\n\tm.data[k] = t\n\tm.txsSize += uint32(t.size)\n\n\t// sort keys by Fee\n\t// Bulk sort like (sort.Slice) performs a few times slower than\n\t// a simple binarysearch&shift algorithm.\n\tfee, err := t.tx.Fee()\n\tif err != nil {\n\t\tlog.WithError(err).Warn(\"fee could not be read\")\n\t}\n\n\tindex := sort.Search(len(m.sorted), func(i int) bool {\n\t\treturn m.sorted[i].f < fee\n\t})\n\n\tm.sorted = append(m.sorted, keyFee{})\n\n\tcopy(m.sorted[index+1:], m.sorted[index:])\n\n\tm.sorted[index] = keyFee{k: k, f: fee}\n\treturn nil\n}", "func (_m *IDbHandler) AddOrUpdate(keyName string, value []byte) error {\n\targs := _m.Called(keyName, value)\n\treturn args.Error(0)\n}" ]
[ "0.6494451", "0.6408475", "0.6405259", "0.6405259", "0.6362644", "0.63473076", "0.63400483", "0.63359815", "0.62914497", "0.6288232", "0.6270945", "0.6207122", "0.6206332", "0.6141829", "0.61015224", "0.60288703", "0.60228056", "0.5992405", "0.59852684", "0.59811723", "0.59418166", "0.5927297", "0.5919693", "0.5878037", "0.5875371", "0.5859271", "0.58577377", "0.58431137", "0.58049625", "0.5787842", "0.57813203", "0.5778436", "0.5774901", "0.5762044", "0.5722739", "0.57127994", "0.56849766", "0.5661789", "0.56472313", "0.5644902", "0.5636997", "0.5631733", "0.56206", "0.56048137", "0.5571668", "0.5565569", "0.55622184", "0.55477786", "0.55446553", "0.55385435", "0.5522419", "0.5513213", "0.55131274", "0.551075", "0.551075", "0.5503243", "0.5485806", "0.5483801", "0.5464543", "0.5460066", "0.54559875", "0.5453087", "0.54348236", "0.54259586", "0.54253006", "0.5417282", "0.54129326", "0.5408681", "0.5406133", "0.5405342", "0.5402751", "0.5401225", "0.5380708", "0.53792125", "0.5366934", "0.53657985", "0.53610104", "0.53600806", "0.53566295", "0.53475374", "0.5346232", "0.5345243", "0.53374517", "0.5336381", "0.53346986", "0.5315201", "0.53145146", "0.53024346", "0.52963156", "0.52914685", "0.5289425", "0.5280999", "0.5278767", "0.52766424", "0.5274782", "0.5273495", "0.52645", "0.5262971", "0.5260035", "0.5258639" ]
0.65731424
0
Len returns the total number of entries in the map.
func (m Map) Len() int { return m.n }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Len(m Map) int {\n\treturn m.count()\n}", "func (m Map) Len() (n int) {\n\treturn len(m)\n}", "func (m *Map) Len() int {\n\tm.store.RLock()\n\tdefer m.store.RUnlock()\n\tn := len(m.store.kv)\n\treturn n\n}", "func (t *Map) Len() int {\n\treturn t.keys.Len()\n}", "func (tt Map) Len() int {\n\treturn len(tt)\n}", "func (hm *HashMap) Len() int {\n\treturn hm.np.elemNum()\n}", "func (self *Map) Len() int {\n\treturn len(self.MapNative())\n}", "func (m *Map) Len() int {\n\treturn int(m.len)\n}", "func (hm HashMap) Len(ctx context.Context) (int64, error) {\n\treq := newRequest(\"*2\\r\\n$4\\r\\nHLEN\\r\\n$\")\n\treq.addString(hm.name)\n\treturn hm.c.cmdInt(ctx, req)\n}", "func (fi *FastIntegerHashMap) Len() uint64 {\n\treturn fi.count\n}", "func (s *Int64Map) Len() int {\n\treturn int(atomic.LoadInt64(&s.length))\n}", "func (m *HashMap) Len() int {\n\tm.lock.RLock()\n\tdefer m.lock.RUnlock()\n\treturn len(m.data)\n}", "func (m *Map[K, V]) Len() int {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\n\treturn len(m.inner)\n}", "func (tt PMap) Len() int {\n\treturn len(tt)\n}", "func (m Map) Length() int {\n\treturn len(m)\n}", "func (hm HashMap) Len() uint32 {\n\treturn hm.len\n}", "func (sm *ScanMap) Length() int {\n\treturn len(sm.hashMap)\n}", "func (m *Map) Len() int {\n\tm.RLock()\n\tl := len(m.Items)\n\tm.RUnlock()\n\treturn l\n}", "func (m *OrderedIntMap) Len() int { return len(m.m) }", "func (this *LruMap) Len() (size int) {\n\treturn this.m.Len()\n}", "func (p *SliceOfMap) Len() int {\n\tif p == nil {\n\t\treturn 0\n\t}\n\treturn len(*p)\n}", "func (m *OrderedMap) Len() int { return len(m.m) }", "func (j *JPNSoftwareMap) Len() int { return len(*j) }", "func (p IslandTrustMap) Len() int { return len(p) }", "func (m *systrayMap) Len() int {\n\tm.lock.RLock()\n\tl := len(m.m)\n\tm.lock.RUnlock()\n\treturn l\n}", "func (m *Mapping) Len() int { return len(m.Pairs) }", "func (m *OrderedMap) Len() int {\n\treturn len(m.keypairs)\n}", "func (p OrderedMap) Len() int {\n\treturn len(p.keys)\n}", "func (c *Cache) Len() int {\n\treturn len(c.keyMap)\n}", "func (m *OrderedUintMap) Len() int { return len(m.m) }", "func (s *mapSorter) Len() int {\n\treturn len(s.keys)\n}", "func (m *OrderedMap[K, V]) Len() int {\n\tif m == nil {\n\t\treturn 0\n\t}\n\treturn m.len\n}", "func (c *OrderedMap) Len() int {\n\treturn len(c.Keys)\n}", "func (m OrderedMap[K, V]) Len() int {\n\treturn len(m.items)\n}", "func (m *dirtySeriesMap) Len() int {\n\treturn len(m.lookup)\n}", "func (accounts *Accounts) Len() int {\n\tif (accounts == nil) || (accounts.Map == nil) {\n\t\treturn 0\n\t}\n\treturn len(accounts.Map)\n}", "func (cm *CMap) Size() int {\n\treturn len(cm.Entries)\n}", "func (m *MapStringUint64) Len() int {\n\tm.mu.RLock()\n\tl := len(m.m)\n\tm.mu.RUnlock()\n\treturn l\n}", "func (m *privateSetMap) Len() int {\n\treturn int(m.len)\n}", "func (m *TMap) Length() int {\n\treturn m.root.Length()\n}", "func (i IntHashMap[T, V]) Len() int {\n\t// DEBUG:\n\tif len(i.hashToKey) != len(i.hashToVal) {\n\t\tpanic(\"hashToKey and hashToVal have different lengths\")\n\t}\n\treturn len(i.hashToKey)\n}", "func (rm *ResultMap) Len() int {\n\tl := 0\n\trm.sm.Range(func(_, _ interface{}) bool {\n\t\tl++\n\t\treturn true\n\t})\n\treturn l\n}", "func (i StringHashMap[T, V]) Len() int {\n\t// DEBUG:\n\tif len(i.hashToKey) != len(i.hashToVal) {\n\t\tpanic(\"hashToKey and hashToVal have different lengths\")\n\t}\n\treturn len(i.hashToKey)\n}", "func (s *sortMap) Len() int {\n\treturn reflect.ValueOf(s.data).Len()\n}", "func (m 
*OrderedMap[K,V]) Len() int {\n\treturn len(m.mp)\n}", "func (that *StrAnyMap) Size() int {\n\tthat.mu.RLock()\n\tlength := len(that.data)\n\tthat.mu.RUnlock()\n\treturn length\n}", "func (ts tagMap) Len() int {\n\treturn len(ts)\n}", "func (m *MultiMap) Size() int {\n\tsize := 0\n\tfor _, value := range m.m {\n\t\tsize += len(value)\n\t}\n\treturn size\n}", "func (sm StringMap) Len() int {\n\treturn len(*sm.orig)\n}", "func (sm StringMap) Len() int {\n\treturn len(*sm.orig)\n}", "func (m *mapReact) Length() int {\n\tvar l int\n\tm.ro.RLock()\n\tl = len(m.ma)\n\tm.ro.RUnlock()\n\treturn l\n}", "func (rm *FilteredResultMap) Len() int {\n\tl := 0\n\trm.sm.Range(func(_, _ interface{}) bool {\n\t\tl++\n\t\treturn true\n\t})\n\treturn l\n}", "func (lru *KeyLRU) Len() int {\n\treturn len(lru.m)\n}", "func (sc *Scavenger) Len() int {\n\tsc.mu.Lock()\n\tn := len(sc.entries)\n\tsc.mu.Unlock()\n\treturn n\n}", "func (c *StringValueMap) Len() int {\n\treturn len(c.value)\n}", "func (mm Uint64Uint64Map) Size() int {\n\treturn len(mm)\n}", "func (c *Cache) Len() int {\n\tn := 0\n\tfor _, shard := range c.shards {\n\t\tn += shard.Len()\n\t}\n\treturn n\n}", "func (sm safeMap) Len() int {\n\treply := make(chan interface{})\n\tsm <- commandData{action: COUNT, result: reply}\n\treturn (<-reply).(int)\n}", "func (am *ClientSetAtomicMap) Len() int {\n\treturn len(am.val.Load().(_ClientSetMap))\n}", "func (am AttributeMap) Len() int {\n\treturn len(*am.orig)\n}", "func (am AttributeMap) Len() int {\n\treturn len(*am.orig)\n}", "func (w *WindowedMap) Len() int {\n\treturn len(w.uidList)\n}", "func (tt *TtTable) Len() uint64 {\n\treturn tt.numberOfEntries\n}", "func (h *Hash) Len() int {\n\tsum := 0\n\tfor _, bucket := range h.buckets {\n\t\tsum += len(bucket)\n\t}\n\treturn sum\n}", "func (shortlist *Shortlist) Len() int {\n\tlength := 0\n\tfor _, entry := range shortlist.Entries {\n\t\tif entry != nil {\n\t\t\tlength++\n\t\t}\n\t}\n\treturn length\n}", "func (m Map) Count() int {\n\treturn m.Imm.Len()\n}", "func (m *Mapper) Len() int {\n\treturn len(m.content)\n}", "func (s *shard) len() uint64 {\n\ts.rwMutex.RLock()\n\tlength := uint64(len(s.entryIndexes))\n\ts.rwMutex.RUnlock()\n\n\treturn length\n}", "func (c *Cache) Len() int {\n\treturn len(c.entries)\n}", "func (a ByKey) Len() int {\n\treturn len(a)\n}", "func (hm HashMap) Size() int {\n\treturn hm.size\n}", "func (wm *W3CMap) Length() int {\n\tif wm == nil {\n\t\treturn 0\n\t}\n\treturn len(wm.forNode.HTMLNode().Attr)\n}", "func (a ByKey) Len() int { return len(a) }", "func (a ByKey) Len() int { return len(a) }", "func (a ByKey) Len() int { return len(a) }", "func (a ByKey) Len() int { return len(a) }", "func (a ByKey) Len() int { return len(a) }", "func (a ByKey) Len() int { return len(a) }", "func (a ByKey) Len() int { return len(a) }", "func (a ByKey) Len() int { return len(a) }", "func (a ByKey) Len() int { return len(a) }", "func (a ByKey) Len() int { return len(a) }", "func (a ByKey) Len() int { return len(a) }", "func (a ByKey) Len() int { return len(a) }", "func (a ByKey) Len() int { return len(a) }", "func (a ByKey) Len() int { return len(a) }", "func (a ByKey) Len() int { return len(a) }", "func (a ByKey) Len() int { return len(a) }", "func (a ByKey) Len() int { return len(a) }", "func (a ByKey) Len() int { return len(a) }", "func (a ByKey) Len() int { return len(a) }", "func (a ByKey) Len() int { return len(a) }", "func (a ByKey) Len() int { return len(a) }", "func (m Map) Size() int {\n\treturn m.set.Size()\n}", "func (l *LRU) Len() int 
{\n\tl.lazyInit()\n\tvar len int\n\tfor i := 0; i < l.nshards; i++ {\n\t\tlen += l.shards[i].Len()\n\t}\n\treturn len\n}", "func (m *OMap) Count() int {\n\treturn len(m.keys)\n}", "func (c *Cache) Len() int {\n\tvar len int\n\tfor _, shard := range c.shards {\n\t\tlen += shard.policy.Len()\n\t}\n\n\treturn len\n}", "func (c *NoReplKeyCache) Len() int {\n\tc.lock.RLock()\n\tlength := len(c.cache)\n\tc.lock.RUnlock()\n\treturn length\n}", "func (h hashTables) Len() int { return len(h) }", "func (s *Set) Len() int {\n\treturn s.backingMap.Len()\n}" ]
[ "0.8701872", "0.8485359", "0.8478857", "0.8464097", "0.8455641", "0.8452391", "0.84095365", "0.83661574", "0.83578175", "0.8343607", "0.83260924", "0.83082867", "0.8297326", "0.8243439", "0.8224796", "0.8206305", "0.81053", "0.8102362", "0.807258", "0.80620295", "0.8028638", "0.8021705", "0.80125314", "0.79875773", "0.7982418", "0.7981766", "0.7962034", "0.79533124", "0.7936057", "0.7921086", "0.7920095", "0.78814787", "0.78423995", "0.7835997", "0.78272027", "0.7819052", "0.7802523", "0.7802253", "0.77966744", "0.77901405", "0.77715945", "0.7764561", "0.7761482", "0.7752915", "0.7731472", "0.7720569", "0.77079505", "0.76862127", "0.76320946", "0.76320946", "0.7578892", "0.7570916", "0.75420505", "0.7533134", "0.7509898", "0.7463084", "0.74368817", "0.74129206", "0.74042684", "0.7401709", "0.7401709", "0.73957014", "0.73616827", "0.7350345", "0.7343555", "0.7342259", "0.73309845", "0.73261464", "0.7325397", "0.73191303", "0.7300376", "0.7295106", "0.7286039", "0.7286039", "0.7286039", "0.7286039", "0.7286039", "0.7286039", "0.7286039", "0.7286039", "0.7286039", "0.7286039", "0.7286039", "0.7286039", "0.7286039", "0.7286039", "0.7286039", "0.7286039", "0.7286039", "0.7286039", "0.7286039", "0.7286039", "0.7286039", "0.7285952", "0.7277565", "0.7268717", "0.7263373", "0.7255748", "0.7252354", "0.7237513" ]
0.8707754
0
Each enumerates all key-value pairs in map m in deterministic order. TODO(marius): we really ought to use a representation that's more amenable to such (common) operations.
func (m Map) Each(fn func(k, v T)) {
	digests := make([]digest.Digest, 0, len(m.tab))
	for d := range m.tab {
		digests = append(digests, d)
	}
	sort.Slice(digests, func(i, j int) bool { return digests[i].Less(digests[j]) })
	for _, d := range digests {
		for entry := *m.tab[d]; entry != nil; entry = entry.Next {
			fn(entry.Key, entry.Value)
		}
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func iterateMap() {\n\tnames := map[string]string{\n\t\t\"Kakashi\": \"Hatake\",\n\t\t\"Konohamaru\": \"Sarutobi\",\n\t\t\"Iruka\": \"Umino\",\n\t}\n\tfor k, v := range names {\n\t\tfmt.Println(k, v)\n\t}\n}", "func (m *OrderedMap) Map() map[string][]string {\n\tmapOut := map[string][]string{}\n\tfor _, keyPair := range m.keypairs {\n\t\tkey := keyPair[0]\n\t\tval := keyPair[1]\n\t\tmapOut[key] = append(mapOut[key], val)\n\t}\n\treturn mapOut\n}", "func (m *bimap) Iterate(f IterateFunc) {\n\tdefer m.mtx.RUnlock()\n\tm.mtx.RLock()\n\n\tfor k, v := range m.keyToVal {\n\t\tf(k, v)\n\t}\n}", "func (m *Map) Each(f func(key interface{}, value interface{})) {\n\titerator := m.Iterator()\n\tfor iterator.Next() {\n\t\tf(iterator.Key(), iterator.Value())\n\t}\n}", "func (m *OrderedMap) Values() []interface{} {\n\tvalues := make([]interface{}, 0)\n\tfor _, key := range m.keys {\n\t\tvalues = append(values, m.m[key])\n\t}\n\treturn values\n}", "func (cm ConcMap[K, V]) Iterate(f func(K, V)) {\n\tcm.RLock()\n\tdefer cm.RUnlock()\n\tfor key, val := range cm.m {\n\t\tf(key, val)\n\t}\n}", "func (m *OrderedMap) Pairs() []KeyValue {\n\treturn m.kvs\n}", "func Pm(v map[string][]string) {\n\tfor k := range v {\n\t\tfmt.Printf(\"%+v: %+v\\n\", k, v[k])\n\t}\n}", "func (mm Uint64Uint64Map) Foreach(f func(uint64, uint64)) {\n\tfor k, v := range mm {\n\t\tf(k, v)\n\t}\n}", "func testMapSetN(n int, m map[Key]interface{}) {\n\tfor i := 0; i < n; i++ {\n\t\tm[Key(i)] = i\n\t}\n}", "func main1() {\n\twg := sync.WaitGroup{}\n\tfor i := 0; i < 20; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tkey := strconv.Itoa(i)\n\t\t\tset(key, i)\n\t\t\tfmt.Printf(\"key:%v,value:%v \\n\", key, i)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\twg.Wait()\n\tfmt.Println(len(m))\n}", "func (m ViewModelCache) Iter() <-chan ViewModelCacheTuple {\n\tch := make(chan ViewModelCacheTuple)\n\tgo func() {\n\t\t// Foreach shard.\n\t\tfor _, shard := range m {\n\t\t\t// Foreach key, value pair.\n\t\t\tshard.RLock()\n\t\t\tfor key, val := range shard.items {\n\t\t\t\tch <- ViewModelCacheTuple{key, val}\n\t\t\t}\n\t\t\tshard.RUnlock()\n\t\t}\n\t\tclose(ch)\n\t}()\n\treturn ch\n}", "func (m *OrderedIntMap) Values() []interface{} {\n\tvalues := make([]interface{}, 0)\n\tfor _, key := range m.keys {\n\t\tvalues = append(values, m.m[key])\n\t}\n\treturn values\n}", "func (m *OrderedUintMap) Values() []interface{} {\n\tvalues := make([]interface{}, 0)\n\tfor _, key := range m.keys {\n\t\tvalues = append(values, m.m[key])\n\t}\n\treturn values\n}", "func (m *Map) Iter() <-chan MapItem {\n\tc := make(chan MapItem)\n\n\tgo func() {\n\t\tm.RLock()\n\t\tfor k, v := range m.Items {\n\t\t\tc <- MapItem{k, v}\n\t\t}\n\t\tclose(c)\n\t\tm.RUnlock()\n\t}()\n\n\treturn c\n}", "func (sm *DefaultIDSetMap) Each(f func(key int, value *IDSet)) {\n\tif sm.key != 0 {\n\t\tf(sm.key, sm.value)\n\t}\n\tif sm.m != nil {\n\t\tfor k, v := range sm.m {\n\t\t\tf(k, v)\n\t\t}\n\t}\n}", "func testMapGetN(n int, m map[Key]interface{}) {\n\tfor i := 0; i < n; i++ {\n\t\t_ = m[Key(i)]\n\t}\n}", "func (j Json) IterMap(f func(key string, value Json) bool) int {\n\tm, ok := j.asMap()\n\tif !ok {\n\t\treturn 0\n\t}\n\n\tcount := 0\n\tfor k, v := range m {\n\t\tcount++\n\t\tif !f(k, Json{v, true}) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn count\n}", "func (m *MultiMap) Values() []interface{} {\n\tvalues := make([]interface{}, m.Size())\n\tcount := 0\n\tfor _, vs := range m.m {\n\t\tfor _, value := range vs {\n\t\t\tvalues[count] = value\n\t\t\tcount++\n\t\t}\n\t}\n\treturn values\n}", "func (m 
*IndexedMap[PrimaryKey, Value, Idx]) Iterate(ctx context.Context, ranger Ranger[PrimaryKey]) (Iterator[PrimaryKey, Value], error) {\n\treturn m.m.Iterate(ctx, ranger)\n}", "func EncMap(e *marshal.Enc, m map[uint64]uint64) {\n\te.PutInt(uint64(len(m)))\n\tfor key, value := range m {\n\t\te.PutInt(key)\n\t\te.PutInt(value)\n\t}\n}", "func (kvs KeyValues) Map() map[string]string {\n\tvar m = make(map[string]string)\n\tfor idx := range kvs {\n\t\tm[kvs[idx].Key] = kvs[idx].Value\n\t}\n\treturn m\n}", "func (m *MapDB) ForEach(fn ForEachFn) (ended bool) {\n\tm.mux.RLock()\n\tdefer m.mux.RUnlock()\n\t// Iterate through all the map values\n\tfor key, value := range m.m {\n\t\tif ended = fn(key, value); ended {\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}", "func (self *Map) each(fn ItemFunc, opts IterOptions) error {\n\tif fn != nil {\n\t\tvar tn []string\n\n\t\tif opts.TagName != `` {\n\t\t\ttn = append(tn, opts.TagName)\n\t\t}\n\n\t\tkeys := self.StringKeys(tn...)\n\n\t\tif opts.SortKeys {\n\t\t\tsort.Strings(keys)\n\t\t}\n\n\t\tfor _, key := range keys {\n\t\t\tif err := fn(key, self.Get(key)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (bm ByteMap) Iterate(includeValue bool, includeBytes bool, cb func(key string, value interface{}, valueBytes []byte) bool) {\n\tif len(bm) == 0 {\n\t\treturn\n\t}\n\n\tkeyOffset := 0\n\tfirstValueOffset := 0\n\tfor {\n\t\tif keyOffset >= len(bm) {\n\t\t\tbreak\n\t\t}\n\t\tkeyLen := int(enc.Uint16(bm[keyOffset:]))\n\t\tkeyOffset += SizeKeyLen\n\t\tkey := string(bm[keyOffset : keyOffset+keyLen])\n\t\tkeyOffset += keyLen\n\t\tt := bm[keyOffset]\n\t\tkeyOffset += SizeValueType\n\t\tvar value interface{}\n\t\tvar bytes []byte\n\t\tif t != TypeNil {\n\t\t\tvalueOffset := int(enc.Uint32(bm[keyOffset:]))\n\t\t\tif firstValueOffset == 0 {\n\t\t\t\tfirstValueOffset = valueOffset\n\t\t\t}\n\t\t\tif includeValue {\n\t\t\t\tvalue = bm.decodeValueAt(valueOffset, t)\n\t\t\t}\n\t\t\tif includeBytes {\n\t\t\t\tbytes = bm.valueBytesAt(valueOffset, t)\n\t\t\t}\n\t\t\tkeyOffset += SizeValueOffset\n\t\t}\n\t\tif !cb(key, value, bytes) {\n\t\t\t// Stop iterating\n\t\t\treturn\n\t\t}\n\t\tif firstValueOffset > 0 && keyOffset >= firstValueOffset {\n\t\t\tbreak\n\t\t}\n\t}\n}", "func (m Map) Values() []interface{} {\n\tvalues := make([]interface{}, 0, len(m))\n\tfor _, v := range m {\n\t\tvalues = append(values, v)\n\t}\n\treturn values\n}", "func (m *OrderedMap) Keys() []string { return m.keys }", "func itermap(n node) []mapitem {\n\titems := []mapitem{}\n\tif n.Kind != yamlast.MappingNode {\n\t\tpanic(\"expected mapping node\")\n\t}\n\tfor i := 0; i < len(n.Children)-1; i += 2 {\n\t\tk := n.Children[i]\n\t\tv := n.Children[i+1]\n\t\titems = append(items, mapitem{k.Value, v})\n\t}\n\treturn items\n}", "func M(m map[string]string) []Tag {\n\ttags := make([]Tag, 0, len(m))\n\tfor k, v := range m {\n\t\ttags = append(tags, T(k, v))\n\t}\n\treturn tags\n}", "func sortMapByValue(m map[string]int) PairVector {\n\tpairs := make(PairVector, len(m))\n\ti := 0\n\tfor k, v := range m {\n\t\tpairs[i] = Pair{k, v}\n\t\ti++\n\t}\n\tsort.Sort(pairs)\n\treturn pairs\n}", "func (_m Map) Fill(m Map) {\n\tfor key, value := range _m {\n\t\tm[key] = value\n\t}\n}", "func Range(m interface{}, f func (key string, val interface {}) bool) {\n\tv := reflect.ValueOf(m)\n\tkeys := v.MapKeys()\n\n\tsort.Slice(keys, func(a,b int) bool {\n\t\treturn keys[a].String() < keys[b].String()\n\t})\n\n\tfor _,kv := range keys {\n\t\tvv := v.MapIndex(kv)\n\t\tresult := f(kv.String(), 
vv.Interface())\n\t\tif !result {\n\t\t\tbreak\n\t\t}\n\t}\n}", "func (t *Tags) Each(fn func(uint32, []byte)) {\n\tfor key, val := range t.keyvals {\n\t\tfn(key, val)\n\t}\n}", "func (m Map) Complete() {\n\tfor k := range m {\n\t\textraPriorities := m.priorityWalk(k)\n\t\textraPriorities = trimSelf(extraPriorities, k)\n\t\tm[k] = append(m[k], extras(extraPriorities, m[k])...)\n\t\tsort.Strings(m[k])\n\t}\n}", "func Values(m Map) []Value {\n\treturn m.vals()\n}", "func sortMapByValue(m map[string]int) PairList {\n\tp := make(PairList, len(m))\n\ti := 0\n\tfor k, v := range m {\n\t\tp[i] = Pair{k, v}\n\t\ti++\n\t}\n\tfmt.Printf(\">>> %v\\n\", p)\n\tsort.Sort(p)\n\tfmt.Printf(\">>> %v\\n\", p)\n\treturn p\n}", "func (m *OMap) Entries() []Entry {\n\tentries := make([]Entry, len(m.keys), 0)\n\tfor _, k := range m.keys {\n\t\tentry := Entry{Key: k, Value: m.baseMap[k]}\n\t\tentries = append(entries, entry)\n\t}\n\treturn entries\n}", "func sortMap(m map[string]string) []string {\r\n keys := make([]string, 0)\r\n for name, _ := range m {\r\n keys = append(keys, name)\r\n }\r\n sort.Strings(keys)\r\n return keys\r\n}", "func (c Collector) Each(fx StringEachfunc) {\n\tvar state bool\n\tfor k, v := range c {\n\t\tif state {\n\t\t\tbreak\n\t\t}\n\n\t\tfx(v, k, func() {\n\t\t\tstate = true\n\t\t})\n\t}\n}", "func (t *Map) Values() []interface{} {\n\tvals := make([]interface{}, t.keys.Len())\n\tfor e, i := t.keys.Front(), 0; e != nil; e, i = e.Next(), i+1 {\n\t\tvals[i] = t.entries[e.Value].val\n\t}\n\treturn vals\n}", "func (m *Map) Values() []avltree.TreeValue {\n\treturn m.inverseMap.Keys()\n}", "func (m pbMetricMap) Keys() []string {\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\treturn keys\n}", "func (m *MultiMap) Entries() []multimap.Entry {\n\tentries := make([]multimap.Entry, m.Size())\n\tcount := 0\n\tfor key, values := range m.m {\n\t\tfor _, value := range values {\n\t\t\tentries[count] = multimap.Entry{Key: key, Value: value}\n\t\t\tcount++\n\t\t}\n\t}\n\treturn entries\n}", "func (bm ByteMap) IterateValues(cb func(key string, value interface{}) bool) {\n\tbm.Iterate(true, false, func(key string, value interface{}, valueBytes []byte) bool {\n\t\treturn cb(key, value)\n\t})\n}", "func (m Map) orderedKeys() []string {\n\tkeys := []string{}\n\tfor key := range m {\n\t\tkeys = append(keys, key)\n\t}\n\tsort.Strings(keys)\n\treturn keys\n}", "func (m *OrderedIntMap) Keys() []int { return m.keys }", "func keys(m map[string]string) []string {\n\tvar keys []string\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\treturn keys\n}", "func (i IntHashMap[T, V]) Items() []struct {\n\tKey T\n\tVal V\n} {\n\tresult := make([]struct {\n\t\tKey T\n\t\tVal V\n\t}, 0, len(i.hashToKey))\n\tfor hash, key := range i.hashToKey {\n\t\tval := i.hashToVal[hash]\n\t\tresult = append(result, struct {\n\t\t\tKey T\n\t\t\tVal V\n\t\t}{key, val})\n\t}\n\treturn result\n}", "func keys(m map[string]int32) []string {\n\tkeys := make([]string, len(m))\n\ti := 0\n\tfor k := range m {\n\t\tkeys[i] = k\n\t\ti++\n\t}\n\treturn keys\n}", "func (m ConcurrentMap) Iter() <-chan Tuple {\n\tch := make(chan Tuple, 10)\n\n\tgo func() {\n\t\tfor _, shard := range m.shards {\n\t\t\tshard.RLock()\n\n\t\t\tfor key, val := range shard.items {\n\t\t\t\tch <- Tuple{key, val}\n\t\t\t}\n\n\t\t\tshard.RUnlock()\n\t\t}\n\t\tclose(ch)\n\t}()\n\n\treturn ch\n}", "func printMap(P map[string]map[string]Pair) {\n\tfmt.Println(\"{\")\n\tfor key,special_vector := range P 
{\n\t\tfmt.Print(\" \", key,\": \") // Stampa il nome dell'elemento\n\n\t\tfor process,pair := range special_vector {\n\t\t\tfmt.Print(process, \": \")\n\t\t\tprintPair(pair)\n\t\t}\n\n\t\tfmt.Println()\n\t}\n\tfmt.Print(\"}\")\n}", "func sortMapByValue(m map[string]int) PairList {\r\n\tp := make(PairList, len(m))\r\n\ti := 0\r\n\tfor k, v := range m {\r\n\t\tp[i] = Pair{k, v}\r\n\t\ti++\r\n\t}\r\n\tsort.Sort(p)\r\n\treturn p\r\n}", "func (c *SyncCollector) Copy(m map[string]interface{}) {\n\tfor v, k := range m {\n\t\tc.Set(v, k)\n\t}\n}", "func printMap(m map[string]string) {\n\tfor color, hex := range m { //key, value\n\t\tfmt.Println(\"Hex code for\", color, \"is\", hex)\n\t}\n}", "func add(finalMap dict, m dict) { // redfn\n\tfor key, value := range m {\n\t\tfinalMap[key] += value\n\t}\n}", "func NormalizeMap(zv zcode.Bytes) zcode.Bytes {\n\telements := make([]keyval, 0, 8)\n\tfor it := zv.Iter(); !it.Done(); {\n\t\tkey, _, err := it.NextTagAndBody()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tval, _, err := it.NextTagAndBody()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\telements = append(elements, keyval{key, val})\n\t}\n\tif len(elements) < 2 {\n\t\treturn zv\n\t}\n\tsort.Slice(elements, func(i, j int) bool {\n\t\treturn bytes.Compare(elements[i].key, elements[j].key) == -1\n\t})\n\tnorm := make(zcode.Bytes, 0, len(zv))\n\tnorm = append(norm, elements[0].key...)\n\tnorm = append(norm, elements[0].val...)\n\tfor i := 1; i < len(elements); i++ {\n\t\t// Skip duplicates.\n\t\tif !bytes.Equal(elements[i].key, elements[i-1].key) {\n\t\t\tnorm = append(norm, elements[i].key...)\n\t\t\tnorm = append(norm, elements[i].val...)\n\t\t}\n\t}\n\treturn norm\n}", "func IterateStringsMap(data map[string]string, cb func(k string, v string)) {\n\tfor _, k := range StringsMapKeys(data) {\n\t\tcb(k, data[k])\n\t}\n}", "func (m *dirtySeriesMap) Iter() map[dirtySeriesMapHash]dirtySeriesMapEntry {\n\treturn m.lookup\n}", "func (m ConcurrentRoomInfoMap) Iter() <-chan TupleRoomInfo {\n\tch := make(chan TupleRoomInfo)\n\tgo func() {\n\t\t// Foreach shard.\n\t\tfor _, shard := range m {\n\t\t\t// Foreach key, value pair.\n\t\t\tshard.RLock()\n\t\t\tfor key, val := range shard.items {\n\t\t\t\tch <- TupleRoomInfo{key, val}\n\t\t\t}\n\t\t\tshard.RUnlock()\n\t\t}\n\t\tclose(ch)\n\t}()\n\treturn ch\n}", "func (i StringHashMap[T, V]) Items() []struct {\n\tKey T\n\tVal V\n} {\n\tresult := make([]struct {\n\t\tKey T\n\t\tVal V\n\t}, 0, len(i.hashToKey))\n\tfor hash, key := range i.hashToKey {\n\t\tval := i.hashToVal[hash]\n\t\tresult = append(result, struct {\n\t\t\tKey T\n\t\t\tVal V\n\t\t}{key, val})\n\t}\n\treturn result\n}", "func PrintMap(m map[string]string) {\n\tfor k := range m {\n\t log.Println(\"\\t\",k,\"=\",m[k])\n\t}\n}", "func (m *ValuesResultArrayHash) Iter() map[ValuesResultArrayHashHash]ValuesResultArrayHashEntry {\n\treturn m.lookup\n}", "func (m *OrderedUintMap) Keys() []uint { return m.keys }", "func MapVals(m map[interface{}]interface{}) []interface{} {\n\tvals := make([]interface{}, len(m))\n\n\tndx := 0\n\tfor _, val := range m {\n\t\tvals[ndx] = val\n\t\tndx++\n\t}\n\n\treturn vals\n}", "func (c Collector) Copy(m map[string]interface{}) {\n\tfor v, k := range m {\n\t\tc.Set(v, k)\n\t}\n}", "func fullIter[K comparable, V any](m map[K]V, f func(K, V)) {\n\tseen := map[K]bool{}\n\tfor done := false; !done; {\n\t\tdone = true\n\t\tfor k, v := range m {\n\t\t\tif seen[k] {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tseen[k] = true\n\t\t\tdone = false\n\n\t\t\tf(k, v)\n\t\t}\n\t}\n}", "func 
(obj *object) Iter(f func(*Term, *Term) error) error {\n\tfor _, node := range obj.sortedKeys() {\n\t\tif err := f(node.key, node.value); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func Keys(m Map) []Key {\n\treturn m.keys()\n}", "func EachAnyMap(mp any, fn func(key string, val any)) {\n\trv := reflect.Indirect(reflect.ValueOf(mp))\n\tif rv.Kind() != reflect.Map {\n\t\tpanic(\"not a map value\")\n\t}\n\n\tfor _, key := range rv.MapKeys() {\n\t\tfn(key.String(), rv.MapIndex(key).Interface())\n\t}\n}", "func (h HyperparametersV0) Each(f func(name string, param HyperparameterV0)) {\n\tkeys := make([]string, 0, len(h))\n\tfor k := range h {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\tfor _, k := range keys {\n\t\tf(k, h[k])\n\t}\n}", "func SortMapByValue(m map[string]int) PairList {\n\tp := make(PairList, len(m))\n\ti := 0\n\tfor k, v := range m {\n\t\tp[i] = Pair{k, v}\n\t\ti++\n\t}\n\tsort.Sort(Reverse{p})\n\treturn p\n}", "func MapKeys(m interface{}) interface{} {\n\tv := vof(m)\n\tfailP1OnErrWhen(v.Kind() != reflect.Map, \"%v\", fEf(\"PARAM_INVALID_MAP\"))\n\n\tkeys := v.MapKeys()\n\tif L := len(keys); L > 0 {\n\t\tkType := tof(keys[0].Interface())\n\t\trstValue := mkSlc(sof(kType), L, L)\n\t\tfor i, k := range keys {\n\t\t\trstValue.Index(i).Set(vof(k.Interface()))\n\t\t}\n\t\t// sort keys if keys are int or float64 or string\n\t\trst := rstValue.Interface()\n\t\tswitch keys[0].Interface().(type) {\n\t\tcase int:\n\t\t\tsort.Ints(rst.([]int))\n\t\tcase float64:\n\t\t\tsort.Float64s(rst.([]float64))\n\t\tcase string:\n\t\t\tsort.Strings(rst.([]string))\n\t\t}\n\t\treturn rst\n\t}\n\treturn nil\n}", "func MapValues[K comparable, V any, R any](in map[K]V, iteratee func(V, K) R) map[K]R {\n\tresult := map[K]R{}\n\n\tfor k, v := range in {\n\t\tresult[k] = iteratee(v, k)\n\t}\n\n\treturn result\n}", "func (t *Map) Entries() []Entry {\n\tentries := make([]Entry, t.keys.Len())\n\tfor e, i := t.keys.Front(), 0; e != nil; e, i = e.Next(), i+1 {\n\t\tentries[i] = Entry{Key: e.Value, Value: t.entries[e.Value].val}\n\t}\n\treturn entries\n}", "func (t Timers) Each(f func(string, string, Timer)) {\n\tfor key, value := range t {\n\t\tfor tags, timer := range value {\n\t\t\tf(key, tags, timer)\n\t\t}\n\t}\n}", "func SwapMap(m map[string]string) map[string]string {\n newMap := map[string]string{}\n for item := range m {\n newMap[m[item]] = item\n }\n return newMap\n}", "func PrintResult(m map[string][]int) {\n\tfor key := range m {\n\t\tfmt.Print(\"\\n\", key, \": \")\n\t\tfor _, item := range m[key] {\n\t\t\tfmt.Print(item, \" \")\n\t\t}\n\t}\n}", "func split(m map[string]string) ([]string, []string) {\n\n\tvalues := make([]string, 0, len(m))\n\tkeys := make([]string, 0, len(m))\n\n\tfor k, v := range m {\n\t\tkeys = append(keys, k)\n\t\tvalues = append(values, v)\n\t}\n\n\treturn keys, values\n}", "func (enum Enum) Entries() []KeyWithTranslation {\n\tentries := make([]KeyWithTranslation, len(enum))\n\n\ti := 0\n\tfor k, v := range enum {\n\t\tentries[i] = KeyWithTranslation{\n\t\t\tEnumKey: k,\n\t\t\tTranslationKey: v,\n\t\t}\n\t\ti++\n\t}\n\n\treturn entries\n}", "func walk(om *openapi.OrderedMap) {\n\tfor _, p := range om.Pairs() {\n\t\tswitch p := p.Value.(type) {\n\t\tcase *openapi.OrderedMap:\n\t\t\twalk(p)\n\t\tcase []*openapi.OrderedMap:\n\t\t\tfor _, om := range p {\n\t\t\t\twalk(om)\n\t\t\t}\n\t\t}\n\t}\n}", "func testMapUnsetN(n int, m map[Key]interface{}) {\n\tfor i := 0; i < n; i++ {\n\t\tdelete(m, Key(i))\n\t}\n}", "func (self *SafeMap) Items() 
map[interface{}]interface{} {\n\tself.lock.RLock()\n\tdefer self.lock.RUnlock()\n\n\tr := make(map[interface{}]interface{})\n\n\tfor k, v := range self.sm {\n\t\tr[k] = v\n\t}\n\n\treturn r\n}", "func (m StringMap) MapSort() []string {\n\tvar keys []string\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\treturn keys\n}", "func (m Map) Keys() []string {\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\treturn keys\n}", "func (c *Cuckoo) Map(iter func(c *Cuckoo, key Key, val Value) (stop bool)) {\n\tif c.emptyKeyValid {\n\t\titer(c, c.emptyKey, c.emptyValue)\n\t}\n\n\tfor _, t := range c.tables {\n\t\tfor _, s := range t.buckets {\n\t\t\tfor _, b := range s {\n\t\t\t\tif b.key != c.emptyKey {\n\t\t\t\t\tif iter(c, b.key, b.val) {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (c *SyncCollector) Each(fx StringEachfunc) {\n\tvar state bool\n\tc.rw.RLock()\n\tfor k, v := range c.c {\n\t\tif state {\n\t\t\tbreak\n\t\t}\n\n\t\tfx(v, k, func() {\n\t\t\tstate = true\n\t\t})\n\t}\n\tc.rw.RUnlock()\n}", "func (m *HashMap) Range(fn func(k txHash, t TxDesc) error) error {\n\tm.lock.RLock()\n\tdefer m.lock.RUnlock()\n\n\tfor k, v := range m.data {\n\t\terr := fn(k, v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (m *MultiMap) Keys() []interface{} {\n\tkeys := make([]interface{}, m.Size())\n\tcount := 0\n\tfor key, value := range m.m {\n\t\tfor range value {\n\t\t\tkeys[count] = key\n\t\t\tcount++\n\t\t}\n\t}\n\treturn keys\n}", "func Keys(m map[string]interface{}) []string {\n\tkeys := make([]string, len(m))\n\tvar i int\n\tfor k := range m {\n\t\tkeys[i] = k\n\t\ti++\n\t}\n\treturn keys\n}", "func sortMapByValue(m map[int]data.Similar) map[int]data.Similar {\n\tp := make(PairList, len(m))\n\ti := 0\n\tfor _, v := range m {\n\t\tp[i] = v\n\t\ti++\n\t}\n\tsort.Sort(p)\n\tresult := make(map[int]data.Similar)\n\tfor j, v := range p {\n\t\tresult[j] = v\n\t}\n\treturn result\n}", "func (m *Map) Keys() []string {\n\tm.convert()\n\tif len(m.order) > 0 {\n\t\treturn m.order\n\t}\n\n\tresult := make([]string, len(m.items))\n\ti := 0\n\tfor key := range m.items {\n\t\tresult[i] = key\n\t\ti = i + 1\n\t}\n\n\tm.order = result\n\n\treturn result\n}", "func (m *Map) Map(f func(key1 interface{}, value1 interface{}) (interface{}, interface{})) *Map {\n\tnewMap := NewWith(m.keyComparator, m.valueComparator)\n\titerator := m.Iterator()\n\tfor iterator.Next() {\n\t\tkey2, value2 := f(iterator.Key(), iterator.Value())\n\t\tnewMap.Put(key2, value2)\n\t}\n\treturn newMap\n}", "func enumMapKeysStrict(v reflect.Value) []reflect.Value {\n\tkeys := v.MapKeys()\n\tsort.Sort(byCanonicalOrder(keys))\n\treturn keys\n}", "func Iterate(fn func(key string)) {\n\tdb.View(func(txn *badger.Txn) error {\n\t\topts := badger.IteratorOptions{}\n\t\t// opts.PrefetchValues = true\n\t\tit := txn.NewIterator(opts)\n\n\t\tfor it.Rewind(); it.Valid(); it.Next() {\n\t\t\tkey := it.Item().Key()\n\t\t\tfn(string(key))\n\t\t}\n\n\t\tit.Close()\n\n\t\treturn nil\n\t})\n}", "func (s *session) VisitAll(cb func(k string, v interface{})) {\n\tfor key := range s.values {\n\t\tcb(key, s.values[key])\n\t}\n}", "func (c DirCollector) Each(fx func(*VDir, string, func())) {\n\tvar state bool\n\tfor k, v := range c {\n\t\tif state {\n\t\t\tbreak\n\t\t}\n\n\t\tfx(v, k, func() {\n\t\t\tstate = true\n\t\t})\n\t}\n}", "func MapKeys(m map[string]bool) []string {\n\tks := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tks = 
append(ks, k)\n\t}\n\treturn ks\n}", "func (m *Map) String() string {\n\tkeys := \"\"\n\ti := 0\n\tfor k, _ := range *m {\n\t\tkeys = keys + k\n\t\tif i++; i < len(*m) {\n\t\t\tkeys = keys + \",\"\n\t\t}\n\t}\n\treturn keys\n}", "func Keys(m map[string]string) []string {\n\tr := []string{}\n\n\tfor k := range m {\n\t\tr = append(r, k)\n\t}\n\n\tsort.Strings(r)\n\n\treturn r\n}", "func (m *OrderedMap) GetAll(key string) []string {\n\tif key == \"\" {\n\t\treturn nil\n\t}\n\n\tkeyVals := []string{}\n\tfor _, param := range m.keypairs {\n\t\t// Only build up our list of params for OUR key\n\t\tparamKey := param[0]\n\t\tif key == paramKey {\n\t\t\tkeyVals = append(keyVals, param[1])\n\t\t}\n\t}\n\treturn keyVals\n}" ]
[ "0.6208769", "0.5858773", "0.5858397", "0.5811261", "0.5794884", "0.5666476", "0.56434906", "0.56392515", "0.5638658", "0.559033", "0.5568864", "0.556023", "0.5512877", "0.55019134", "0.5491342", "0.5469261", "0.5463297", "0.5460558", "0.54502535", "0.5442066", "0.54337984", "0.54004335", "0.5395336", "0.5394839", "0.5391057", "0.53887814", "0.53636974", "0.5362325", "0.53540695", "0.5353856", "0.5350044", "0.5344327", "0.53118217", "0.5311731", "0.53108084", "0.53055805", "0.5298736", "0.52964836", "0.52939594", "0.52862066", "0.5276288", "0.5272719", "0.5271519", "0.5270006", "0.52696264", "0.5245894", "0.5218849", "0.52092975", "0.5209184", "0.52036554", "0.51719254", "0.5171705", "0.51675963", "0.51651746", "0.5160724", "0.5152861", "0.5141164", "0.5134252", "0.51328385", "0.51319385", "0.51278687", "0.5115721", "0.5108259", "0.509989", "0.5096902", "0.5091173", "0.50874764", "0.5085444", "0.5085443", "0.50839734", "0.5036956", "0.5032757", "0.50299746", "0.50164366", "0.501147", "0.5008882", "0.5008338", "0.49886343", "0.49870586", "0.4981378", "0.4970507", "0.49687672", "0.49571708", "0.49569827", "0.4956142", "0.49519953", "0.49431598", "0.4938278", "0.49335572", "0.49293128", "0.4921849", "0.49199826", "0.4918978", "0.4918578", "0.49026352", "0.48958164", "0.48914236", "0.48898688", "0.4888771", "0.48751712" ]
0.63975227
0
MakeMap is a convenient way to construct a Map from a set of key-value pairs.
func MakeMap(kt *types.T, kvs ...T) *Map {
	if len(kvs)%2 != 0 {
		panic("uneven makemap")
	}
	m := new(Map)
	for i := 0; i < len(kvs); i += 2 {
		m.Insert(Digest(kvs[i], kt), kvs[i], kvs[i+1])
	}
	return m
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func MakeMap(keys ...string) (m Map) {\n\tm = make(Map, len(keys))\n\tfor _, key := range keys {\n\t\tm.Set(key)\n\t}\n\n\treturn\n}", "func MakeSet(s map[interface{}]bool) (*skylark.Set, error) {\n\tset := skylark.Set{}\n\tfor k := range s {\n\t\tkey, err := ToValue(k)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif err := set.Insert(key); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn &set, nil\n}", "func MakeSet(lst []string) map[string]bool {\n\tret := make(map[string]bool)\n\tfor _, s := range lst {\n\t\tret[s] = true\n\t}\n\treturn ret\n}", "func MakeMap(ints []int) map[string]int {\n\t// Your code goes here\n}", "func CreateSet(values ...interface{}) map[interface{}]struct{} {\n\treturn make(map[interface{}]struct{})\n}", "func (m *Map) MakeMap(raw map[any]any) any {\n\tma := reflect.MakeMap(toReflectType(m))\n\tfor key, value := range raw {\n\t\tma.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(value))\n\t}\n\treturn ma.Interface()\n}", "func MakeMap(f Functor) template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"rowsset\": func(interface{}) string { return \"\" }, // empty pipeline\n\t\t// acepp overrides rowsset and adds setrows\n\n\t\t\"class\": f.Class,\n\t\t\"colspan\": f.Colspan,\n\t\t\"jsxClose\": f.JSXClose,\n\t}\n}", "func CreateMap(args ...interface{}) map[string]interface{} {\n\tm := make(map[string]interface{}, 0)\n\tkey := \"\"\n\tfor _, v := range args {\n\t\tif len(key) == 0 {\n\t\t\tkey = string(v.(string))\n\t\t} else {\n\t\t\tm[key] = v\n\t\t}\n\t}\n\treturn m\n}", "func makeSet() *customSet {\n\treturn &customSet{\n\t\tcontainer: make(map[string]struct{}),\n\t}\n}", "func makeStringMap(in interface{}) map[string]interface{} {\n\treturn in.(map[string]interface{})\n}", "func CreateMap(a ...[]string) (m map[string]struct{}) {\n\tm = make(map[string]struct{})\n\tfor _, aa := range a {\n\t\tfor _, val := range aa {\n\t\t\tm[val] = struct{}{}\n\t\t}\n\t}\n\treturn m\n}", "func MakeMset (values ...interface{}) Mset {\n\tht := MakeHashtable(true)\n\tmset := Mset{&ht, uint64(len(values))}\n\tfor _, elt := range values {\n\t\tk, v := ht.Get(elt)\n\t\tif k == nil {\n\t\t\tht.Put(elt, uint64(1))\n\t\t} else {\n\t\t\tht.Put(elt, (*v).(uint64) + 1)\n\t\t}\n\t}\n\treturn mset\n}", "func MakeSelector(in map[string]string) labels.Selector {\n\tset := make(labels.Set)\n\tfor key, val := range in {\n\t\tset[key] = val\n\t}\n\treturn set.AsSelector()\n}", "func makeMap(yamlStruct T) map[string]string {\n\turlMap := make(map[string]string)\n\tfor _, s := range yamlStruct {\n\t\turlMap[s.P] = s.U\n\t}\n\treturn urlMap\n}", "func NewMap(values ...MalType) Map {\n\timm := immutable.NewMap(hasher{})\n\tif len(values) > 0 {\n\t\tb := immutable.NewMapBuilder(imm)\n\t\tfor i := 0; i < len(values); i += 2 {\n\t\t\tb.Set(values[i], values[i+1])\n\t\t}\n\t\timm = b.Map()\n\t}\n\treturn Map{Imm: imm}\n}", "func MapOf[V any](args ...any) *Map[V] {\n\tif len(args)%2 != 0 {\n\t\tpanic(\"expecting an even number of arguments\")\n\t}\n\tret := &Map[V]{}\n\tfor i := 0; i < len(args); i += 2 {\n\t\tvar key Ident\n\t\tswitch t := args[i].(type) {\n\t\tcase Ident:\n\t\t\tkey = t\n\t\tcase string:\n\t\t\tkey = New(t)\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"unsupported type %T at index %d\", t, i))\n\t\t}\n\t\tret.Put(key, args[i+1].(V))\n\t}\n\treturn ret\n}", "func MakeMapThreadSafe(m map[string]interface{}) *ThreadSafeMap {\n\treturn &ThreadSafeMap{items: m}\n}", "func toMap(s *DefaultIDSetMap) map[int]*IDSet {\n\tif s.key != 0 {\n\t\treturn map[int]*IDSet{\n\t\t\ts.key: 
s.value,\n\t\t}\n\t}\n\n\tif s.m != nil {\n\t\tm := map[int]*IDSet{}\n\t\tfor k, v := range s.m {\n\t\t\tm[k] = v\n\t\t}\n\t\treturn m\n\t}\n\n\treturn nil\n}", "func toMap(params []interface{}) map[string]interface{} {\n\tpar := make(map[string]interface{})\n\tif len(params) == 0 {\n\t\treturn par\n\t}\n\tif len(params)%2 != 0 {\n\t\tpanic(\"WithParams: len(params) % 2 != 0\")\n\t}\n\tfor i := 0; i < len(params)/2; i++ {\n\t\tkey, ok := params[2*i].(string)\n\t\tif !ok {\n\t\t\tpanic(\"WithParams: string expected\")\n\t\t}\n\t\tpar[key] = params[2*i+1]\n\t}\n\treturn par\n}", "func MerkMake(mapfile []string) map[string]string {\n mermap := map[string]string{}\n\n for lx := 0; lx < len(mapfile); lx++ {\n hashthis := himitsu.Hashit(mapfile[lx])\n mermap[hashthis] = mapfile[lx]\n } // end for mapfile.\n\n return mermap\n\n}", "func makeInput() Map {\n\treturn Map{\n\t\t\"a\": 25,\n\t\t\"b\": float32(2.5),\n\t\t\"c\": float64(2.5),\n\t\t\"d\": true,\n\t\t\"e\": false,\n\t\t\"f\": \"25\",\n\t\t\"g\": nil,\n\t}\n}", "func (o *Aliyun) makeMapArgs(args []map[string]string) string {\n\tstr := \"\"\n\tif len(args) > 0 {\n\t\tfor _, v := range args {\n\t\t\tfor kk, vv := range v {\n\t\t\t\tstr += \"&\" + kk + \"=\" + vv + \"&\"\n\t\t\t}\n\t\t}\n\t}\n\treturn str[:len(str)-1]\n}", "func MakeDict(d map[interface{}]interface{}) (*skylark.Dict, error) {\n\tdict := skylark.Dict{}\n\tfor k, v := range d {\n\t\tkey, err := ToValue(k)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tval, err := ToValue(v)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdict.Set(key, val)\n\t}\n\treturn &dict, nil\n}", "func Map(v interface{}) map[string]interface{} {\n\treturn New(v).Map()\n}", "func New(in ...map[string]string) *Store {\n\tstore := make(map[string]string)\n\n\tbuf := bytes.NewBuffer(nil)\n\n\t// Note: super slow flow, i req. 
use sync.Map as this proc every work\n\tfor _, maps := range in {\n\t\tfor k, v := range maps {\n\t\t\tbuf.Reset()\n\n\t\t\tt, err := template.New(\"x\").\n\t\t\t\tFuncs(TemplateFunctions).\n\t\t\t\tParse(v)\n\t\t\tif err == nil {\n\t\t\t\tif err = t.Execute(buf, store); err == nil {\n\t\t\t\t\tv = buf.String()\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tstore[k] = v\n\t\t}\n\t}\n\n\treturn &Store{store: store}\n}", "func constructInputsMap(ctx *Context, inputs map[string]interface{}) (Map, error) {\n\tresult := make(Map, len(inputs))\n\tfor k, v := range inputs {\n\t\tci := v.(*constructInput)\n\n\t\tknown := !ci.value.ContainsUnknowns()\n\t\tvalue, secret, err := unmarshalPropertyValue(ctx, ci.value)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"unmarshaling input %s\", k)\n\t\t}\n\n\t\tresultType := anyOutputType\n\t\tif ot, ok := concreteTypeToOutputType.Load(reflect.TypeOf(value)); ok {\n\t\t\tresultType = ot.(reflect.Type)\n\t\t}\n\n\t\toutput := ctx.newOutput(resultType, ci.deps...)\n\t\toutput.getState().resolve(value, known, secret, nil)\n\t\tresult[k] = output\n\t}\n\treturn result, nil\n}", "func NewSet(items ...Value) *Set {\n\tmapItems := make([]privateSetMapItem, 0, len(items))\n\tvar mapValue struct{}\n\tfor _, x := range items {\n\t\tmapItems = append(mapItems, privateSetMapItem{Key: x, Value: mapValue})\n\t}\n\n\treturn &Set{backingMap: newprivateSetMap(mapItems)}\n}", "func Map(m map[string]interface{}, k string, v interface{}) map[string]interface{} {\n\tm[k] = v\n\treturn m\n}", "func (f *ConfigMapFactory) MakeConfigMap(\n\targs *types.ConfigMapArgs, options *types.GeneratorOptions) (*corev1.ConfigMap, error) {\n\tvar all []kv.Pair\n\tvar err error\n\tcm := f.makeFreshConfigMap(args)\n\n\tpairs, err := keyValuesFromEnvFile(f.ldr, args.EnvSource)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\n\t\t\t\"env source file: %s\",\n\t\t\targs.EnvSource))\n\t}\n\tall = append(all, pairs...)\n\n\tpairs, err = keyValuesFromLiteralSources(args.LiteralSources)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\n\t\t\t\"literal sources %v\", args.LiteralSources))\n\t}\n\tall = append(all, pairs...)\n\n\tpairs, err = keyValuesFromFileSources(f.ldr, args.FileSources)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\n\t\t\t\"file sources: %v\", args.FileSources))\n\t}\n\tall = append(all, pairs...)\n\n\tfor _, p := range all {\n\t\terr = addKvToConfigMap(cm, p.Key, p.Value)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif options != nil {\n\t\tcm.SetLabels(options.Labels)\n\t\tcm.SetAnnotations(options.Annotations)\n\t}\n\treturn cm, nil\n}", "func (f HTMLFuncs) MakeMap() template.FuncMap { return MakeMap(f) }", "func makeMapList(sts *schema.Structural, items []interface{}) (rv common.MapList) {\n\treturn common.MakeMapList(&model.Structural{Structural: sts}, items)\n}", "func (s StringSlice) Map() map[string]struct{} {\n\tm := map[string]struct{}{}\n\tfor _, w := range s {\n\t\tm[w] = struct{}{}\n\t}\n\treturn m\n}", "func toSet(list []string) map[string]struct{} {\n\tset := make(map[string]struct{})\n\tfor _, item := range list {\n\t\tset[item] = struct{}{}\n\t}\n\treturn set\n}", "func Map[M map[K]V, K ~int, V string | int](s []V) M {\n// func Map[M map[int]V, V string | int](s []V) M {\n\tvar m = make(M)\n\tfor i, one := range s {\n\t\tm[i] = one\n\t}\n\treturn m\n}", "func (set *AppleSet) Map(f func(Apple) Apple) *AppleSet {\n\tif set == nil {\n\t\treturn nil\n\t}\n\n\tresult := NewAppleSet()\n\tset.s.RLock()\n\tdefer 
set.s.RUnlock()\n\n\tfor v := range set.m {\n\t\tk := f(v)\n\t\tresult.m[k] = struct{}{}\n\t}\n\n\treturn result\n}", "func MakeMap(d *json.Decoder) MetricMap {\n\tflMap := make(MetricMap)\n\tvar output map[string]interface{}\n\n\tif d == nil {\n\t\tlog.Error(\"JSON decoder not iniatilized\")\n\t\treturn flMap\n\t}\n\n\tif err := d.Decode(&output); err != nil {\n\t\tlog.WithField(\"error\", err).Error(\"Error while decoding json\")\n\t\treturn flMap\n\t}\n\n\taddFields(&flMap, \"\", output)\n\n\treturn flMap\n}", "func StringsToMap(s []string) map[string]struct{} {\n\tm := make(map[string]struct{})\n\n\tfor _, v := range s {\n\t\tm[v] = struct{}{}\n\t}\n\n\treturn m\n}", "func mapfn(kvs ...interface{}) (map[string]interface{}, error) {\n\tif len(kvs)%2 != 0 {\n\t\treturn nil, errors.New(\"map requires even number of arguments.\")\n\t}\n\tm := make(map[string]interface{})\n\tfor i := 0; i < len(kvs); i += 2 {\n\t\ts, ok := kvs[i].(string)\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"even args to map must be strings.\")\n\t\t}\n\t\tm[s] = kvs[i+1]\n\t}\n\treturn m, nil\n}", "func Create() *MapUtil {\n\treturn &MapUtil{\n\t\tm: make(map[string]string),\n\t}\n}", "func NewSet(ss ...string) Set {\n\tsset := map[string]bool{}\n\tfor _, s := range ss {\n\t\tsset[s] = true\n\t}\n\n\treturn sset\n}", "func fromMap(labelMap map[string]string, lset utils.Labels) utils.Labels {\n\ti := 0\n\n\tfor k, v := range labelMap {\n\t\tif i < len(lset) {\n\t\t\tlset[i].Name = k\n\t\t\tlset[i].Value = v\n\t\t} else {\n\t\t\tlset = append(lset, utils.Label{Name: k, Value: v})\n\t\t}\n\t\ti++\n\t}\n\tnewLset := lset[0:i]\n\tsort.Sort(newLset)\n\treturn newLset\n}", "func set(a []string) map[string]bool {\n\tr := map[string]bool{}\n\tfor _, k := range a {\n\t\tr[k] = true\n\t}\n\treturn r\n}", "func (Map) Build(vt ValueType, from, to string) *Map {\n\treturn &Map{\n\t\tType: vt,\n\t\tFrom: strings.Split(from, \".\"),\n\t\tTo: strings.Split(to, \".\"),\n\t}\n}", "func toMap(sl []string) map[string]bool {\n\tm := make(map[string]bool, len(sl))\n\tfor _, s := range sl {\n\t\tm[s] = true\n\t}\n\treturn m\n}", "func ExampleMap_fixingInitialValue() {\n\tps := newPSetForTesting() // use paramset.NewOrPanic()\n\n\tm := map[string]bool{\"x\": true}\n\tkeys := []string{\"x\", \"y\"}\n\n\tps.Add(\"my-map\", psetter.Map{Value: &m}, \"help text\")\n\n\tfmt.Println(\"Before parsing\")\n\tfor _, k := range keys {\n\t\tif v, ok := m[k]; ok {\n\t\t\tfmt.Printf(\"\\tm[%s] = %v\\n\", k, v)\n\t\t}\n\t}\n\tps.Parse([]string{\"-my-map\", \"x=false,y\"})\n\tfmt.Println(\"After parsing\")\n\tfor _, k := range keys {\n\t\tif v, ok := m[k]; ok {\n\t\t\tfmt.Printf(\"\\tm[%s] = %v\\n\", k, v)\n\t\t}\n\t}\n\t// Output:\n\t// Before parsing\n\t//\tm[x] = true\n\t// After parsing\n\t//\tm[x] = false\n\t//\tm[y] = true\n}", "func NewMap(less func(a, b interface{}) bool) *Map {\n\treturn &Map{\n\t\tless: less,\n\t}\n}", "func MakeStringDict(m map[string]interface{}) (skylark.StringDict, error) {\n\tdict := make(skylark.StringDict, len(m))\n\tfor k, v := range m {\n\t\tval, err := ToValue(v)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdict[k] = val\n\t}\n\treturn dict, nil\n}", "func New(m map[string]interface{}) ByteMap {\n\treturn Build(func(cb func(string, interface{})) {\n\t\tfor key, value := range m {\n\t\t\tcb(key, value)\n\t\t}\n\t}, func(key string) interface{} {\n\t\treturn m[key]\n\t}, false)\n}", "func New(m map[string]string) Metadata {\n\tmd := Metadata{}\n\tfor key, val := range m {\n\t\tmd[key] = val\n\t}\n\treturn 
md\n}", "func SetFrom(m map[string]string) {\n\tif Extra == nil && len(m) > 0 {\n\t\tExtra = make(map[string]string)\n\t}\n\tfor k, v := range m {\n\t\tExtra[k] = v\n\t}\n}", "func Map(vars ...Variable) map[string]interface{} {\n\tresult := map[string]interface{}{}\n\tfor _, v := range vars {\n\t\tresult[(string)(v)] = v.Value()\n\t}\n\treturn result\n}", "func (f JSXFuncs) MakeMap() template.FuncMap { return MakeMap(f) }", "func NewMap() Map {\n\treturn Map{NewSet()}\n}", "func ToSet(i []string) map[string]bool {\n\tout := make(map[string]bool)\n\tfor _, v := range i {\n\t\tout[v] = true\n\t}\n\treturn out\n}", "func CreateValues(m map[string]string) common.Values {\n\tvalues := make([]*common.Value, len(m))\n\ti := 0\n\tfor k, v := range m {\n\t\tvalue := &common.Value{}\n\t\tvalue.Name = k\n\t\tvalue.Value = v\n\t\tvalues[i] = value\n\t\ti++\n\t}\n\n\treturn common.Values{values}\n}", "func mapCreate(name, rollNo, height, weight, averageMarks, extraCurricularGrade string) map[string]string {\n\tvar mapRet = map[string]string{\n\t\t\"name\": name,\n\t\t\"roll_no\": rollNo,\n\t\t\"weight\": weight,\n\t\t\"height\": height,\n\t\t\"avg_marks\": averageMarks,\n\t\t\"extra_curr_grades\": extraCurricularGrade,\n\t}\n\treturn mapRet\n}", "func Make_Key_Value_Map(values []*vector_tile.Tile_Value, keys []string) (map[int]interface{}, map[int]string) {\n\t// making value map\n\tvaluemap := map[int]interface{}{}\n\tfor i, value := range values {\n\t\tvaluemap[i] = Get_Value(value)\n\t}\n\n\t// making keymap\n\tkeymap := map[int]string{}\n\tfor i, key := range keys {\n\t\tkeymap[i] = key\n\t}\n\n\treturn valuemap, keymap\n}", "func Map(args ...interface{}) dgo.MapType {\n\treturn internal.MapType(args...)\n}", "func createParamsMap(params []TrafficOpsParameter) map[string]map[string]string {\n\tm := make(map[string]map[string]string)\n\tfor _, param := range params {\n\t\tif m[param.ConfigFile] == nil {\n\t\t\tm[param.ConfigFile] = make(map[string]string)\n\t\t}\n\t\tm[param.ConfigFile][param.Name] = param.Value\n\t}\n\treturn m\n}", "func TestMapSet(t *testing.T) {\n\tm := map[Key]interface{}{}\n\ttestMapSetN(testN, m)\n}", "func constructMapping(allKinds []string, schema *schema.Instance) (*mapping, error) {\n\t// The mapping is constructed from the common metadata we have for the Kubernetes.\n\t// Go through Mixer's well-known kinds, and map them to collections.\n\n\tmixerKindMap := make(map[string]struct{})\n\tfor _, k := range allKinds {\n\t\tmixerKindMap[k] = struct{}{}\n\t}\n\n\t// Create a mapping of kind <=> collection for known non-legacy Mixer kinds.\n\tkindToCollection := make(map[string]string)\n\tcollectionToKind := make(map[string]string)\n\tfor _, spec := range schema.All() {\n\t\tif _, ok := mixerKindMap[spec.Kind]; ok {\n\t\t\tkindToCollection[spec.Kind] = spec.Target.Collection.String()\n\t\t\tcollectionToKind[spec.Target.Collection.String()] = spec.Kind\n\t\t}\n\t}\n\n\tvar missingKinds []string\n\tfor _, mk := range allKinds {\n\t\tif _, ok := kindToCollection[mk]; !ok {\n\t\t\tmissingKinds = append(missingKinds, mk)\n\t\t}\n\t}\n\t// We couldn't find metadata for some of the well-known Mixer kinds. 
This shouldn't happen\n\t// and is a fatal error.\n\tif len(missingKinds) > 0 {\n\t\treturn nil, fmt.Errorf(\"unable to map some Mixer kinds to collections: %q\",\n\t\t\tstrings.Join(missingKinds, \",\"))\n\t}\n\n\treturn &mapping{\n\t\tkindsToCollections: kindToCollection,\n\t\tcollectionsToKinds: collectionToKind,\n\t}, nil\n}", "func ToMap[S ~[]E, E any, K comparable, V any](slice S, f func(E) (K, V)) map[K]V {\n\tif len(slice) == 0 {\n\t\treturn nil\n\t}\n\tout := make(map[K]V, len(slice))\n\tfor _, elem := range slice {\n\t\tk, v := f(elem)\n\t\tout[k] = v\n\t}\n\treturn out\n}", "func MakeSet(reports []structs.Report) []structs.Report {\n\n\tset := make(map[structs.Report]bool)\n\tx := ReportSet{set}\n\tfor i := 0; i < len(reports); i++ {\n\t\tx.add(reports[i])\n\t}\n\treturn createArray(x.set)\n}", "func Map(args ...interface{}) (map[string]interface{}, error) {\n\tif len(args)%2 != 0 {\n\t\treturn nil, fmt.Errorf(\"expecting even number of arguments, got %d\", len(args))\n\t}\n\n\tm := make(map[string]interface{})\n\tfn := \"\"\n\tfor _, v := range args {\n\t\tif len(fn) == 0 {\n\t\t\tif s, ok := v.(string); ok {\n\t\t\t\tfn = s\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn m, fmt.Errorf(\"expecting string for odd numbered arguments, got %+v\", v)\n\t\t}\n\t\tm[fn] = v\n\t\tfn = \"\"\n\t}\n\n\treturn m, nil\n}", "func make_dictionary(words []string) map[string]bool {\n\tdict := make(map[string]bool)\n\tfor _, word := range words {\n\t\tdict[word] = true\n\t}\n\treturn dict\n}", "func createParamsMap(params []string) *map[string]string {\n\n\tparamsMap := map[string]string{}\n\n\tfor _, param := range params {\n\t\tkeyval := strings.Split(param, \"=\")\n\t\tif len(keyval) == 0 || len(keyval) > 2 {\n\t\t\tcontinue // weird but skip\n\t\t}\n\t\tif len(keyval) == 1 {\n\t\t\tparamsMap[strings.TrimSpace(keyval[0])] = \"\" // no value\n\t\t\tcontinue\n\t\t}\n\t\tparamsMap[strings.TrimSpace(keyval[0])] = strings.TrimSpace(keyval[1])\n\t}\n\tif len(paramsMap) < 1 {\n\t\treturn nil\n\t}\n\treturn &paramsMap\n\n}", "func newMap() map[interface{}]interface{} {\n\treturn map[interface{}]interface{}{}\n}", "func mapFilter(\n\tm map[string]*dynamodb.AttributeValue,\n\tnames ...string,\n) (n map[string]*dynamodb.AttributeValue) {\n\tn = make(map[string]*dynamodb.AttributeValue)\n\tfor _, name := range names {\n\t\tif name == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tif _, ok := m[name]; !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tn[name] = m[name]\n\t}\n\treturn\n}", "func (kvs KeyValues) Map() map[string]string {\n\tvar m = make(map[string]string)\n\tfor idx := range kvs {\n\t\tm[kvs[idx].Key] = kvs[idx].Value\n\t}\n\treturn m\n}", "func (set Int64Set) Map(fn func(int64) int64) Int64Set {\n\tresult := NewInt64Set()\n\n\tfor v := range set {\n result[fn(v)] = struct{}{}\n\t}\n\n\treturn result\n}", "func CreateMockMap() map[string]interface{} {\n\tm := make(map[string]interface{})\n\treturn m\n}", "func makeAttributes(tags map[string]interface{}) (map[string]pdata.AttributeValue, error) {\n\tm := make(map[string]pdata.AttributeValue, len(tags))\n\t// todo: attribute val as array?\n\tfor k, v := range tags {\n\t\tswitch val := v.(type) {\n\t\tcase int64:\n\t\t\tm[k] = pdata.NewAttributeValueInt(val)\n\t\tcase bool:\n\t\t\tm[k] = pdata.NewAttributeValueBool(val)\n\t\tcase string:\n\t\t\tm[k] = pdata.NewAttributeValueString(val)\n\t\tcase float64:\n\t\t\tm[k] = pdata.NewAttributeValueDouble(val)\n\t\tcase []byte:\n\t\t\tm[k] = pdata.NewAttributeValueBytes(val)\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unknown tag 
type %T\", v)\n\t\t}\n\t}\n\treturn m, nil\n}", "func (v CMap) Set(key, value string) CMap {\n\tv[key] = []string{value}\n\treturn v\n}", "func (s *set) Map(f func(*Term) (*Term, error)) (Set, error) {\n\tset := NewSet()\n\terr := s.Iter(func(x *Term) error {\n\t\tterm, err := f(x)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tset.Add(term)\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn set, nil\n}", "func (i *Input) FromMap(values map[string]interface{}) error {\n\tvar err error\n\tvar keys interface{}\n\tif keys, err = coerce.ToAny(values[\"keys\"]); err != nil {\n\t\treturn err\n\t}\n\tswitch v := keys.(type) {\n\tcase []interface{}:\n\t\tfor _, d := range v {\n\t\t\tk := strings.TrimSpace(d.(string))\n\t\t\tif len(k) > 0 {\n\t\t\t\ti.StateKeys = append(i.StateKeys, k)\n\t\t\t}\n\t\t}\n\tcase string:\n\t\ti.StateKeys = []string{strings.TrimSpace(v)}\n\t}\n\n\tvar orgs interface{}\n\tif orgs, err = coerce.ToAny(values[\"organizations\"]); err != nil {\n\t\treturn err\n\t}\n\tswitch v := orgs.(type) {\n\tcase []interface{}:\n\t\tfor _, d := range v {\n\t\t\tk := strings.TrimSpace(d.(string))\n\t\t\tif len(k) > 0 {\n\t\t\t\ti.Organizations = append(i.Organizations, k)\n\t\t\t}\n\t\t}\n\tcase string:\n\t\ti.Organizations = []string{strings.TrimSpace(v)}\n\t}\n\n\tif i.Policy, err = coerce.ToString(values[\"policy\"]); err != nil {\n\t\treturn err\n\t}\n\tif i.PrivateCollection, err = coerce.ToString(values[\"privateCollection\"]); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func NewMap(concurrency int) *Map {\n\trand.Seed(time.Now().UTC().UnixNano())\n\tm := &Map{\n\t\tconcurrency: concurrency,\n\t\tseed: uintptr(rand.Int63()),\n\t\tmutices: make([]sync.RWMutex, concurrency),\n\t\tducklings: make([]duckling, concurrency),\n\t}\n\tfor b := 0; b < concurrency; b++ {\n\t\tm.ducklings[b] = make(duckling, duckilingDefaultSize)\n\t}\n\treturn m\n}", "func NewFromMap(v map[string]interface{}) Object {\n\treturn v\n}", "func makeAttributes(attribs map[string]*sqs.MessageAttributeValue) Attributes {\n\tattributes := make([]Attribute, 0, len(attribs))\n\tfor k, v := range attribs {\n\t\tattributes = append(attributes, Attribute{Name: k, Value: *v.StringValue})\n\t}\n\ta := Attributes(attributes)\n\treturn a\n}", "func (qq Qualifiers) Map() map[string]string {\n\tm := make(map[string]string)\n\n\tfor i := 0; i < len(qq); i++ {\n\t\tk := qq[i].Key\n\t\tv := qq[i].Value\n\t\tm[k] = v\n\t}\n\n\treturn m\n}", "func ExampleMap_standard() {\n\tps := newPSetForTesting() // use paramset.NewOrPanic()\n\n\tvar m map[string]bool\n\tkeys := []string{\"x\", \"y\"}\n\n\tps.Add(\"my-map\", psetter.Map{Value: &m}, \"help text\")\n\n\tfmt.Println(\"Before parsing\")\n\tfor _, k := range keys {\n\t\tif v, ok := m[k]; ok {\n\t\t\tfmt.Printf(\"\\tm[%s] = %v\\n\", k, v)\n\t\t}\n\t}\n\tps.Parse([]string{\"-my-map\", \"x\"})\n\tfmt.Println(\"After parsing\")\n\tfor _, k := range keys {\n\t\tif v, ok := m[k]; ok {\n\t\t\tfmt.Printf(\"\\tm[%s] = %v\\n\", k, v)\n\t\t}\n\t}\n\t// Output:\n\t// Before parsing\n\t// After parsing\n\t//\tm[x] = true\n}", "func newMap(src *map[string]interface{}) map[string]interface{} {\n\tdst := make(map[string]interface{})\n\tif src == nil {\n\t\treturn dst\n\t}\n\tfor k, v := range *src {\n\t\tif strings.HasPrefix(k, \"_\") {\n\t\t\tcontinue\n\t\t}\n\t\tdst[k] = v\n\t}\n\treturn dst\n}", "func MakeParams(cmd *cobra.Command, params []GeneratorParam) map[string]interface{} {\n\tresult := map[string]interface{}{}\n\tfor ix := range params {\n\t\tf := 
cmd.Flags().Lookup(params[ix].Name)\n\t\tif f != nil {\n\t\t\tresult[params[ix].Name] = f.Value.String()\n\t\t}\n\t}\n\treturn result\n}", "func NewMap(size int) *Map {\n\tif size <= 0 {\n\t\tsize = runtime.GOMAXPROCS(0)\n\t}\n\tsplits := make([]Split, size)\n\tfor i := range splits {\n\t\tsplits[i].Map = make(map[interface{}]interface{})\n\t}\n\treturn &Map{splits}\n}", "func WorkWithMaps() {\n\t// create empty map with builtin `make`` function:\n\t// `make(map[key-type]val-type`\n\tm := make(map[string]int)\n\n\t// set key/value pairs\n\tm[\"k1\"] = 7\n\tm[\"k2\"] = 13\n\n\t// print all key/value pairs\n\tfmt.Println(\"map:\", m)\n\n\t// get value associated with key\n\tv1 := m[\"k1\"]\n\tfmt.Println(\"v1:\", v1)\n\n\t// get number of key/value pairs in a map\n\tfmt.Println(\"len:\", len(m))\n\n\t// remove key/value pairs from a map with builtin `delete` function\n\tdelete(m, \"k2\")\n\tfmt.Println(\"map:\", m)\n\n\t// optional second return value when getting a value from a map\n\t// indicates if the key was present in the map\n\t// use the blank identifier if you don't need the value\n\t_, prs := m[\"k2\"]\n\tfmt.Println(\"prs:\", prs)\n\n\t// declare and initialize a new map in one line\n\tn := map[string]int{\"foo\": 1, \"bar\": 2}\n\tfmt.Println(\"map:\", n)\n}", "func (s String) Map(f func(string) string) String {\n\tmapped := make(String)\n\tfor k := range s {\n\t\tmapped[f(k)] = yes\n\t}\n\treturn mapped\n\n}", "func execNewMap(_ int, p *gop.Context) {\n\targs := p.GetArgs(2)\n\tret := types.NewMap(args[0].(types.Type), args[1].(types.Type))\n\tp.Ret(2, ret)\n}", "func New() Set {\n\treturn make(map[string]bool)\n}", "func Example_map() {\n\tvar set suffix.Set\n\n\t// tags will keep a list of tags for each possible match\n\ttags := make(map[suffix.Match][]string)\n\n\t// set up rules and add to set\n\trules := []struct {\n\t\tName string\n\t\tTag string\n\t}{\n\t\t// will match google.com and all subdomains\n\t\t{\"google.com\", \"google\"},\n\t\t// will match google.com exactly\n\t\t{\"google.com.\", \"toplevel\"},\n\t\t// will match only subdomains *.google.com\n\t\t{\".google.com\", \"subdomain\"},\n\t\t// will match api.google.com exactly\n\t\t{\"www.google.com.\", \"websearch\"},\n\t}\n\tfor _, rule := range rules {\n\t\tfor _, match := range set.Add(rule.Name) {\n\t\t\t// create mapping between each match;\n\t\t\t// matches are not unique between names, so we need to append.\n\t\t\ttags[match] = append(tags[match], rule.Tag)\n\t\t}\n\t}\n\n\t// get all tags for www.google.com\n\tset.MatchAll(\"www.google.com\", func(m suffix.Match) bool {\n\t\tfor _, tag := range tags[m] {\n\t\t\tfmt.Println(tag)\n\t\t}\n\t\treturn true\n\t})\n\n\t// Output:\n\t// websearch\n\t// google\n\t// subdomain\n}", "func (s *String) Map(f func(v Value) Value) Set {\n\tresult := NewSet()\n\tfor e := s.Enumerator(); e.MoveNext(); {\n\t\tresult = result.With(f(e.Current()))\n\t}\n\treturn result\n}", "func (m Map) Set(k Name, v interface{}) error {\n\tswitch v.(type) {\n\tcase nil:\n\t\tm[k] = New(k, nil) // use default value\n\tcase uint8:\n\t\tm[k] = New(k, []byte{v.(uint8)})\n\tcase int:\n\t\tm[k] = New(k, []byte{uint8(v.(int))})\n\tcase string:\n\t\tm[k] = New(k, []byte(v.(string)))\n\tcase []byte:\n\t\tm[k] = New(k, []byte(v.([]byte)))\n\tcase DeliverySetting:\n\t\tm[k] = New(k, []byte{uint8(v.(DeliverySetting))})\n\tcase Body:\n\t\tm[k] = v.(Body)\n\tcase pdutext.Codec:\n\t\tc := v.(pdutext.Codec)\n\t\tm[k] = New(k, c.Encode())\n\t\tif k == ShortMessage {\n\t\t\tm[DataCoding] = &Fixed{Data: 
uint8(c.Type())}\n\t\t}\n\tdefault:\n\t\treturn fmt.Errorf(\"unsupported field data: %#v\", v)\n\t}\n\tif k == ShortMessage {\n\t\tm[SMLength] = &Fixed{Data: uint8(m[k].Len())}\n\t}\n\treturn nil\n}", "func (b UpdateBuilder) SetMap(clauses map[string]interface{}) UpdateCondition {\n\tkeys := make([]string, len(clauses))\n\ti := 0\n\tfor key := range clauses {\n\t\tkeys[i] = key\n\t\ti++\n\t}\n\tsort.Strings(keys)\n\tfor _, key := range keys {\n\t\tval, _ := clauses[key]\n\t\tb = b.Set(key, val).(UpdateBuilder)\n\t}\n\treturn b\n}", "func New(mds ...map[string]string) Metadata {\n\tmd := Metadata{}\n\tfor _, m := range mds {\n\t\tfor k, v := range m {\n\t\t\tmd.Set(k, v)\n\t\t}\n\t}\n\treturn md\n}", "func (mr MrImpl) Map(key, value string) (result []mapreduce.KeyValue) {\n\tvals := strings.Split(value, \" \")\n\tif len(vals) != 3 {\n\t\treturn\n\t}\n\n\tnumbers := strings.Split(vals[2], \",\")\n\tif len(numbers) != 6 {\n\t\tfmt.Printf(\"wrong lotto results format: %s\\n\", vals[2])\n\t\treturn\n\t}\n\n\tresult = make([]mapreduce.KeyValue, 0, 6)\n\tfor _, w := range numbers {\n\t\tresult = append(result, mapreduce.KeyValue{Key: w, Value: strconv.Itoa(1)})\n\t}\n\treturn\n}", "func New(vals ...interface{}) Set {\n\ts := &setImpl{\n\t\tset: make(map[interface{}]struct{}, 0),\n\t}\n\tfor _, i := range vals {\n\t\ts.Insert(i)\n\t}\n\treturn s\n}", "func New() StringSet {\n\treturn &mapStringSet{\n\t\tstorage: make(map[string]uint64),\n\t}\n}", "func (e *exprHelper) NewMap(entries ...ast.EntryExpr) ast.Expr {\n\treturn e.exprFactory.NewMap(e.nextMacroID(), entries)\n}", "func mapCreator2() {\n\n\tvar bootstrap2 = make(map[string]float64)\n\n\tbootstrap2[\"this is fun\"] = 123e9\n\n\tfmt.Println(bootstrap2)\n}", "func NewSet(timestamp Nanotime, values map[string]struct{}, source Source, tags Tags) Set {\n\treturn Set{Values: values, Timestamp: timestamp, Source: source, Tags: tags.Copy()}\n}", "func NewAttributeMap(kv ...string) (map[string]interface{}, error) {\n\tm := make(map[string]interface{}, 0)\n\tfor _, v := range kv {\n\t\tvv := strings.Split(v, \":\")\n\t\tif len(vv) != 2 {\n\t\t\treturn nil, errors.New(\"attribute not \\\"name:value\\\" pair: \" + v)\n\t\t}\n\t\t// attributes are stored as keys prepended with hyphen\n\t\tm[\"-\"+vv[0]] = interface{}(vv[1])\n\t}\n\treturn m, nil\n}", "func New(getkey func(value interface{}) interface{}, vtype string) *SSet {\n\tvar set SSet\n\tset.list = arraylist.New()\n\tset.m = hashmap.New()\n\tset.m_index = make(map[interface{}]int)\n\tset.f = getkey\n\tset.item_type = vtype\n\tset.createline = time.Now().Unix()\n\treturn &set\n}" ]
[ "0.77125484", "0.71437055", "0.68389386", "0.6703049", "0.66405773", "0.64570063", "0.6422006", "0.6218442", "0.6120103", "0.6069982", "0.6047246", "0.59845877", "0.5967449", "0.59444195", "0.58951205", "0.5894368", "0.5869038", "0.5853379", "0.5791264", "0.5786959", "0.57680655", "0.5666097", "0.5664267", "0.5652879", "0.5650242", "0.5611836", "0.5602836", "0.55755216", "0.55616486", "0.55439514", "0.5538398", "0.55313534", "0.5530038", "0.552788", "0.55255455", "0.5514836", "0.5491388", "0.5473114", "0.5459972", "0.5458538", "0.54487765", "0.5447464", "0.54304796", "0.5428946", "0.5408746", "0.53772897", "0.53669137", "0.53577447", "0.53252184", "0.53169936", "0.5312663", "0.531232", "0.53111315", "0.5307625", "0.529461", "0.5283975", "0.5282101", "0.52772796", "0.5275599", "0.52754533", "0.5268799", "0.52555907", "0.5245184", "0.523124", "0.5200408", "0.5196151", "0.5167699", "0.51666754", "0.5163744", "0.5163189", "0.5157083", "0.5150446", "0.5147969", "0.51370996", "0.51259977", "0.5119226", "0.5119059", "0.51130486", "0.5108652", "0.510038", "0.5090141", "0.50874525", "0.50848544", "0.5082294", "0.5076811", "0.50712216", "0.50697273", "0.5067313", "0.5066816", "0.5066305", "0.5066213", "0.5058055", "0.5052922", "0.50488365", "0.50438625", "0.50305814", "0.50286233", "0.50234526", "0.5014464", "0.5012897" ]
0.71328926
2
Len returns the number of entries in the directory.
func (d Dir) Len() int { return len(d.contents) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (d DirEntries) Len() int { return len(d) }", "func (d DirInfos) Len() int {\n\treturn len(d)\n}", "func Size() int {\n\treturn len(directory)\n}", "func (d Directory) Size() int { return binary.Size(d) }", "func (s AllDirectoryData) Len() int {\n\treturn len(s.Data)\n}", "func (e DirectoryEntry) Size() int { return binary.Size(e) }", "func (l Logfiles) Len() int { return len(l) }", "func (p path) Len() int {\n\treturn len(p)\n}", "func (f FileInfos) Len() int {\n\treturn len(f)\n}", "func (paths Paths) Len() int {\n\treturn len(paths)\n}", "func (sc *Scavenger) Len() int {\n\tsc.mu.Lock()\n\tn := len(sc.entries)\n\tsc.mu.Unlock()\n\treturn n\n}", "func (p Path) Len() int {\n\treturn len(p)\n}", "func (p Path) Len() int {\n\treturn len(p)\n}", "func (r *Root) Len() uint64 {\n\treturn r.count\n}", "func (d *Dirent) Size() int {\n\treturn direntSize + len(d.Name) + 8\n}", "func (t *Tree) Len() int { return t.Count }", "func (l *DList) Len() int { return l.n }", "func (sp crSortedPaths) Len() int {\n\treturn len(sp)\n}", "func (s *Store) Len(ctx context.Context) (int64, error) {\n\tvar nb int64\n\tif err := s.List(ctx, \"\", func(string) error {\n\t\tnb++\n\t\treturn nil\n\t}); err != nil {\n\t\treturn 0, err\n\t}\n\treturn nb, nil\n}", "func (files FilesByDate) Len() int {\n\treturn len(files)\n}", "func (t *Tree) Len() int {\n\treturn t.Count\n}", "func (h Handle) Len() int {\n\tl := 8 + 8 + 4 + len(h.Type) + len(h.Name)\n\tif h.MD != nil {\n\t\tswitch h.MD.(type) {\n\t\tcase *AlpcPortInfo:\n\t\t\tl += 16\n\t\tcase *MutantInfo:\n\t\t\tl += 5\n\t\tcase *FileInfo:\n\t\t\tl++\n\t\t}\n\t}\n\treturn l\n}", "func (m orderedMounts) Len() int {\n\treturn len(m)\n}", "func (s *FileSet) Len() int {\n\treturn len(s.files)\n}", "func (shortlist *Shortlist) Len() int {\n\tlength := 0\n\tfor _, entry := range shortlist.Entries {\n\t\tif entry != nil {\n\t\t\tlength++\n\t\t}\n\t}\n\treturn length\n}", "func (n Name) Len() int {\n\tvar length int\n\tfor _, l := range n {\n\t\tlength += l.Len()\n\t}\n\treturn length\n}", "func (e *BackupEnv) GetDirSize(source string) (int64) {\n directory, _ := os.Open(source);\n var sum int64 = 0;\n defer directory.Close();\n\n objects, _ := directory.Readdir(-1)\n for _, obj := range objects {\n if obj.IsDir() {\n sum += e.GetDirSize(source + \"/\" + obj.Name());\n } else {\n stat, _ := os.Stat(source + \"/\" + obj.Name());\n sum += stat.Size();\n }\n }\n\n return sum;\n}", "func (l LDAPDN) size() int {\n\treturn LDAPString(l).size()\n}", "func (lm *LevelMetadata) Len() int {\n\treturn lm.tree.Count()\n}", "func (list *ConcurrentFileMetaDataList) Length() int {\n\treturn len(list.files)\n}", "func (j *_Journal) Length() int {\n\treturn len(j.entries)\n}", "func (s *runLogEntrySorter) Len() int {\n\treturn len(s.entries)\n}", "func (f *AnonymousFile) Len() int {\n\treturn len(f.contents)\n}", "func (d *digest) Len() int64 {\n\treturn int64(d.len)\n}", "func (c *Cache) Len() int {\n\treturn len(c.entries)\n}", "func (bl NamedRepositoryList) Len() int { return len(bl) }", "func (fs *FileSystem) Len(queryCustom ...string) (l int, err error) {\n\t// prepare statement\n\tquery := \"SELECT COUNT(name) FROM FS\"\n\tif len(queryCustom) > 0 {\n\t\tquery = queryCustom[0]\n\t}\n\tstmt, err := fs.db.Prepare(query)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"preparing query: \"+query)\n\t\treturn\n\t}\n\n\tdefer stmt.Close()\n\trows, err := stmt.Query()\n\tif err != nil {\n\t\terr = errors.Wrap(err, query)\n\t\treturn\n\t}\n\n\t// loop through rows\n\tdefer 
rows.Close()\n\tfor rows.Next() {\n\t\terr = rows.Scan(&l)\n\t\tif err != nil {\n\t\t\terr = errors.Wrap(err, \"getRows\")\n\t\t\treturn\n\t\t}\n\t}\n\terr = rows.Err()\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"getRows\")\n\t}\n\treturn\n}", "func (s papertrailsByName) Len() int { return len(s) }", "func (i *Item) Size() int64 { return int64(i.directoryEntry.FileSize) }", "func (a *BooleanArchive) Len() int {\n\treturn a.size\n}", "func (lc *LruCache) Len() uint {\n\treturn lc.LruStore.Len()\n}", "func (storage *Storage) Len() (n int) {\n\tstorage.mutex.Lock()\n\tn = storage.lruList.Len()\n\tstorage.mutex.Unlock()\n\treturn\n}", "func (g *gnmiPath) Len() int {\n\tif g.isStringSlicePath() {\n\t\treturn len(g.stringSlicePath)\n\t}\n\treturn len(g.pathElemPath)\n}", "func Size(targetDir string) (int64, error) {\n\tfm, err := Walk(targetDir)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tvar size int64\n\tfor _, v := range fm {\n\t\tsize += v.Size()\n\t}\n\treturn size, nil\n}", "func DirCount(dir string) int {\n\tif !DirExists(dir) {\n\t\treturn 0\n\t}\n\tentries, err := os.ReadDir(dir)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn len(entries)\n}", "func (p *PakRun) Len() int\t\t{ return len(p.Files) }", "func (fs *FileSet) Length() int {\n\tsum := 0\n\tfor _, c := range fs.fileChannels {\n\t\tsum += len(c)\n\t}\n\treturn sum\n}", "func (ne nodeEntries) Len() int { return len(ne) }", "func (ac *AuthContext) Len() int {\n\tl := 1\n\tif ac.Parent != nil {\n\t\tl += ac.Parent.Len()\n\t}\n\treturn l\n}", "func (this byDmnInfo) Len() int {\n\treturn len(this)\n}", "func (path *Path) Length() int {\n\treturn path.length\n}", "func (fs Fruits) Len() int { return len(fs) }", "func (l *SList) Len() int { return l.n }", "func (l *Log) Len() int64 {\n\treturn l.count\n}", "func (r RelativeLDAPDN) size() int {\n\treturn LDAPString(r).size()\n}", "func (args *Args) len() int {\n\treturn len(args.items)\n}", "func (u URI) size() int {\n\treturn LDAPString(u).size()\n}", "func (r *aggloSorter) Len() int {\n\treturn len(r.perm)\n}", "func (t *Trie) Len() uint32 {\n\tif t.root == nil {\n\t\treturn 0\n\t}\n\treturn t.root.count\n}", "func (counter *Counter) Len() int {\n\tcounter.mutex.Lock()\n\tdefer counter.mutex.Unlock()\n\treturn len(counter.entries)\n}", "func (t AlterReplicaLogDir34) Size(version int16) int32 {\n\tvar sz int32\n\tsz += sizeof.String(t.Path) // Path\n\tsz += sizeof.ArrayLength // Topics\n\tfor i := len(t.Topics) - 1; i >= 0; i-- {\n\t\tsz += t.Topics[i].Size(version)\n\t}\n\treturn sz\n}", "func (fib *Fib) Len() int {\n\treturn fib.tree.CountEntries()\n}", "func (s *Store) Len() int {\n\tcnt := 0\n\tdl := len(s.delim)\n\tfor i := range s.lines {\n\t\tcnt += s.lines[i].Len() + dl\n\t}\n\treturn cnt\n}", "func (s fqnSorter) Len() int {\n\treturn len(s)\n}", "func (l *List) Len() int {\n return l.size\n}", "func (c *Cache) Len() int {\n\tif c == nil {\n\t\treturn 0\n\t}\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\treturn c.lru.Len() + c.mfa.Len()\n}", "func (c *LRU) Len() int {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\treturn c.ll.Len()\n}", "func (l List) Length() (length uint) {\n\tcurrent := l.Root\n\n\tfor current != nil {\n\t\tlength++\n\t\tcurrent = current.Next\n\t}\n\treturn\n}", "func (r *Reader) Len() int {\n\tif r.file_v0 != nil {\n\t\treturn r.file_v0.Len()\n\t}\n\treturn int(r.header.num)\n}", "func (lr *logRecorder) Len() int {\n\tlr.mutex.Lock()\n\tdefer lr.mutex.Unlock()\n\n\treturn len(lr.entries)\n}", "func (c *DirentCache) Size() uint64 {\n\tif c == nil {\n\t\treturn 
0\n\t}\n\tc.mu.Lock()\n\tsize := c.currentSize\n\tc.mu.Unlock()\n\treturn size\n}", "func (lru *KeyLRU) Len() int {\n\treturn len(lru.m)\n}", "func (lru *LRU) Len() int {\n\treturn lru.list.Len()\n}", "func (idx *Tree) Len() (count int) {\n\tidx.Stop()\n\tcount = int(idx.liveObjects)\n\tfor _, a := range idx.allocators {\n\t\tcount += int(a.itemCounter)\n\t}\n\tidx.Start()\n\treturn\n}", "func (f *fakeOpa) Len() int {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\treturn len(f.data)\n}", "func (i *Inode) Size() int64 {\n\tif i.IsDir() {\n\t\treturn 0\n\t}\n\treturn i.dataStore.GetSize(i.path)\n}", "func (c *Cache) Len() int {\r\n\tc.Lock()\r\n\tdefer c.Unlock()\r\n\r\n\tif c.cache == nil {\r\n\t\treturn 0\r\n\t}\r\n\treturn c.ll.Len()\r\n}", "func (s *Store) Len() int {\n\ts.access.RLock()\n\tdefer s.access.RUnlock()\n\n\treturn len(s.data)\n}", "func (r *Reader) Len() (length int) {\n\t// Lazy open.\n\terr := r.open()\n\tif err != nil {\n\t\treturn\n\t}\n\tn, _ := r.len()\n\tlength = int(n)\n\treturn\n}", "func (c *Cache) Len() int {\n\treturn c.ll.Len()\n}", "func (h CRConfigHistoryThreadsafe) Len() uint64 {\n\tif h.length == nil {\n\t\treturn 0\n\t}\n\treturn *h.length\n}", "func Len(scope common.Scope, args ...interface{}) interface{} {\n\tif s, ok := args[0].(string); ok {\n\t\treturn int64(len(s))\n\t}\n\treturn 0\n}", "func (a byCount) Len() int { return len(a) }", "func (tt *TtTable) Len() uint64 {\n\treturn tt.numberOfEntries\n}", "func (c *Cache) Length(k string) int {\n\treturn len(c.entries[k])\n}", "func (f *IndexFile) Size() int64 { return int64(len(f.data)) }", "func (radius *RADIUS) Len() (int, error) {\n\tn := radiusMinimumRecordSizeInBytes\n\tfor _, v := range radius.Attributes {\n\t\talen, err := attributeValueLength(v.Value)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tn += int(alen) + 2 // Added Type and Length\n\t}\n\treturn n, nil\n}", "func (ds *KVStorage) Len() int {\n\treturn len(ds.data)\n}", "func (ms *MultiSorter) Len() int {\n\treturn len(ms.films)\n}", "func (b *BTree) Len() int {\n\tb.mtx.Lock()\n\tdefer b.mtx.Unlock()\n\n\treturn int(b.rootNod.GetLength())\n}", "func (l *List) Len() int {\n\treturn l.size\n}", "func (s *shard) len() uint64 {\n\ts.rwMutex.RLock()\n\tlength := uint64(len(s.entryIndexes))\n\ts.rwMutex.RUnlock()\n\n\treturn length\n}", "func (n Nodes) Len() int", "func (registry *Registry) Len() int {\n\tregistry.lock.RLock()\n\tdefer registry.lock.RUnlock()\n\treturn len(registry.db)\n}", "func (o openList) Len() int {\n\treturn len(o)\n}", "func (sl *List) Len() int { return sl.len }", "func (l DNA8List) Len() int { return len(l) }", "func (ms *multiSorter) Len() int {\n\treturn len(ms.usage)\n}", "func (l *List) Len() int { return l.len }", "func (s SortedNamespaces) Len() int {\n\treturn len(s)\n}" ]
[ "0.8751995", "0.7923306", "0.771949", "0.7581758", "0.74930316", "0.7462367", "0.7380523", "0.72741103", "0.7082246", "0.6989405", "0.6983188", "0.6956884", "0.6956884", "0.69349474", "0.69129854", "0.69084084", "0.6898496", "0.6883263", "0.68695086", "0.6865111", "0.68371457", "0.6818645", "0.6807195", "0.67518127", "0.6745181", "0.67440736", "0.67346895", "0.67236733", "0.67159337", "0.66855246", "0.6678051", "0.6624784", "0.6618561", "0.6616085", "0.6613301", "0.6602759", "0.6602399", "0.6600046", "0.6598689", "0.65849084", "0.65814483", "0.65454954", "0.6542566", "0.6536563", "0.65345937", "0.6525994", "0.6510184", "0.6504921", "0.65021974", "0.6501974", "0.6500714", "0.6490132", "0.64882857", "0.6486678", "0.6485773", "0.6471449", "0.6470664", "0.6461963", "0.6456951", "0.642212", "0.6421646", "0.6404419", "0.64038527", "0.6402329", "0.64001375", "0.6394848", "0.6384847", "0.6370707", "0.6369696", "0.6366728", "0.63489676", "0.6344985", "0.63437134", "0.6343403", "0.6342366", "0.63420177", "0.63375765", "0.6335488", "0.63336235", "0.63266295", "0.63182306", "0.63164556", "0.6315529", "0.6312988", "0.6312708", "0.6304693", "0.6304306", "0.6303334", "0.6302573", "0.63016945", "0.62992597", "0.6293577", "0.62873197", "0.62848926", "0.6271983", "0.6266003", "0.6261291", "0.6261163", "0.6251643", "0.62442935" ]
0.8492832
1
Set sets the directory's entry for the provided path. Set overwrites any previous file set at path.
func (d *Dir) Set(path string, file reflow.File) { if d.contents == nil { d.contents = make(map[string]reflow.File) } d.contents[path] = file }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (f *IndexFile) SetPath(path string) { f.path = path }", "func (l *DirName) Set(dir string) error {\n\tif len(dir) > 0 && dir[0] == '~' {\n\t\treturn fmt.Errorf(\"log directory cannot start with '~': %s\", dir)\n\t}\n\tif len(dir) > 0 {\n\t\tabsDir, err := filepath.Abs(dir)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdir = absDir\n\t}\n\tl.Lock()\n\tdefer l.Unlock()\n\tl.name = dir\n\treturn nil\n}", "func (l *LogFile) Set(path string) {\n\tl.Path = filepath.Join(path, logsFile)\n}", "func (f *LogFile) SetPath(path string) { f.path = path }", "func SetPath(p string) {\n\tcurrentPath = \"\"\n\tbeginPath = p\n\tdirsAmount = 0\n}", "func SetPath(path string) {\n\tc.setPath(path)\n}", "func SetPath(p string) error {\n\tif info, err := os.Stat(p); err != nil {\n\t\treturn err\n\t} else if !info.IsDir() {\n\t\treturn fmt.Errorf(\"path for persistence is not directory\")\n\t}\n\tdataPath = p\n\treturn nil\n}", "func SetPath(newpath string) {\n\tpath = newpath\n}", "func (z *ZkPlus) Set(path string, data []byte, version int32) (*zk.Stat, error) {\n\tz.forPath(path).Log(logkey.ZkMethod, \"Set\")\n\treturn z.blockOnConn().Set(z.realPath(path), data, version)\n}", "func (m *Win32LobAppFileSystemDetection) SetPath(value *string)() {\n err := m.GetBackingStore().Set(\"path\", value)\n if err != nil {\n panic(err)\n }\n}", "func (kvs *FS) Set(key string, value []byte) error {\n\treturn lockedfile.Write(kvs.filename(key), bytes.NewReader(value), 0600)\n}", "func (c *FileConfigReader) SetPath(path string) {\n\tc.path = path\n}", "func (mzk *MockZK) Set(path string, data []byte, version int32) (*zk.Stat, error) {\n\tmzk.Args = append(mzk.Args, []interface{}{\n\t\t\"set\",\n\t\tpath,\n\t\tdata,\n\t\tversion,\n\t})\n\treturn mzk.SetFn(path, data, version)\n}", "func (_m *requestHeaderMapUpdatable) SetPath(path string) {\n\t_m.Called(path)\n}", "func (fs *FSCache) Set(key string, content []byte) error {\n\treturn ioutil.WriteFile(\n\t\tpath.Join(fs.Root, key),\n\t\tcontent,\n\t\t0600,\n\t)\n}", "func (c *Client) SetPath(path string) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tc.path = path\n}", "func SetPackagePath(l *lua.LState, dir string) error {\n\treturn SetPackagePathRaw(l, PackagePath(dir))\n}", "func (v Values) SetAtPath(path string, value interface{}) error {\n\tsegs := strings.Split(path, \".\")\n\terr := v.setAtPath(segs, value)\n\tif err != nil {\n\t\treturn errors.Errorf(\"error adding value at path %q: %s\", path, err)\n\t}\n\treturn nil\n}", "func (pm *PathMap) set(path string, value interface{}) {\n\tparts := strings.Split(path, svnSep)\n\tdir, name := parts[:len(parts)-1], parts[len(parts)-1]\n\tpm._createTree(dir).blobs[name] = value\n}", "func (r *Input) SetPath(path string) error {\n\tquery, err := fetch.Parse(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.Lock()\n\tr.Path = query\n\tr.Unlock()\n\treturn nil\n}", "func (j Json) Set(path string, value interface{}) (string, error) {\n\treturn sjson.Set(string(j), path, value)\n}", "func (c *Config) SetPath(path string) error {\n\tvar (\n\t\tisDir = false\n\t\trealPath = \"\"\n\t)\n\tif file := gres.Get(path); file != nil {\n\t\trealPath = path\n\t\tisDir = file.FileInfo().IsDir()\n\t} else {\n\t\t// Absolute path.\n\t\trealPath = gfile.RealPath(path)\n\t\tif realPath == \"\" {\n\t\t\t// Relative path.\n\t\t\tc.searchPaths.RLockFunc(func(array []string) {\n\t\t\t\tfor _, v := range array {\n\t\t\t\t\tif path, _ := gspath.Search(v, path); path != \"\" {\n\t\t\t\t\t\trealPath = 
path\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t\tif realPath != \"\" {\n\t\t\tisDir = gfile.IsDir(realPath)\n\t\t}\n\t}\n\t// Path not exist.\n\tif realPath == \"\" {\n\t\tbuffer := bytes.NewBuffer(nil)\n\t\tif c.searchPaths.Len() > 0 {\n\t\t\tbuffer.WriteString(fmt.Sprintf(\"[gcfg] SetPath failed: cannot find directory \\\"%s\\\" in following paths:\", path))\n\t\t\tc.searchPaths.RLockFunc(func(array []string) {\n\t\t\t\tfor k, v := range array {\n\t\t\t\t\tbuffer.WriteString(fmt.Sprintf(\"\\n%d. %s\", k+1, v))\n\t\t\t\t}\n\t\t\t})\n\t\t} else {\n\t\t\tbuffer.WriteString(fmt.Sprintf(`[gcfg] SetPath failed: path \"%s\" does not exist`, path))\n\t\t}\n\t\terr := gerror.New(buffer.String())\n\t\tif errorPrint() {\n\t\t\tglog.Error(err)\n\t\t}\n\t\treturn err\n\t}\n\t// Should be a directory.\n\tif !isDir {\n\t\terr := fmt.Errorf(`[gcfg] SetPath failed: path \"%s\" should be directory type`, path)\n\t\tif errorPrint() {\n\t\t\tglog.Error(err)\n\t\t}\n\t\treturn err\n\t}\n\t// Repeated path check.\n\tif c.searchPaths.Search(realPath) != -1 {\n\t\treturn nil\n\t}\n\tc.jsonMap.Clear()\n\tc.searchPaths.Clear()\n\tc.searchPaths.Append(realPath)\n\tintlog.Print(context.TODO(), \"SetPath:\", realPath)\n\treturn nil\n}", "func (j *JSONData) SetPath(v interface{}, path ...string) error {\n\tjson, err := sj.NewJson(j.data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tjson.SetPath(path, v)\n\tbt, err := json.MarshalJSON()\n\tif err != nil {\n\t\treturn err\n\t}\n\tj.data = bt\n\treturn nil\n}", "func (b *Bucket) SetMetadataAtPath(pth string, md Metadata) {\n\tif b.Version == 0 {\n\t\treturn\n\t}\n\n\tx, ok := b.Metadata[pth]\n\tif ok {\n\t\tif md.Key != \"\" {\n\t\t\tx.Key = md.Key\n\t\t}\n\t\tif md.Roles != nil {\n\t\t\tx.Roles = md.Roles\n\t\t}\n\t\tif x.Info == nil {\n\t\t\tx.Info = md.Info\n\t\t} else if md.Info != nil {\n\t\t\tmergemap.Merge(x.Info, md.Info)\n\t\t}\n\t\tx.UpdatedAt = md.UpdatedAt\n\t\tb.Metadata[pth] = x\n\t} else {\n\t\tif md.Roles == nil {\n\t\t\tmd.Roles = make(map[did.DID]Role)\n\t\t}\n\t\tif md.Info == nil {\n\t\t\tmd.Info = make(map[string]interface{})\n\t\t}\n\t\tb.Metadata[pth] = md\n\t}\n}", "func (c *Cookie) SetPath(path string) {\n\tc.path = path\n}", "func (c *FileSystemCache) Set(data []byte, expire time.Duration, key ...string) error {\n\tif len(key) < 1 {\n\t\treturn fmt.Errorf(\"no key specified\")\n\t}\n\tfolder := c.keyPath(key[:len(key)-1]...)\n\t_, err := os.Stat(folder)\n\tif os.IsNotExist(err) {\n\t\tos.MkdirAll(folder, 0700)\n\t}\n\tif expire > 0 {\n\t\texpireTS := time.Now().Add(expire).UnixNano()\n\t\terr := ioutil.WriteFile(c.expirePath(key...), []byte(strconv.FormatInt(expireTS, 10)), 0600)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn ioutil.WriteFile(c.keyPath(key...), data, 0600)\n}", "func SetPath(permissions string) error {\n\tif permissions != \"default\" {\n\t\tpl, err := NewPermissionsLoader(permissions)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif globalPermissions != nil {\n\t\t\tglobalPermissions.Close()\n\t\t}\n\t\tglobalPermissions = pl\n\t\tif !pl.Get().Watch {\n\t\t\tglobalPermissions.Close() // This will still keep the permissions themselves in memory\n\t\t}\n\n\t} else {\n\t\tif globalPermissions != nil {\n\t\t\tglobalPermissions.Close()\n\t\t}\n\t\tglobalPermissions = nil\n\t}\n\treturn nil\n}", "func (c *Quago) setPath(path string) {\n\tc.path = path\n}", "func (c *FakeZkConn) Set(path string, data []byte, version int32) (*zk.Stat, error) {\n\tc.history.addToHistory(\"Set\", path, data, 
version)\n\treturn nil, nil\n}", "func (c *CmdReal) SetPath(path string) {\n\tc.cmd.Path = path\n}", "func (m *Resource) SetPath(name string) error {\n\tif len(m.name) > 0 {\n\t\treturn errors.New(\"name already set\")\n\t}\n\tname = strings.TrimSpace(strings.ToLower(s))\n\tmatched, err := regexp.MatchString(\"^[a-z]{4,15}$\", name)\n\tif err == nil && matched {\n\t\tm.name = name\n\t\treturn nil\n\t} else {\n\t\treturn errors.New(\"name does not match requirements and has not been set: must be ^[a-z]{4-15}$\")\n\t}\n}", "func (k *Item) SetPath(s string) {\n\tk.SetString(PathKey, s)\n}", "func (t *T) Set(key, value string) error {\n\tif _, err := t.file.WriteString(fmt.Sprintf(\"\\n%s,%s\", key, value)); err != nil {\n\t\treturn err\n\t}\n\tt.links[key] = value\n\treturn nil\n}", "func (b *Binary) SetPath(pathStr string) {\n\tif pathStr == \"\" {\n\t\treturn\n\t}\n\tref, err := url.Parse(pathStr)\n\tif err != nil {\n\t\treturn\n\t}\n\tb.url = b.url.ResolveReference(ref)\n}", "func (c *clientHandler) SetPath(path string) {\n\tc.path = path\n}", "func (self *Path) SetPath(src string, des string) {\n\tself.srcPath = src\n\tself.desPath = des\n}", "func (c *Config) Set(key string, value interface{}) {\n\tif key == \"\" {\n\t\treturn\n\t}\n\n\tpath := strings.Split(key, c.separator)\n\tinsert(c.mp, path, value)\n}", "func (c DirCollector) Set(k string, v *VDir) {\n\tc[k] = v\n}", "func (hook *StackHook) SetLogPath(path string) {\n\thook.lock.Lock()\n\tdefer hook.lock.Unlock()\n\thook.path = path\n\thook.writeToFile = true\n}", "func (c *FileCache) Set(key string, entity *CacheEntity) error {\n\tpath := c.GetCachePath(key)\n\tif err := os.MkdirAll(filepath.Dir(path), 0777); err != nil {\n\t\treturn errors.Wrap(err, \"failed to create directory for cache file\")\n\t}\n\n\t// Need to avoid race condition\n\tfile, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0666)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to open/create a cache file\")\n\t}\n\tdefer file.Close()\n\n\tf := bufio.NewWriter(file)\n\tdefer f.Flush()\n\tenc := gob.NewEncoder(f)\n\tif err = enc.Encode(entity); err != nil {\n\t\treturn errors.Wrap(err, \"failed to encode Entity via gob\")\n\t}\n\n\treturn nil\n}", "func (bam BlockAddressMap) Set(loc BlockLocation, path string) {\n\tif bam[loc.FileIndex] == nil {\n\t\tbam[loc.FileIndex] = make(map[int64]string)\n\t}\n\tbam[loc.FileIndex][loc.BlockIndex] = path\n}", "func (fm *FileMapMutex) Set(UUID string, file File) {\n\tfm.mu.Lock()\n\tdefer fm.mu.Unlock()\n\tfm.Files[UUID] = file\n}", "func (fs *FileStore) Set(key string, w io.WriterTo) error {\n\tkey = fs.mangleKey(key, true)\n\tvar (\n\t\tf *os.File\n\t\terr error\n\t)\n\tif fs.tmpDir != \"\" {\n\t\tf, err = os.CreateTemp(fs.tmpDir, \"keystore\")\n\t} else {\n\t\tf, err = os.Create(filepath.Join(fs.baseDir, key))\n\t}\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error opening file for writing: %w\", err)\n\t}\n\tif _, err = w.WriteTo(f); err != nil && err != io.EOF {\n\t\tf.Close()\n\t\treturn fmt.Errorf(\"error writing to file: %w\", err)\n\t} else if err = f.Close(); err != nil {\n\t\treturn fmt.Errorf(\"error closing file: %w\", err)\n\t}\n\tif fs.tmpDir != \"\" {\n\t\tfp := f.Name()\n\t\tif err = os.Rename(fp, filepath.Join(fs.baseDir, key)); err != nil {\n\t\t\tos.Remove(fp)\n\t\t\treturn fmt.Errorf(\"error moving tmp file: %w\", err)\n\t\t}\n\t}\n\treturn nil\n}", "func (p *Bucket) SetPath(path string) {\n\tp.path = path\n}", "func (i *Filename) Set(filename string) error 
{\n\tAssertIfFileExists(filename)\n\t*i = Filename(filename)\n\treturn nil\n}", "func (d *DiskStorage) Set(e Entry) {\n\td.Open()\n\td.memStorage.Set(e)\n\td.Save()\n}", "func (fcs *etcdStruct) Set(key, val string) error {\n\tif len(key) == 0 {\n\t\treturn ErrStoreKeyRequired\n\t}\n\n\t// counting on dotsToSlash to add initial '/' if necessary\n\tkey = dotsToSlash(key)\n\n\t_, err := fcs.client.Put(context.Background(), fcs.prefix+key, val)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (l *Level) Set(value string) error {\n\tv, err := strconv.Atoi(value)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t*l = Level(v)\n\treturn nil\n}", "func (m *FileMutation) SetPath(s string) {\n\tm._path = &s\n}", "func setPath(value string) {\n\tos.Setenv(pathEnvVar, value)\n}", "func (this *Value) SetPath(path string, val interface{}) {\n\n\tif this.parsedType == OBJECT {\n\t\tswitch parsedValue := this.parsedValue.(type) {\n\t\tcase map[string]*Value:\n\t\t\t// if we've already parsed the object, store it there\n\t\t\tswitch val := val.(type) {\n\t\t\tcase *Value:\n\t\t\t\tparsedValue[path] = val\n\t\t\tdefault:\n\t\t\t\tparsedValue[path] = NewValue(val)\n\t\t\t}\n\t\tcase nil:\n\t\t\t// if not store it in alias\n\t\t\tif this.alias == nil {\n\t\t\t\tthis.alias = make(map[string]*Value)\n\t\t\t}\n\t\t\tswitch val := val.(type) {\n\t\t\tcase *Value:\n\t\t\t\tthis.alias[path] = val\n\t\t\tdefault:\n\t\t\t\tthis.alias[path] = NewValue(val)\n\t\t\t}\n\n\t\t}\n\t}\n}", "func Set(object *astext.Object, path []string, value ast.Node) error {\n\tif len(path) == 0 {\n\t\treturn errors.New(\"path was empty\")\n\t}\n\n\tcurObj := object\n\n\tfor i, k := range path {\n\t\tfield, err := findField(curObj, k)\n\t\tif err != nil {\n\t\t\tswitch err.(type) {\n\t\t\tdefault:\n\t\t\t\treturn err\n\t\t\tcase *unknownField:\n\t\t\t\tfield, err = astext.CreateField(k)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfield.Hide = ast.ObjectFieldInherit\n\t\t\t\tcurObj.Fields = append(curObj.Fields, *field)\n\t\t\t}\n\t\t}\n\n\t\tif i == len(path)-1 {\n\t\t\tfield, _ = findField(curObj, k)\n\t\t\tif canUpdateObject(field.Expr2, value) {\n\t\t\t\treturn errors.New(\"can't set object to non object\")\n\t\t\t}\n\t\t\tfield.Expr2 = value\n\t\t\treturn nil\n\t\t}\n\n\t\tif field.Expr2 == nil {\n\t\t\tcurObj = &astext.Object{}\n\t\t\tfield.Expr2 = curObj\n\t\t} else if obj, ok := field.Expr2.(*astext.Object); ok {\n\t\t\tcurObj = obj\n\t\t} else {\n\t\t\treturn errors.Errorf(\"child is not an object at %q\", k)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (st *Store) Set(key, value []byte) error {\n\tst.lock.Lock()\n\tdefer st.lock.Unlock()\n\tif err := st.beforeSet(key, value); err != nil {\n\t\treturn err\n\t}\n\tif st.compression {\n\t\tvalue = compress(value)\n\t}\n\terr := st.store.Put(key, value, ds.ItemOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn st.writeToLogFile(key, value)\n}", "func (linux *Linux) FileSet(filePath, content string) error {\n\treturn ioutil.WriteFile(linux.applyChroot(filePath), []byte(content), 0777)\n}", "func SetConfig(file string) {\n\tPath.configFile = file\n}", "func (c *walkerContext) setCurrentDir(path string) {\n\tdirpath := filepath.Dir(path)\n\tif path != c.current.Path && dirpath != c.current.Path {\n\t\t// We have descended into a new directory, so set current to the\n\t\t// new entry.\n\t\tdir, ok := c.all[dirpath]\n\t\tif !ok {\n\t\t\tpanic(fmt.Sprintf(\"Fatal: Could not find directory: %s\", dirpath))\n\t\t}\n\t\tc.current = dir\n\t}\n}", "func 
(m *MetaSpec) Set(key string, value string) error {\n\tif m.IsExternal() {\n\t\treturn errors.New(\"can only meta set current build meta\")\n\t}\n\tmetaFilePath := m.MetaFilePath()\n\tvar previousMeta map[string]interface{}\n\n\tmetaJSON, err := ioutil.ReadFile(metaFilePath)\n\t// Not exist directory\n\tif err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\treturn err\n\t\t}\n\t\t_, err := m.SetupDir()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Initialize interface if first setting meta\n\t\tpreviousMeta = make(map[string]interface{})\n\t} else {\n\t\t// Exist meta.json\n\t\tif len(metaJSON) != 0 {\n\t\t\terr = json.Unmarshal(metaJSON, &previousMeta)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\t// Exist meta.json but it is empty\n\t\t\tpreviousMeta = make(map[string]interface{})\n\t\t}\n\t}\n\n\tkey, parsedValue := setMetaValueRecursive(key, value, previousMeta, m.JSONValue)\n\tpreviousMeta[key] = parsedValue\n\n\tresultJSON, err := json.Marshal(previousMeta)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(metaFilePath, resultJSON, 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (fc *fileCache) Set(key, value string, ttl int) {\n\tfc.cache.Set(key, &cacheObject{\n\t\tValue: value,\n\t\tTimestamp: time.Now().Unix(),\n\t\tTTL: ttl,\n\t})\n\tfc.dirty = true\n}", "func (o *FileInfoCreateParams) SetPath(path string) {\n\to.Path = path\n}", "func (o *GetNdmpSettingsVariableParams) SetPath(path *string) {\n\to.Path = path\n}", "func (l *DolceLog) Set(key string, value []byte) {\n\tl.logMutex.Lock()\n\tdefer l.logMutex.Unlock()\n\n\twr := bufio.NewWriter(l.file)\n\t_, err := fmt.Fprintf(wr, \"%d S %s %s\\n\", l.index, key, value)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tl.index++\n\n\terr = wr.Flush()\n\tif err != nil {\n\t\tlog.Fatal(\"Data not writen in file.\")\n\t}\n}", "func (db *DB) Set(key, value string) error {\n\tif db.parent != nil {\n\t\treturn db.parent.Set(path.Join(db.scope, key), value)\n\t}\n\tp := NewPipeline(db.repo)\n\tnewTree, err := p.Base(db.tree).Set(path.Join(db.scope, key), value).Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdb.tree = newTree\n\treturn nil\n}", "func (path P) Set(obj, value interface{}) error {\n\tfn := func(p P, ctx *Context) (bool, error) {\n\t\treturn false, set(p, ctx, reflect.ValueOf(value))\n\t}\n\n\treturn path.Apply(obj, &Context{CreateIfMissing: true, Fn: fn})\n}", "func (p *Paths) Set(value string) error {\n\tfor _, val := range strings.Split(value, \",\") {\n\t\t*p = append(*p, val)\n\t}\n\treturn nil\n}", "func (o *GetZippedParams) SetPath(path string) {\n\to.Path = path\n}", "func (u *URL) SetPath(path string) {\n\tu.URL.Path = path\n}", "func (c *EnvConfigFromFile) Set(value string) error {\n\tc.FilePath = value\n\tvar fileContents []byte\n\tvar err error\n\tif c.FilePath != \"\" {\n\t\tfileContents, err = ioutil.ReadFile(c.FilePath)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"envconfigfromfile: %s\", err.Error())\n\t\t} else {\n\t\t\tc.Value = string(fileContents)\n\t\t}\n\t}\n\treturn err\n}", "func (v *IADsNameTranslate) Set(adsPath string, setType uint32) (err error) {\n\treturn ole.NewError(ole.E_NOTIMPL)\n}", "func (s *AccessRuleService) Set(path string, permission api.Permission, accountName string) (*api.AccessRule, error) {\n\treturn s.Setter.Set(path, permission, accountName)\n}", "func (t *PathTree) Put(path string, value interface{}) interface{} {\n\treturn t.put(treePaths(path), value)\n}", "func SetDir(dir string) {\n\tif dir == \"\" 
{\n\t\tlog.Warn(\"empty-dir\")\n\t\treturn\n\t}\n\twritingDir = dir\n\tlog.Info(\"set-dir\", \"dir\", dir)\n\tos.MkdirAll(dir, os.ModePerm)\n}", "func (c SplitSize) Set(path string, object Object) error {\n\tobjectSize := len(object.Data)\n\tfor _, child := range c {\n\t\tif objectSize <= child.MaxSize || child.MaxSize == 0 {\n\t\t\treturn child.Cache.Set(path, object)\n\t\t}\n\t}\n\n\t// No cache is large enough to hold this object, but that's ok.\n\treturn nil\n}", "func (c *SDConfig) SetDirectory(dir string) {\n\tfor i, file := range c.Files {\n\t\tc.Files[i] = config.JoinDir(dir, file)\n\t}\n}", "func (c *Client) SetDir(prefix, name string) {\n\tc.Lock()\n\tc.dir = &models.Directory{\n\t\tBase: fmt.Sprintf(\"%v/%v\", prefix, name),\n\t\tElection: fmt.Sprintf(\"%v/%v/%v\", prefix, name, DirectoryElection),\n\t\tRunning: fmt.Sprintf(\"%v/%v/%v\", prefix, name, DirectoryRunning),\n\t\tQueue: fmt.Sprintf(\"%v/%v/%v\", prefix, name, DirectoryQueue),\n\t\tNodes: fmt.Sprintf(\"%v/%v/%v\", prefix, name, DirectoryNodes),\n\t\tMasters: fmt.Sprintf(\"%v/%v/%v\", prefix, name, DirectoryMasters),\n\t}\n\tc.Unlock()\n}", "func (k Keeper) setValue(ctx sdk.Context, accessPath *vm_grpc.VMAccessPath, value []byte) {\n\tstore := ctx.KVStore(k.storeKey)\n\tkey := common_vm.GetPathKey(accessPath)\n\n\tstore.Set(key, value)\n}", "func (store *EntryStore) Set(id []byte, entry *hexalog.Entry) error {\n\tval, err := proto.Marshal(entry)\n\tif err == nil {\n\t\terr = store.db.Put(store.wo, id, val)\n\t}\n\treturn err\n}", "func (cf *ConfigFile) Set(key string, val interface{}) error {\n\tc, err := cf.Open()\n\tif err != nil {\n\t\tswitch err.(type) {\n\t\tcase *os.PathError:\n\t\t\terr := cf.createConfigFile()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tc, _ = cf.Open()\n\t\tdefault:\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\tb, err := ioutil.ReadAll(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar m map[string]interface{}\n\terr = yaml.Unmarshal(b, &m)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif m == nil {\n\t\tm = map[string]interface{}{}\n\t}\n\n\tm[key] = val\n\n\tout, err := yaml.Marshal(m)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn ioutil.WriteFile(cf.location, out, 0600)\n}", "func (l *Lockbox) SetValue(path, value []byte) error {\n\tif l.Locked {\n\t\treturn errors.New(\"cannot set value while lockbox is locked\")\n\t}\n\n\t// encrypt the provided value with the current users encryption key.\n\tencval, err := encrypt(value, l.CurrentUser.EncryptionKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// store the encrypted value at the desired path\n\treturn l.Store.Update(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(l.CurrentNamespace))\n\t\terr := b.Put(path, encval)\n\t\treturn err\n\t})\n}", "func (c *fileStorageClient) Set(key string, value []byte) error {\n\treturn c.Batch(SetOperation(key, value))\n}", "func (c DeferDirCollector) Set(k string, v func() *VDir) {\n\tc[k] = v\n}", "func (k Keeper) SetValue(ctx sdk.Context, accessPath *vm_grpc.VMAccessPath, value []byte) {\n\tk.modulePerms.AutoCheck(types.PermStorageWrite)\n\n\tk.setValue(ctx, accessPath, value)\n}", "func (options *EditLoadBalancerMonitorOptions) SetPath(path string) *EditLoadBalancerMonitorOptions {\n\toptions.Path = core.StringPtr(path)\n\treturn options\n}", "func (m *Message) SetPath(s []string) {\n\tm.Options = m.Options.Minus(URIPath)\n\tfor _, p := range s {\n\t\tm.Options = append(m.Options, Option{URIPath, p})\n\t}\n}", "func (mc *MenuCreate) SetPath(s string) *MenuCreate 
{\n\tmc.mutation.SetPath(s)\n\treturn mc\n}", "func SetFromPath(node interface{}, path string, out interface{}) (bool, error) {\n\tval, found, err := GetFromStructPath(node, path)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif !found {\n\t\treturn false, nil\n\t}\n\n\treturn true, Set(val, out)\n}", "func (c FileCollector) Set(k string, v *VFile) {\n\tc[k] = v\n}", "func (options *CreateLoadBalancerMonitorOptions) SetPath(path string) *CreateLoadBalancerMonitorOptions {\n\toptions.Path = core.StringPtr(path)\n\treturn options\n}", "func (s *AccessRuleSetter) Set(path string, permission api.Permission, name string) (*api.AccessRule, error) {\n\ts.ArgPath = path\n\ts.ArgPermission = permission\n\ts.ArgName = name\n\treturn s.ReturnsAccessRule, s.Err\n}", "func (ln *LocalNode) Set(e qnr.Entry) {\n\tln.mu.Lock()\n\tdefer ln.mu.Unlock()\n\n\tln.set(e)\n}", "func (f *RPCFile) SetMediaPath(mediaDirectory string) error {\n\tif f.file == nil {\n\t\terr := errors.New(\"No grpc file. Construct with NewFile\")\n\t\tlog.Fatal().Stack().Err(err).Msg(\"\")\n\t\treturn err\n\t}\n\tf.file.MediaDirectory = mediaDirectory\n\treturn nil\n}", "func setSeek(dirPath string, name string, fPos int64) {\n\tfullname := path.Join(dirPath, seekPrefix+name)\n\tif fPos == 0 {\n\t\tos.Remove(fullname)\n\t} else {\n\t\tfile, _ := os.OpenFile(fullname, os.O_WRONLY|os.O_CREATE, 0666)\n\t\tfile.WriteString(strconv.FormatInt(fPos, 10))\n\t\tfile.Close()\n\t}\n}", "func (fv *FileView) SetPathFile(path, file, ext string) {\n\tfv.DirPath = path\n\tfv.SelFile = file\n\tfv.SetExt(ext)\n\tfv.UpdateFromPath()\n}", "func (*XMLDocument) SetDir(dir string) {\n\tmacro.Rewrite(\"$_.dir = $1\", dir)\n}", "func (me *TPathDataType) Set(s string) { (*xsdt.String)(me).Set(s) }", "func (t *FenwickTreeSimple) Set(index int, value int) {\n\tt.Update(index, value-t.Get(index))\n}", "func SetRef(ref string, oid string) error {\n\tp := filepath.Dir(UGIT_DIR + \"/\" + ref)\n\tos.MkdirAll(p, 0777)\n\terr := ioutil.WriteFile(UGIT_DIR+\"/\"+ref, []byte(oid), 0777)\n\treturn err\n}", "func (m *stateManager) Set(key types.StateEntryKey, stateEntry types.StateEntry) (err error) {\n\t// Check stateEntry is not a pointer.\n\tval := reflect.ValueOf(stateEntry)\n\tif val.Kind() == reflect.Ptr {\n\t\treturn errors.New(\"stateEntry must not be a pointer\")\n\t}\n\n\tif err = m.ensureLoaded(); err != nil {\n\t\treturn\n\t}\n\n\t// Update or add state entry.\n\tm.state[key] = stateEntry\n\tm.stateChanged = true\n\n\treturn\n}", "func (t *FileTree) SetFile(f file.Reference) error {\n\toriginal, ok := t.pathToFileRef[f.Path.ID()]\n\n\tif !ok {\n\t\treturn fmt.Errorf(\"file does not already exist in tree (cannot replace)\")\n\t}\n\tdelete(t.pathToFileRef, original.Path.ID())\n\tt.pathToFileRef[f.Path.ID()] = f\n\n\treturn nil\n}", "func (impl *ldapAuthImpl) SetCAPath(path string) error {\n\timpl.Lock()\n\tdefer impl.Unlock()\n\n\tif path != impl.caPath {\n\t\timpl.caPath = path\n\t\terr := impl.initializeCAPool()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (m *MerkleTree) Set(index []byte, key string, value []byte) error {\n\tcommitment, err := crypto.NewCommit([]byte(key), value)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttoAdd := userLeafNode{\n\t\tkey: key,\n\t\tvalue: append([]byte{}, value...), // make a copy of value\n\t\tindex: index,\n\t\tcommitment: commitment,\n\t}\n\tm.insertNode(index, &toAdd)\n\treturn nil\n}" ]
[ "0.7188751", "0.70497656", "0.69994205", "0.69260705", "0.6846642", "0.6761991", "0.67417395", "0.6709446", "0.6564426", "0.64639956", "0.6394431", "0.6356663", "0.63492095", "0.6277019", "0.626828", "0.6263598", "0.6247245", "0.61931396", "0.61799264", "0.61762017", "0.6155869", "0.6112429", "0.6099579", "0.6098902", "0.6058148", "0.6050812", "0.60339594", "0.60254014", "0.60162354", "0.60162", "0.6008411", "0.59825224", "0.5950835", "0.5934299", "0.5933279", "0.5926648", "0.5902473", "0.58861065", "0.5803435", "0.57950914", "0.5767735", "0.57582754", "0.57340306", "0.5725218", "0.57179344", "0.5715757", "0.57034117", "0.568525", "0.5684713", "0.5677626", "0.56582445", "0.56387633", "0.5634334", "0.5619013", "0.5607518", "0.559357", "0.5590478", "0.5572738", "0.55581707", "0.55546033", "0.55513084", "0.5539899", "0.5532587", "0.5528882", "0.55175364", "0.5503568", "0.54706484", "0.54685134", "0.54534674", "0.545049", "0.5449991", "0.5445025", "0.54311013", "0.5430382", "0.54286176", "0.54243284", "0.54071295", "0.5395389", "0.5394963", "0.5388017", "0.5363746", "0.53631604", "0.53528744", "0.5351711", "0.53505135", "0.5349776", "0.53486705", "0.5342619", "0.5339542", "0.53348845", "0.53140676", "0.53004384", "0.5288413", "0.5286081", "0.52784884", "0.527412", "0.52737665", "0.52703696", "0.5267373", "0.5259944" ]
0.8198673
0
Lookup returns the entry associated with the provided path and a boolean indicating whether the entry was found.
func (d Dir) Lookup(path string) (file reflow.File, ok bool) { file, ok = d.contents[path] return file, ok }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (registry *Registry) Lookup(handle string) *task.Task {\n\t// TODO: Refactor the interface here to explicitly add an `ok`\n\t// return value (in the style of reading a map[...]...)\n\t// to differentiate a present nil value return vs. a\n\t// not-present-at-all value.\n\tregistry.lock.RLock()\n\tdefer registry.lock.RUnlock()\n\n\tif t, exists := registry.db[handle]; exists {\n\t\treturn t\n\t}\n\n\treturn nil\n}", "func (e entry) lookup(name interface{}) (interface{}, bool) {\n\tif res, ok := e[name]; ok {\n\t\treturn res, true\n\t}\n\treturn nil, false\n}", "func (fs *fsMutable) lookup(p fuseops.InodeID, c string) (le lookupEntry, found bool, lk []byte) {\n\treturn lookup(p, c, fs.lookupTree)\n}", "func (fs *fsMutable) lookup(p fuseops.InodeID, c string) (le lookupEntry, found bool, lk []byte) {\n\treturn lookup(p, c, fs.lookupTree)\n}", "func Lookup(path string) ([]byte, error) {\n\tf, ok := files[path]\n\tif !ok {\n\t\treturn nil, os.ErrNotExist\n\t}\n\treturn f.data, nil\n}", "func (r *Router) Lookup(method, path string) (Handle, Params, bool) {\n\tif root := r.trees[method]; root != nil {\n\t\treturn root.getValue(path)\n\t}\n\treturn nil, nil, false\n}", "func (c *rPathCacheContainer) lookup(cPath string) ([]byte, string) {\n\tc.Lock()\n\tdefer c.Unlock()\n\tif cPath == c.cPath {\n\t\t// hit\n\t\treturn c.dirIV, c.pPath\n\t}\n\t// miss\n\treturn nil, \"\"\n}", "func (fi *FsCache) Lookup(key *fs.FsFile) (*fs.FsData, bool) {\n\tfi.l.RLock()\n\tdefer fi.l.RUnlock()\n\tfdata, ok := fi.lookup(key)\n\treturn fdata, ok\n}", "func lookup(p fuseops.InodeID, c string, lookupTree *iradix.Tree) (le lookupEntry, found bool, lk []byte) {\n\tlk = formLookupKey(p, c)\n\tval, found := lookupTree.Get(lk)\n\tif found {\n\t\tle = val.(lookupEntry)\n\t\treturn le, found, lk\n\t}\n\treturn lookupEntry{}, found, lk\n}", "func lookup(p fuseops.InodeID, c string, lookupTree *iradix.Tree) (le lookupEntry, found bool, lk []byte) {\n\tlk = formLookupKey(p, c)\n\tval, found := lookupTree.Get(lk)\n\tif found {\n\t\tle = val.(lookupEntry)\n\t\treturn le, found, lk\n\t}\n\treturn lookupEntry{}, found, lk\n}", "func (r *HashRing) Lookup(key string) (string, bool) {\n\tstrs := r.LookupN(key, 1)\n\tif len(strs) == 0 {\n\t\treturn \"\", false\n\t}\n\treturn strs[0], true\n}", "func (s *OnDiskStateMachine) Lookup(query interface{}) (interface{}, error) {\n\tif !s.opened {\n\t\tpanic(\"Lookup called when not opened\")\n\t}\n\treturn s.sm.Lookup(query)\n}", "func (s *devNull) Lookup(key string) (interface{}, bool) {\n\treturn nil, false\n}", "func (fi *FsCache) lookup(key *fs.FsFile) (*fs.FsData, bool) {\n\tfdata, ok := fi.dict[key.String()]\n\treturn fdata, ok\n}", "func (s dirServer) Lookup(name upspin.PathName) (*upspin.DirEntry, error) {\n\tp, err := path.Parse(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch p.FilePath() {\n\tcase \"\": // Root directory.\n\t\treturn directory(p.Path()), nil\n\tcase access.AccessFile:\n\t\treturn s.accessEntry, nil\n\t}\n\n\tgit := s.corpus.GitHub()\n\tswitch p.NElem() {\n\tcase 1: // Owner directory.\n\t\tok := false\n\t\tgit.ForeachRepo(func(repo *maintner.GitHubRepo) error {\n\t\t\tif repo.ID().Owner == p.Elem(0) {\n\t\t\t\tok = true\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t\tif ok {\n\t\t\treturn directory(p.Path()), nil\n\t\t}\n\tcase 2: // User directory.\n\t\tif git.Repo(p.Elem(0), p.Elem(1)) != nil {\n\t\t\treturn directory(p.Path()), nil\n\t\t}\n\tcase 3: // State directory.\n\t\tif validState(p.Elem(2)) {\n\t\t\treturn directory(p.Path()), 
nil\n\t\t}\n\tcase 4: // Issue file or link.\n\t\tstate := p.Elem(2)\n\t\tif !validState(state) {\n\t\t\tbreak\n\t\t}\n\t\trepo := git.Repo(p.Elem(0), p.Elem(1))\n\t\tn, err := strconv.ParseInt(p.Elem(3), 10, 32)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tissue := repo.Issue(int32(n))\n\t\tif issue == nil {\n\t\t\tbreak\n\t\t}\n\t\tif state == \"open\" && issue.Closed || state == \"closed\" && !issue.Closed {\n\t\t\tbreak\n\t\t}\n\t\tif state == \"open\" || state == \"closed\" {\n\t\t\treturn link(p.Path(), issue), upspin.ErrFollowLink\n\t\t}\n\t\tde, err := s.packIssue(p.Path(), issue)\n\t\tif err != nil {\n\t\t\treturn nil, errors.E(name, err)\n\t\t}\n\t\treturn de, nil\n\t}\n\n\treturn nil, errors.E(name, errors.NotExist)\n}", "func (state *State) LookUp(path string) (value reflect.Value, err error) {\n\tvalue, err = utils.LookUp(state.deviceInfo, path)\n\treturn\n}", "func Lookup(name string) (Plugin, bool) {\n\tp, found := directory[name]\n\treturn p, found\n}", "func (inst *hiddenInstance) Lookup(path ...string) Value {\n\treturn inst.value().Lookup(path...)\n}", "func (r propertyTypeRegistry) Lookup(module, key string) bool {\n\t_, found := r[module][key]\n\treturn found\n}", "func (cp Pack) Lookup(accountID string) (bool, Account) {\n\tfor _, c := range cp {\n\t\tfound, account := c.Lookup(accountID)\n\t\tif found {\n\t\t\treturn true, account\n\t\t}\n\t}\n\treturn false, Account{}\n}", "func (lwwset LWWSet) Lookup(value string) (bool, error) {\n\t// Return an error if the value passed is nil\n\tif value == \"\" {\n\t\treturn false, errors.New(\"empty value provided\")\n\t}\n\n\tlwwset, list := lwwset.List()\n\n\t// Iterative over the LWWSet and check if the\n\t// value is the one we're searching\n\t// return true if the value exists\n\tfor _, element := range list {\n\t\tif element == value {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\t// If the value isn't found after iterating\n\t// over the entire LWWSet we return false\n\treturn false, nil\n}", "func (dc *DigestCache) Lookup(hash []byte) []byte {\n\tif r, ok := dc.Records[string(hash)]; ok {\n\t\treturn r\n\t}\n\treturn nil\n}", "func (s *mockFSServer) Lookup(ctx context.Context, r *proto.LookupRequest) (*proto.LookupResponse, error) {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\tif isDir, ok := s.filesCreated[r.Path]; ok {\n\t\treturn &proto.LookupResponse{IsDir: isDir}, nil\n\t}\n\n\treturn nil, os.ErrNotExist\n}", "func (idx *Unique) Lookup(v string) (resultPath []string, err error) {\n\tif idx.caseInsensitive {\n\t\tv = strings.ToLower(v)\n\t}\n\tsearchPath := path.Join(idx.indexRootDir, v)\n\tif err = isValidSymlink(searchPath); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\terr = &idxerrs.NotFoundErr{TypeName: idx.typeName, Key: idx.indexBy, Value: v}\n\t\t}\n\t\treturn\n\t}\n\n\tp, err := os.Readlink(searchPath)\n\tif err != nil {\n\t\treturn []string{}, nil\n\t}\n\n\treturn []string{p}, err\n}", "func (s STags) Lookup(tag string, fields ...string) (value string, ok bool) {\n\tvalue, ok = s.get(tag, fields...)\n\treturn\n}", "func Lookup(opts []map[string]interface{}, key string) (interface{}, bool) {\n\tif len(opts) == 0 {\n\t\treturn nil, false\n\t}\n\tv, ok := opts[0][key]\n\treturn v, ok\n}", "func (cache *FTPDataCache) Look(filename string) (bool, error) {\n\n\treturn false, nil\n}", "func (s *dirServer) Lookup(name upspin.PathName) (*upspin.DirEntry, error) {\n\tp, err := path.Parse(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfp := p.FilePath()\n\tswitch fp {\n\tcase \"\": // Root 
directory.\n\t\ts.mu.Lock()\n\t\ttotal := len(s.boxes)\n\t\ts.mu.Unlock()\n\t\treturn &upspin.DirEntry{\n\t\t\tName: p.Path(),\n\t\t\tSignedName: p.Path(),\n\t\t\tAttr: upspin.AttrDirectory,\n\t\t\tTime: upspin.Now(),\n\t\t\tSequence: int64(total * 2),\n\t\t}, nil\n\tcase access.AccessFile:\n\t\treturn s.accessEntry, nil\n\t}\n\n\tn := matchBox(fp)\n\tif n < 0 {\n\t\treturn nil, errors.E(name, errors.NotExist)\n\t}\n\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\ttotal := len(s.boxes)\n\tif n > total {\n\t\treturn nil, errors.E(name, errors.NotExist)\n\t}\n\n\tif n == total {\n\t\t// A new box is opened!\n\t\tde, data, err := s.pack(fp, int64(total*2+1), randomState())\n\t\tif err != nil {\n\t\t\treturn nil, errors.E(name, err)\n\t\t}\n\t\ts.boxes = append(s.boxes, box{de, data})\n\t\ts.open.Broadcast()\n\t}\n\n\treturn s.boxes[n].DirEntry, nil\n}", "func (s *ConcurrentStateMachine) Lookup(query interface{}) (interface{}, error) {\n\treturn s.sm.Lookup(query)\n}", "func (fsi *fsIOPool) lookupToRead(path string) (*lock.RLockedFile, bool) {\n\trlkFile, ok := fsi.readersMap[path]\n\t// File reference exists on map, validate if its\n\t// really closed and we are safe to purge it.\n\tif ok && rlkFile != nil {\n\t\t// If the file is closed and not removed from map is a bug.\n\t\tif rlkFile.IsClosed() {\n\t\t\t// Log this as an error.\n\t\t\treqInfo := (&logger.ReqInfo{}).AppendTags(\"path\", path)\n\t\t\tctx := logger.SetReqInfo(context.Background(), reqInfo)\n\t\t\tlogger.LogIf(ctx, errUnexpected)\n\n\t\t\t// Purge the cached lock path from map.\n\t\t\tdelete(fsi.readersMap, path)\n\n\t\t\t// Indicate that we can populate the new fd.\n\t\t\tok = false\n\t\t} else {\n\t\t\t// Increment the lock ref, since the file is not closed yet\n\t\t\t// and caller requested to read the file again.\n\t\t\trlkFile.IncLockRef()\n\t\t}\n\t}\n\treturn rlkFile, ok\n}", "func Lookup(domainName string) (bool, string) {\n\tfor index := 0; index < len(entries); index++ {\n\t\tif domainName == entries[index].domainName {\n\t\t\treturn true, entries[index].ip\n\t\t}\n\t}\n\n\treturn false, \"\"\n}", "func (atl Atlas) Get(rtid uintptr) (*AtlasEntry, bool) {\n\tent, ok := atl.mappings[rtid]\n\treturn ent, ok\n}", "func (t *Table) Lookup(s string) (n uint32, ok bool) {\n\ti0 := int(murmurSeed(0).hash(s)) & t.level0Mask\n\tseed := t.level0[i0]\n\ti1 := int(murmurSeed(seed).hash(s)) & t.level1Mask\n\tn = t.level1[i1]\n\treturn n, s == t.keys[int(n)]\n}", "func (d *Dir) Lookup(ctx context.Context, name string) (fs.Node, error) {\n\tlog.Println(\"dir.Lookup\")\n\tinfo, err := d.core.Stat(ctx, name)\n\tif err != nil {\n\t\treturn &notExistFile{\n\t\t\tcore: d.core,\n\t\t\tkey: name,\n\t\t}, nil\n\t}\n\treturn &existingFile{info: info}, nil\n}", "func (c *Config) Lookup(key string) (any, error) {\n\t// check thet key is valid, meaning it starts with one of\n\t// the fields of the config struct\n\tif !c.isValidKey(key) {\n\t\treturn nil, nil\n\t}\n\tval, err := lookupByType(key, reflect.ValueOf(c))\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"lookup: error on key '%s'\", key)\n\t}\n\treturn val, nil\n}", "func (c *Cache) Lookup(buildid int64) (string, error) {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\n\tif hash, ok := c.hashes[buildid]; !ok {\n\t\treturn \"\", fmt.Errorf(\"BuildId not found in cache: %d\", buildid)\n\t} else {\n\t\treturn hash, nil\n\t}\n}", "func (w *WindowedMap) Lookup(uid UID) (interface{}, bool) {\n\tw.ExpireOldEntries()\n\titem, ok := w.uidMap[uid]\n\tif !ok {\n\t\treturn nil, 
false\n\t}\n\tw.Put(uid, item.value)\n\treturn item.value, true\n}", "func (r *Mux) Lookup(method, path string) (http.Handler, Params, bool) {\n\tif root := r.trees[method]; root != nil {\n\t\treturn root.getValue(path)\n\t}\n\treturn nil, nil, false\n}", "func (sm *ConcurrentStateMachine) Lookup(query []byte) ([]byte, error) {\n\treturn sm.sm.Lookup(query)\n}", "func (kvStore *KVStore) Lookup(hash KademliaID) (output []byte, err error) {\n kvStore.mutex.Lock()\n if val, ok := kvStore.mapping[hash]; ok {\n output = val.data\n } else {\n err = NotFoundError\n }\n kvStore.mutex.Unlock()\n return\n}", "func (bf BloomFilter) Lookup(entry string) (bool, error) {\n\thashes := hashEntry([]byte(entry), bf.hf)\n\tfor i := 0; i < bf.hf; i++ {\n\t\tlookup_idx := hashes[i] & (bf.size - 1)\n\t\tif exists, err := bf.getBit(lookup_idx); !exists {\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\treturn false, nil\n\t\t}\n\t}\n\treturn true, nil\n}", "func (d *dataUsageCache) find(path string) *dataUsageEntry {\n\tdue, ok := d.Cache[hashPath(path).Key()]\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn &due\n}", "func Lookup(name string) (value interface{}, ok bool) {\n\tinitialize()\n\tlock.RLock()\n\tvalue, ok = notGlobals[name]\n\tlock.RUnlock()\n\treturn value, ok\n}", "func (m Map) Lookup(d digest.Digest, key T) T {\n\tif _, ok := m.tab[d]; !ok {\n\t\treturn nil\n\t}\n\tentry := *m.tab[d]\n\tfor entry != nil && Less(entry.Key, key) {\n\t\tentry = entry.Next\n\t}\n\tif entry == nil || !Equal(entry.Key, key) {\n\t\treturn nil\n\t}\n\treturn entry.Value\n}", "func (r *Registry) Lookup(name string) *Object {\n\tr.Lock()\n\tdefer r.Unlock()\n\n\tif obj, ok := r.dataStores[name]; ok {\n\t\tif obj.Enabled {\n\t\t\treturn obj\n\t\t}\n\t}\n\treturn nil\n}", "func (t table) lookup(name interface{}) (interface{}, bool) {\n\tfor _, e := range t {\n\t\tif val, ok := e.lookup(name); ok {\n\t\t\treturn val, true\n\t\t}\n\t}\n\treturn nil, false\n}", "func (s *subtasks) Lookup(ctx context.Context, dir *fs.Inode, p string) (*fs.Dirent, error) {\n\ttid, err := strconv.ParseUint(p, 10, 32)\n\tif err != nil {\n\t\treturn nil, syserror.ENOENT\n\t}\n\n\ttask := s.pidns.TaskWithID(kernel.ThreadID(tid))\n\tif task == nil {\n\t\treturn nil, syserror.ENOENT\n\t}\n\tif task.ThreadGroup() != s.t.ThreadGroup() {\n\t\treturn nil, syserror.ENOENT\n\t}\n\n\ttd := newTaskDir(task, dir.MountSource, s.pidns, false)\n\treturn fs.NewDirent(td, p), nil\n}", "func Lookup(node *Node, key int) bool {\n\tif node == nil {\n\t\treturn false\n\t} else {\n\t\tif node.Key == key {\n\t\t\treturn true\n\t\t} else {\n\t\t\tif node.Key > key {\n\t\t\t\treturn Lookup(node.Left, key)\n\t\t\t} else {\n\t\t\t\treturn Lookup(node.Right, key)\n\t\t\t}\n\t\t}\n\t}\n}", "func (dir *Directory) lookup(path string) *Directory {\n\tpath = pathutil.Clean(path);\t// no trailing '/'\n\n\tif dir == nil || path == \"\" || path == \".\" {\n\t\treturn dir\n\t}\n\n\tdpath, dname := pathutil.Split(path);\n\tif dpath == \"\" {\n\t\t// directory-local name\n\t\tfor _, d := range dir.Dirs {\n\t\t\tif dname == d.Name {\n\t\t\t\treturn d\n\t\t\t}\n\t\t}\n\t\treturn nil;\n\t}\n\n\treturn dir.lookup(dpath).lookup(dname);\n}", "func (rad *Radix) Lookup(key string) interface{} {\n\trad.lock.Lock()\n\tdefer rad.lock.Unlock()\n\tif x, ok := rad.root.lookup([]rune(key)); ok {\n\t\treturn x.value\n\t}\n\treturn nil\n}", "func (dir HgmDir) Lookup(ctx context.Context, name string) (fs.Node, error) {\n\tlocalDirent := dir.localDir + name // dirs are ending with a slash -> just 
append the name\n\ta := fuse.Attr{}\n\td := HgmDir{hgmFs: dir.hgmFs, localDir: localDirent}\n\terr := d.Attr(ctx, &a)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif (a.Mode & os.ModeType) == os.ModeDir {\n\t\treturn HgmDir{hgmFs: dir.hgmFs, localDir: localDirent + \"/\"}, nil\n\t}\n\n\treturn &HgmFile{hgmFs: dir.hgmFs, localFile: localDirent, fileSize: a.Size}, nil\n}", "func (key Key) Lookup() (string, bool) {\n\treturn os.LookupEnv(string(key))\n}", "func (sc *simpleLRUStatsCache) Lookup(id int64) (*statistics.Table, bool) {\n\tsc.mu.Lock()\n\tdefer sc.mu.Unlock()\n\treturn sc.lookupUnsafe(id)\n}", "func (r *Router) Lookup(method, path string, ctx *Context) (RequestHandler, bool) {\n\treturn r.router.Lookup(method, path, ctx)\n}", "func (s *Store) Get(id string) (e Entry, exists bool) {\n\te, exists = (*s)[id]\n\treturn\n}", "func (f *FlagSet) Lookup(name string) *Flag {\n\treturn f.lookup(f.normalizeFlagName(name))\n}", "func (s *RegularStateMachine) Lookup(query interface{}) (interface{}, error) {\n\treturn s.sm.Lookup(query)\n}", "func Lookup(code cid.Cid) (ActorInfo, bool) {\n\tact, ok := actorInfos[code]\n\treturn act, ok\n}", "func (vars Variables) Lookup(n *scparse.VariableNode) (interface{}, bool) {\n\tv := vars.lookup(n)\n\tif !v.IsValid() {\n\t\treturn nil, false\n\t}\n\treturn v.Interface(), true\n}", "func (sm *RegularStateMachine) Lookup(query []byte) ([]byte, error) {\n\treturn sm.sm.Lookup(query), nil\n}", "func lookupPathMap(pathMaps *[]n.ApplicationGatewayURLPathMap, resourceID *string) *n.ApplicationGatewayURLPathMap {\n\tfor idx, pathMap := range *pathMaps {\n\t\tif *pathMap.ID == *resourceID {\n\t\t\treturn &(*pathMaps)[idx]\n\t\t}\n\t}\n\n\treturn nil\n}", "func lookupPath(obj interface{}, path string, kind reflect.Kind) (reflect.Value, bool) {\n\tnodes := strings.Split(path, \".\")\n\n\tvar name string\n\tvalue := reflect.ValueOf(obj)\n\nLOOP:\n\tfor _, node := range nodes {\n\t\tname = node\n\t\tif value.Kind() == reflect.Struct {\n\t\t\tt := value.Type()\n\n\t\t\tfor i := 0; i != t.NumField(); i++ {\n\t\t\t\tif t.Field(i).Name == node {\n\t\t\t\t\tvalue = value.Field(i)\n\t\t\t\t\tif value.Kind() == reflect.Interface || value.Kind() == reflect.Ptr {\n\t\t\t\t\t\tvalue = value.Elem()\n\t\t\t\t\t}\n\n\t\t\t\t\tcontinue LOOP\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tbreak LOOP\n\t\t}\n\t}\n\n\tif name != nodes[len(nodes)-1] {\n\t\treturn value, false\n\t}\n\n\tif kind == reflect.Interface {\n\t\treturn value, true\n\t}\n\n\t// convert result kind to int for all size of interger as then\n\t// only a int64 version will be retrieve by value.Int()\n\trk := value.Kind()\n\tswitch rk {\n\tcase reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\trk = reflect.Int\n\t}\n\n\treturn value, rk == kind\n}", "func (r *Wrapper) Lookup(name string) (val any, ok bool) {\n\tfv := r.rv.FieldByName(name)\n\tif !fv.IsValid() {\n\t\treturn\n\t}\n\n\tif fv.CanInterface() {\n\t\treturn fv.Interface(), true\n\t}\n\treturn\n}", "func Lookup(name string) *Flag {\n\treturn CommandLine.Lookup(name)\n}", "func Lookup(L *lua.State, path string, idx int) {\n parts := strings.Split(path, \".\")\n if idx != 0 {\n L.PushValue(idx)\n } else {\n L.GetGlobal(\"_G\")\n }\n for _, field := range parts {\n L.GetField(-1, field)\n L.Remove(-2) // remove table\n }\n}", "func (as AccountSet) Lookup(accountID string) (bool, Account) {\n\tlogger.InfoMsgf(\"looking up accountID %s in set\", accountID)\n\tfor _, a := range as 
{\n\t\tif a.Account == accountID {\n\t\t\treturn true, a\n\t\t}\n\t}\n\treturn false, Account{}\n}", "func (c *Cache) lookupResult(\n\tpodName, nodeName, predicateKey string,\n\tequivalenceHash uint64,\n) (value predicateResult, ok bool) {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\tglog.V(5).Infof(\"Cache lookup: node=%s,predicate=%s,pod=%s\", nodeName, predicateKey, podName)\n\tvalue, ok = c.cache[nodeName][predicateKey][equivalenceHash]\n\treturn value, ok\n}", "func Lookup(files *[]string, routeFileName string) filepath.WalkFunc {\n\treturn func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tif len(routeFileName) == 0 {\n\t\t\trouteFileName = \"route_config.json.template\"\n\t\t}\n\n\t\tif info.Name() == routeFileName {\n\t\t\t*files = append(*files, path)\n\t\t}\n\n\t\treturn nil\n\t}\n}", "func (d *Document) lookup(id uint, path []string) uint {\n\tvar (\n\t\tv *V\n\t\tn *node\n\t)\nlookup:\n\tfor _, key := range path {\n\t\tif n = d.get(id); n != nil {\n\t\t\tswitch n.info.Type() {\n\t\t\tcase TypeObject:\n\t\t\t\tfor i := range n.values {\n\t\t\t\t\tv = &n.values[i]\n\t\t\t\t\tif v.key == key {\n\t\t\t\t\t\tid = v.id\n\t\t\t\t\t\tcontinue lookup\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase TypeArray:\n\t\t\t\ti := 0\n\t\t\t\tfor _, c := range []byte(key) {\n\t\t\t\t\tif c -= '0'; 0 <= c && c <= 9 {\n\t\t\t\t\t\ti = i*10 + int(c)\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn maxUint\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif 0 <= i && i < len(n.values) {\n\t\t\t\t\tv = &n.values[i]\n\t\t\t\t\tid = v.id\n\t\t\t\t\tcontinue lookup\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn maxUint\n\t}\n\treturn id\n}", "func (idx *Tree) Lookup(key []byte) (value uint64, found bool) {\n\tid := idx.allocatorQueue.get()\n\tvalue, found = idx.allocators[id].Lookup(key)\n\tidx.allocatorQueue.put(id)\n\treturn\n}", "func (x *Index) Lookup(query string) (match *LookupResult, alt *AltWords, illegal bool) {\n\tss := strings.Split(query, \".\", 0);\n\n\t// check query syntax\n\tfor _, s := range ss {\n\t\tif !isIdentifier(s) {\n\t\t\tillegal = true;\n\t\t\treturn;\n\t\t}\n\t}\n\n\tswitch len(ss) {\n\tcase 1:\n\t\tmatch, alt = x.LookupWord(ss[0])\n\n\tcase 2:\n\t\tpakname := ss[0];\n\t\tmatch, alt = x.LookupWord(ss[1]);\n\t\tif match != nil {\n\t\t\t// found a match - filter by package name\n\t\t\tdecls := match.Decls.filter(pakname);\n\t\t\tothers := match.Others.filter(pakname);\n\t\t\tmatch = &LookupResult{decls, others};\n\t\t}\n\n\tdefault:\n\t\tillegal = true\n\t}\n\n\treturn;\n}", "func (o *OS) Lookup(key string) (interface{}, bool) {\n\treturn os.LookupEnv(key)\n}", "func (n *node) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (fs.Node, error) {\n\tvar requested string = req.Name\n\tp, nm := n.Path(), n.Name()\n\tfp, _ := filepath.Abs(filepath.Join(p, \"/\", nm))\n\ttail := n.Tail()\n\tif len(tail) > 0 {\n\t\tfor _, nn := range tail {\n\t\t\tif nn.Aliased() {\n\t\t\t\tif exists, ad := nn.Find(requested, n, nn); exists {\n\t\t\t\t\tswitch ad.Is() {\n\t\t\t\t\tcase Directory:\n\t\t\t\t\t\tad.SetPath(fp)\n\t\t\t\t\t\treturn ad, nil\n\t\t\t\t\tcase File, Fileio:\n\t\t\t\t\t\taf := NewNodeFile(ad)\n\t\t\t\t\t\taf.InitializeFile(fp, n)\n\t\t\t\t\t\treturn af, nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif nn.Name() == requested {\n\t\t\t\tswitch nn.Is() {\n\t\t\t\tcase Directory:\n\t\t\t\t\tnn.InitializeDir(fp, n)\n\t\t\t\t\treturn nn, nil\n\t\t\t\tcase File, Fileio, Socket:\n\t\t\t\t\tf := NewNodeFile(nn)\n\t\t\t\t\tf.InitializeFile(fp, 
n)\n\t\t\t\t\treturn f, nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil, fuse.ENOENT\n}", "func (bol Boolean) Find(path Ref) (Value, error) {\n\tif len(path) == 0 {\n\t\treturn bol, nil\n\t}\n\treturn nil, errFindNotFound\n}", "func (l *GroupLookup) Lookup(key flux.GroupKey) (interface{}, bool) {\n\tif key == nil || len(l.groups) == 0 {\n\t\treturn nil, false\n\t}\n\n\tgroup := l.lookupGroup(key)\n\tif group == -1 {\n\t\treturn nil, false\n\t}\n\n\ti := l.groups[group].Index(key)\n\tif i != -1 {\n\t\treturn l.groups[group].At(i), true\n\t}\n\treturn nil, false\n}", "func (d *Directory) Entry(name string) *DirectoryEntry {\n\tfor _, e := range d.Entries {\n\t\tif e.Path == name {\n\t\t\treturn e\n\t\t}\n\t}\n\treturn nil\n}", "func (c *Client) lookup(op errors.Op, entry *upspin.DirEntry, fn lookupFn, followFinal bool, s *metric.Span) (resultEntry, finalSuccessfulEntry *upspin.DirEntry, err error) {\n\tss := s.StartSpan(\"lookup\")\n\tdefer ss.End()\n\n\t// As we run, we want to maintain the incoming DirEntry to track the name,\n\t// leaving the rest alone. As the fn will return a newly allocated entry,\n\t// after each link we update the entry to achieve this.\n\toriginalName := entry.Name\n\tvar prevEntry *upspin.DirEntry\n\tcopied := false // Do we need to allocate a new entry to modify its name?\n\tfor loop := 0; loop < upspin.MaxLinkHops; loop++ {\n\t\tparsed, err := path.Parse(entry.Name)\n\t\tif err != nil {\n\t\t\treturn nil, nil, errors.E(op, err)\n\t\t}\n\t\tdir, err := c.DirServer(parsed.Path())\n\t\tif err != nil {\n\t\t\treturn nil, nil, errors.E(op, err)\n\t\t}\n\t\tresultEntry, err := fn(dir, entry, ss)\n\t\tif err == nil {\n\t\t\treturn resultEntry, entry, nil\n\t\t}\n\t\tif prevEntry != nil && errors.Is(errors.NotExist, err) {\n\t\t\treturn resultEntry, nil, errors.E(op, errors.BrokenLink, prevEntry.Name, err)\n\t\t}\n\t\tprevEntry = resultEntry\n\t\tif err != upspin.ErrFollowLink {\n\t\t\treturn resultEntry, nil, errors.E(op, originalName, err)\n\t\t}\n\t\t// Misbehaving servers could return a nil entry. Handle that explicitly. Issue 451.\n\t\tif resultEntry == nil {\n\t\t\treturn nil, nil, errors.E(op, errors.Internal, prevEntry.Name, \"server returned nil entry for link\")\n\t\t}\n\t\t// We have a link.\n\t\t// First, allocate a new entry if necessary so we don't overwrite user's memory.\n\t\tif !copied {\n\t\t\ttmp := *entry\n\t\t\tentry = &tmp\n\t\t\tcopied = true\n\t\t}\n\t\t// Take the prefix of the result entry and substitute that section of the existing name.\n\t\tparsedResult, err := path.Parse(resultEntry.Name)\n\t\tif err != nil {\n\t\t\treturn nil, nil, errors.E(op, err)\n\t\t}\n\t\tresultPath := parsedResult.Path()\n\t\t// The result entry's name must be a prefix of the name we're looking up.\n\t\tif !strings.HasPrefix(parsed.String(), string(resultPath)) {\n\t\t\treturn nil, nil, errors.E(op, resultPath, errors.Internal, \"link path not prefix\")\n\t\t}\n\t\t// Update the entry to have the new Name field.\n\t\tif resultPath == parsed.Path() {\n\t\t\t// We're on the last element. We may be done.\n\t\t\tif followFinal {\n\t\t\t\tentry.Name = resultEntry.Link\n\t\t\t} else {\n\t\t\t\t// Yes, we are done. 
Return this entry, which is a link.\n\t\t\t\treturn resultEntry, entry, nil\n\t\t\t}\n\t\t} else {\n\t\t\tentry.Name = path.Join(resultEntry.Link, string(parsed.Path()[len(resultPath):]))\n\t\t}\n\t}\n\treturn nil, nil, errors.E(op, errors.IO, originalName, \"link loop\")\n}", "func (l *RandomAccessGroupLookup) Lookup(key flux.GroupKey) (interface{}, bool) {\n\tid := l.idForKey(key)\n\te, ok := l.index[string(id)]\n\tif !ok || e.Deleted {\n\t\treturn nil, false\n\t}\n\treturn e.Value, true\n}", "func (d *Driver) Lookup(id string) ([]byte, error) {\n\td.lockfile.Lock()\n\tdefer d.lockfile.Unlock()\n\n\tsecretData, err := d.getAllData()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif data, ok := secretData[id]; ok {\n\t\treturn data, nil\n\t}\n\treturn nil, fmt.Errorf(\"%s: %w\", id, errNoSecretData)\n}", "func (n *Node) Lookup(ctx context.Context, name string) (fs.Node, error) {\n\tlog.Println(\"Lookup\", name)\n\tnode, ok := n.fs.Nodes[name]\n\tif ok {\n\t\treturn node, nil\n\t}\n\treturn nil, fuse.ENOENT\n}", "func (t *targetCache) Get(path string) *dep.ResolvedTarget {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\treturn t.data[path]\n}", "func findEntry(t *testing.T, ms *yang.Modules, moduleName, path string) *yang.Entry {\n\tt.Helper()\n\tmodule, errs := ms.GetModule(moduleName)\n\tif errs != nil {\n\t\tt.Fatalf(\"error getting module %q: %v\", moduleName, errs)\n\t}\n\tif path == \"\" {\n\t\treturn module\n\t}\n\tentry := module.Find(path)\n\tif entry == nil {\n\t\tt.Fatalf(\"error getting entry %q in module %q\", path, moduleName)\n\t}\n\treturn entry\n}", "func (db *DB) Lookup(addr net.IP, result interface{}) error {\n\tdb.mu.RLock()\n\tdefer db.mu.RUnlock()\n\tif db.reader != nil {\n\t\treturn db.reader.Lookup(addr, result)\n\t}\n\treturn ErrUnavailable\n}", "func (tree *Tree) Get(key interface{}) (value interface{}, found bool) {\n\tnode := tree.lookup(key)\n\tif node != nil {\n\t\treturn node.Value, true\n\t}\n\treturn nil, false\n}", "func (m *LineMap) Contains(name string) bool {\n\tif m.lookup == nil {\n\t\treturn false\n\t}\n\n\tname = strings.ToLower(name)\n\n\t_, exists := m.lookup[name]\n\treturn exists\n}", "func lookup(name string, context ...interface{}) (interface{}, bool) {\n\t// If the dot notation was used we split the word in two and perform two\n\t// consecutive lookups. If the first one fails we return no value and a\n\t// negative truth. Taken from github.com/hoisie/mustache.\n\tif name != \".\" && strings.Contains(name, \".\") {\n\t\tparts := strings.SplitN(name, \".\", 2)\n\t\tif value, ok := lookup(parts[0], context...); ok {\n\t\t\treturn lookup(parts[1], value)\n\t\t}\n\t\treturn nil, false\n\t}\n\t// Iterate over the context chain and try to match the name to a value.\n\tfor _, c := range context {\n\t\t// Reflect on the value of the current context.\n\t\treflectValue := reflect.ValueOf(c)\n\t\tresult, isTruth := lookupReflectValue(name, reflectValue)\n\t\tif result != nil {\n\t\t\treturn result, isTruth\n\t\t}\n\t\t// If by this point no value was matched, we'll move up a step in the\n\t\t// chain and try to match a value there.\n\t}\n\t// We've exhausted the whole context chain and found nothing. 
Return a nil\n\t// value and a negative truth.\n\treturn nil, false\n}", "func (r *UIRegistry) Lookup(name string) (webutil.DataSource, error) {\n\n\tr.rwmu.RLock()\n\tdefer r.rwmu.RUnlock()\n\n\tif ret, ok := r.reg[name]; ok {\n\t\treturn ret.DataSource, nil\n\t}\n\n\treturn nil, ErrNotFound\n}", "func (d *Dir) Lookup(ctx context.Context, name string) (fs.Node, error) {\n\t// fmt.Printf(\"doing lookup of dir at inode %d\\n\", d.inodeNum)\n\tvar offset uint64 = 0\n\ttableData, err := d.inode.readFromData(offset, d.inode.Size)\n\tif err != nil {\n\t\tfmt.Println(\"VERY BAD error doing readFromData from offset 0 in Lookup \" + err.Error())\n\t}\n\ttable := new(InodeTable)\n\ttable.UnmarshalBinary(tableData)\n\tinodeNum := table.Table[name]\n\tif inodeNum == 0 {\n\t\treturn nil, fuse.ENOENT\n\t} else {\n\t\tinode, err := getInode(inodeNum)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"VERY BAD error doing getInode on existing entry in Lookup: \" + err.Error())\n\t\t}\n\t\tvar child fs.Node\n\t\tif inode.IsDir == 1 {\n\t\t\tchild = &Dir{\n\t\t\t\tinode: inode,\n\t\t\t\tinodeNum: inodeNum,\n\t\t\t\tinodeStream: d.inodeStream,\n\t\t\t}\n\t\t} else {\n\t\t\tchild = &File{\n\t\t\t\tinode: inode,\n\t\t\t\tinodeNum: inodeNum,\n\t\t\t\tinodeStream: d.inodeStream,\n\t\t\t}\n\t\t}\n\t\treturn child, nil\n\t}\n}", "func (c *cache) Retrieve(key, dest string, mode os.FileMode) bool {\n\tsrc := c.path(key)\n\tif err := c.copier.LinkMode(src, dest, mode); err == nil {\n\t\treturn true\n\t} else if !os.IsNotExist(err) {\n\t\tlog.Warning(\"Failed to retrieve %s from cache: %s\", key, err)\n\t\treturn false\n\t}\n\treturn false\n}", "func (c *ldcache) Lookup(libPrefixes ...string) ([]string, []string) {\n\tc.logger.Debugf(\"Looking up %v in cache\", libPrefixes)\n\n\t// We define a functor to check whether a given library name matches any of the prefixes\n\tmatchesAnyPrefix := func(s string) bool {\n\t\tfor _, p := range libPrefixes {\n\t\t\tif strings.HasPrefix(s, p) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\treturn c.resolveSelected(matchesAnyPrefix)\n}", "func (bf NaiveBloomFilter) Lookup(entry string) (bool, error) {\n\thashes := hashEntry([]byte(entry), bf.hf)\n\tfor i := 0; i < bf.hf; i++ {\n\t\tlookup_idx := hashes[i] & (bf.size - 1)\n\t\tif exists, err := bf.getByte(lookup_idx); !exists {\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\treturn false, nil\n\t\t}\n\t}\n\treturn true, nil\n}", "func (a *addrBook) Lookup(addr p2pcrypto.PublicKey) (*node.Info, error) {\n\ta.mtx.Lock()\n\td := a.lookup(addr)\n\ta.mtx.Unlock()\n\tif d == nil {\n\t\t// Todo: just return empty without error ?\n\t\treturn nil, ErrLookupFailed\n\t}\n\treturn d.na, nil\n}", "func (t *trieNode) Lookup(path string) HandlerFuncs {\n\tfor _, i := range t.childs {\n\t\tif strings.HasPrefix(path, i.path) {\n\t\t\treturn HandlerFuncsCombine(t.vals, i.Lookup(path[len(i.path):]))\n\t\t}\n\t}\n\treturn t.vals\n}", "func Lookup(key string) (string, bool) { return os.LookupEnv(key) }", "func (h *Hosts) Lookup(host string) (ip net.IP) {\n\tif h == nil || host == \"\" {\n\t\treturn\n\t}\n\n\th.mux.RLock()\n\tdefer h.mux.RUnlock()\n\n\tfor _, h := range h.hosts {\n\t\tif h.Hostname == host {\n\t\t\tip = h.IP\n\t\t\tbreak\n\t\t}\n\t\tfor _, alias := range h.Aliases {\n\t\t\tif alias == host {\n\t\t\t\tip = h.IP\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif ip != nil && Debug {\n\t\tlog.Logf(\"[hosts] hit: %s %s\", host, ip.String())\n\t}\n\treturn\n}", "func (cf *CFilter) Lookup(item []byte) bool {\n\tf := 
fprint(item, cf.fpSize, cf.hashfn)\n\tj := hashfp(item) % cf.size\n\tk := (j ^ hashfp(f)) % cf.size\n\n\treturn cf.buckets[j].lookup(f) || cf.buckets[k].lookup(f)\n}", "func Lookup(configPath string, key string) ([]string, error) {\n\tout, _, err := checkedRun(exec.Command(\"sshd\", \"-T\", \"-f\", configPath))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to fetch effective config: %w\", err)\n\t}\n\n\t// sshd -T prints out lowercase options\n\tkey = strings.ToLower(key)\n\n\tlineRegexp := regexp.MustCompile(fmt.Sprintf(\"(?m)^%s (.*)$\", regexp.QuoteMeta(key)))\n\tvalues := lineRegexp.FindAllSubmatch(out, -1)\n\tret := make([]string, 0, len(values))\n\tfor _, value := range values {\n\t\tret = append(ret, string(value[1]))\n\t}\n\n\treturn ret, nil\n}", "func (t *Tree) Get(k []byte) ([]byte, bool) {\n\tif t.root == nil {\n\t\treturn nil, false\n\t}\n\treturn t.root.lookup(t, k)\n}", "func passthruLookup(name string) (string, bool) {\n\treturn \"\", false\n}", "func (n *node) Lookup(ctx context.Context, name string) (fspkg.Node, error) {\n\te, ok := n.te.LookupChild(name)\n\tif !ok {\n\t\treturn nil, syscall.ENOENT\n\t}\n\treturn &node{n.fs, e}, nil\n}" ]
[ "0.6632723", "0.6526417", "0.6502644", "0.6502644", "0.6448711", "0.6315723", "0.6277328", "0.6253912", "0.6245476", "0.6245476", "0.62231034", "0.6216919", "0.62092924", "0.6153425", "0.61027765", "0.6026127", "0.5986551", "0.59215796", "0.590627", "0.59048736", "0.5899805", "0.5864351", "0.584182", "0.58093536", "0.57999676", "0.5790596", "0.57542366", "0.5748702", "0.57421", "0.573438", "0.572914", "0.57217675", "0.57170194", "0.570233", "0.5701344", "0.5694191", "0.5689627", "0.5686498", "0.5679511", "0.5666625", "0.5665369", "0.5636752", "0.5633946", "0.5629517", "0.561042", "0.56100714", "0.55981755", "0.55961305", "0.55937165", "0.55909634", "0.5588873", "0.5579958", "0.5579237", "0.55689985", "0.55615747", "0.55557156", "0.5540026", "0.55290455", "0.5529007", "0.5516171", "0.5503529", "0.548569", "0.54795367", "0.54763424", "0.54752266", "0.54456234", "0.543484", "0.54253966", "0.5424694", "0.5422999", "0.5416094", "0.54077864", "0.5405564", "0.5401575", "0.5391069", "0.53824", "0.53783137", "0.53781563", "0.536635", "0.5362566", "0.53393525", "0.53381056", "0.533249", "0.53299534", "0.5327155", "0.5311993", "0.53061086", "0.5300892", "0.53003925", "0.5287179", "0.5282314", "0.52813387", "0.526579", "0.5250867", "0.5233717", "0.5223714", "0.5218517", "0.5217978", "0.5213142", "0.51981103" ]
0.72718596
0
Scan advances the scanner to the next entry (the first entry for a fresh scanner). It returns false when the scan stops with no more entries.
func (s *DirScanner) Scan() bool {
	if len(s.todo) == 0 {
		return false
	}
	s.path = s.todo[0]
	s.todo = s.todo[1:]
	return true
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *scanner) Scan() bool {\n\tok := s.Scanner.Scan()\n\tif ok {\n\t\ts.line++\n\t}\n\treturn ok\n}", "func (sc *Scanner) Scan() bool {\n\tif sc.err != nil {\n\t\treturn false\n\t}\n\tfor {\n\t\trecord, err := sc.parseRecord()\n\t\tif err != nil {\n\t\t\tsc.err = errors.Wrap(err, \"stanza: Scan\")\n\t\t\treturn false\n\t\t}\n\t\tif record == nil {\n\t\t\tcontinue\n\t\t}\n\t\tsc.rec = record\n\t\treturn true\n\t}\n}", "func (r *Reader) Scan() bool {\n\treturn r.scan(true)\n}", "func (si *ScanIterator) Next() bool {\n\treturn si.idx <= len(si.tuples)-1\n}", "func (s *Scanner) Scan() bool {\n\tif s.err != nil {\n\t\treturn false\n\t}\n\tmsg, err := s.next()\n\tif err != nil {\n\t\ts.err = err\n\t\treturn false\n\t}\n\ts.message = msg\n\treturn true\n}", "func (sc *smallFlatTableScanner) Scan() bool {\n\tfor {\n\t\tif sc.cur == nil {\n\t\t\tnextOff := sc.curLimit\n\t\t\tsubTableIndex, subTableStart, subTableLimit, scanLimit := nextSubTable(sc.start, sc.limit, nextOff, sc.parent.cumSubTableLen)\n\t\t\tif subTableIndex < 0 {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tsc.cur = sc.parent.subTables[subTableIndex].Scanner(\n\t\t\t\tsc.ctx,\n\t\t\t\tnextOff-subTableStart, scanLimit-subTableStart,\n\t\t\t\tsubTableLimit-subTableStart)\n\t\t\tsc.curLimit = scanLimit\n\t\t}\n\t\tif sc.cur.Scan() {\n\t\t\treturn true\n\t\t}\n\t\tsc.cur = nil\n\t}\n}", "func (s *Scanner) Scan() bool {\n\tif s.scanner != nil {\n\t\tb := s.scanner.Scan()\n\t\tif b {\n\t\t\tstr := s.scanner.Text()\n\t\t\tlower := str\n\t\t\tif s.caseInsensitive {\n\t\t\t\tlower = strings.ToLower(str)\n\t\t\t}\n\t\t\ts.token = &Token{str, lower, s.caseInsensitive}\n\t\t}\n\t\treturn b\n\t}\n\treturn false\n}", "func (g *GetCommits) Scan() bool {\n\treturn g.scanner.Scan()\n}", "func (r *Scanner) Scan() bool {\n\tif r.err != nil && r.err != ErrTimeout {\n\t\treturn false\n\t}\n\tfor {\n\t\tfileChanged, dirChanged := r.file.Watch(), r.journalDir.Watch()\n\t\tvar n int64\n\t\tn, r.err = r.message.ReadFrom(r.file)\n\t\tif r.err != nil {\n\t\t\t// rollback the reader\n\t\t\tif _, seekErr := r.file.Seek(-n, io.SeekCurrent); seekErr != nil {\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\t// unexpected io error\n\t\t\tswitch r.err {\n\t\t\tcase io.EOF, io.ErrUnexpectedEOF:\n\t\t\tdefault:\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\t// not the last file, open the next journal file\n\t\t\tif !r.journalDir.IsLast(r.journalFile) {\n\t\t\t\tif r.err = r.reopenFile(); r.err != nil {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// the last file, wait for any changes\n\t\t\tvar timeoutChan <-chan time.Time\n\t\t\tif r.Timeout != 0 {\n\t\t\t\ttimeoutChan = time.After(r.Timeout)\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase <-dirChanged:\n\t\t\t\tif r.err = r.reopenFile(); r.err != nil {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\tcase <-fileChanged:\n\t\t\tcase <-timeoutChan:\n\t\t\t\tr.err = ErrTimeout\n\t\t\t\treturn false\n\t\t\tcase <-time.After(NotifyTimeout):\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\n\t// check offset\n\tif r.message.Offset != r.offset {\n\t\tr.err = &ScanOffsetError{\n\t\t\tFile: r.file.Name(),\n\t\t\tOffset: r.message.Offset,\n\t\t\tTimestamp: r.message.Timestamp,\n\t\t\tExpectedOffset: r.offset,\n\t\t}\n\t}\n\tif r.err != nil {\n\t\treturn false\n\t}\n\n\tr.offset = r.message.Offset + 1\n\treturn true\n}", "func (i *Iter) Scan(dest ...interface{}) bool {\n\treturn i.iter.Scan(dest...)\n}", "func (s *Scanner) Scan() bool {\n\tif s.err != nil {\n\t\treturn false\n\t}\n\n\tif s.p == nil {\n\t\terrs := make([]struct 
{\n\t\t\terr error\n\t\t\tpos pars.Position\n\t\t}, len(sequenceParsers))\n\t\tfor i, p := range sequenceParsers {\n\t\t\ts.s.Push()\n\t\t\ts.res, errs[i].err = p.Parse(s.s)\n\t\t\tif errs[i].err == nil {\n\t\t\t\ts.s.Drop()\n\t\t\t\ts.p = p\n\t\t\t\treturn true\n\t\t\t}\n\t\t\terrs[i].pos = s.s.Position()\n\t\t\ts.s.Pop()\n\t\t}\n\t\targmax := 0\n\t\tmaxpos := pars.Position{Line: 0, Byte: 0}\n\t\tfor i, v := range errs {\n\t\t\tif maxpos.Less(v.pos) {\n\t\t\t\targmax = i\n\t\t\t\tmaxpos = v.pos\n\t\t\t}\n\t\t}\n\t\ts.err = errs[argmax].err\n\t\treturn false\n\t}\n\n\ts.res, s.err = s.p.Parse(s.s)\n\treturn s.err == nil\n}", "func (c *Reader) Next() bool {\n\treturn c.Scanner.Scan()\n}", "func (ts *TokenScanner) hasNext() bool {\n\tif ts.i >= len(ts.tokens) {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (it *iterator) Next() bool {\n\t// If the iterator was not yet initialized, do it now\n\tif !it.inited {\n\t\tit.inited = true\n\t\treturn len(it.keys) > 0\n\t}\n\t// Iterator already initialize, advance it\n\tif len(it.keys) > 0 {\n\t\tit.keys = it.keys[1:]\n\t\tit.values = it.values[1:]\n\t}\n\treturn len(it.keys) > 0\n}", "func (it *iterator) Next() (ok bool) {\n\tit.next <- struct{}{}\n\tselect {\n\tcase it.err = <-it.errCh:\n\t\treturn false\n\tcase it.current, ok = <-it.items:\n\t}\n\treturn\n}", "func (di *Iterator) Next() bool {\n\tif !di.first {\n\t\tdi.iter.Next()\n\t}\n\tdi.first = false\n\treturn di.iter.Valid()\n}", "func (iter *SelectIter) Next() bool {\n\treturn iter.iterx.StructScan(iter.dest)\n}", "func (it *Iterator) Next() bool {\n\tit.n = it.n.next[0]\n\treturn it.n != nil\n}", "func (it *Iterator) Next() bool {\n\tit.n = it.n.next[0]\n\treturn it.n != nil\n}", "func (it *BytesScanner) HasNext() bool {\n\treturn it.index < len(it.source) || it.emitEmptySlice\n}", "func (sc *largeFlatTableScanner) Scan() bool {\n\tfor {\n\t\tif sc.cur == nil {\n\t\t\tif sc.srcSc == nil {\n\t\t\t\tnextOff := sc.curLimit\n\t\t\t\tsrcTableIndex, srcTableStart, srcTableLimit, scanLimit := nextSubTable(sc.start, sc.limit, nextOff, sc.parent.cumApproxSrcTableLen)\n\t\t\t\tif srcTableIndex < 0 {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\tsc.srcSc = sc.parent.srcTables[srcTableIndex].Scanner(\n\t\t\t\t\tsc.ctx,\n\t\t\t\t\tnextOff-srcTableStart, scanLimit-srcTableStart,\n\t\t\t\t\tsrcTableLimit-srcTableStart)\n\t\t\t\tsc.curLimit = scanLimit\n\t\t\t}\n\t\t\tif !sc.srcSc.Scan() {\n\t\t\t\tsc.srcSc = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tval := sc.srcSc.Value()\n\t\t\tif val.Type() == StructType {\n\t\t\t\tst := val.Struct(sc.parent.ast)\n\t\t\t\tif st.Len() != 1 {\n\t\t\t\t\tPanicf(sc.parent.ast, \"flatten: subtable must contain exactly one column, but found %v\", val)\n\t\t\t\t}\n\t\t\t\tval = st.Field(0).Value\n\t\t\t}\n\t\t\tsc.cur = val.Table(sc.parent.ast).Scanner(sc.ctx, 0, 1, 1)\n\t\t}\n\t\tif sc.cur.Scan() {\n\t\t\treturn true\n\t\t}\n\t\tsc.cur = nil\n\t}\n}", "func (c *counter) next() bool {\n\thasNext := true\n\tif util.EqualsSliceUint(c.state, c.endState) {\n\t\thasNext = false\n\t} else {\n\t\tc.incrementState()\n\t}\n\n\treturn hasNext\n}", "func (r *Reader) ScanKey() bool {\n\treturn r.scan(false)\n}", "func (r *Row) Next() bool {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tif r.s == nil {\n\t\treturn false\n\t}\n\tr.raw, r.err = r.s.DecodeBytes()\n\tr.s = nil\n\n\treturn r.err == nil && r.raw != nil\n}", "func (i *MapIterator) Next() bool {\n\ti.i++\n\treturn i.i <= i.l\n}", "func (d *Decoder) Scan() (ok bool) {\n if d.err != nil || !d.scanner.Scan() {\n return false\n }\n \n 
d.lineno++\n \n line := strings.Trim(d.scanner.Text(), \" \\t\\r\\n\")\n if len(line) == 0 {\n // try again\n return d.Scan()\n }\n \n if line[0] != ':' {\n d.err = fmt.Errorf(\"parse error at line %d: line does not begin with a colon\", d.lineno)\n return false\n }\n \n rec, err := DecodeRecordHex(line)\n if err != nil {\n d.err = fmt.Errorf(\"parse error at line %d: %s\", d.lineno, err.Error())\n return false\n } else {\n d.rec = rec\n return true\n }\n \n}", "func (es *LogEntScanner) Scan() bool {\n\tif es.err != nil {\n\t\treturn false\n\t}\n\n\tes.ent = LogEnt{\n\t\tattrs: make(map[string]string),\n\t}\n\n\tif es.next == nil {\n\t\t// scan \"commit SHA.*\" line\n\t\tfor es.scanner.Scan() {\n\t\t\tif m := commitPattern.FindSubmatch(es.scanner.Bytes()); m != nil {\n\t\t\t\tes.next = m\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif es.next == nil {\n\t\t\treturn false\n\t\t}\n\t}\n\tes.ent.commit = string(es.next[1])\n\n\treturn es.scanKeyVals()\n}", "func (t *Decoder) Next() bool {\n\tvar buf [1]byte\n\tif _, t.err = io.ReadFull(t.r, buf[:]); t.err != nil {\n\t\tif t.entry != nil && t.entry.Kind() == EntryEnd {\n\t\t\tt.err = nil\n\t\t}\n\t\treturn false\n\t}\n\n\tswitch buf[0] {\n\tcase EntryEnter:\n\t\tvar entry EnterEntry\n\t\tif t.err = entry.decode(t.r); t.err != nil {\n\t\t\treturn false\n\t\t}\n\t\tt.entry = &entry\n\tcase EntryLeave:\n\t\tt.entry = &LeaveEntry{}\n\tcase EntryInstruction:\n\t\tvar entry InstructionEntry\n\t\tif t.err = entry.decode(t.r); t.err != nil {\n\t\t\treturn false\n\t\t}\n\t\tt.entry = &entry\n\tcase EntryEnd:\n\t\tt.entry = &EndEntry{}\n\tdefault:\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (cur *cursor) hasNext() bool {\n\treturn cur.idx < int(cur.nd.count)-1\n}", "func (i *Iterator) Next() bool {\n\ti.n = i.n.atomicLoadNext(0)\n\treturn i.n != nil\n}", "func (s *Scanner) Scan() bool {\n\tif s.done {\n\t\treturn false\n\t}\n\tvar err error\n\tvar rest []byte\n\tvar strings []string\n\tvar n int\n\tfor {\n\t\tif s.err != nil && s.err != io.EOF {\n\t\t\treturn false\n\t\t}\n\t\tif len(s.buf) > 0 {\n\t\t\t// try to parse what we have in buf\n\t\t\trest, strings, err = ExtractStrings(s.buf)\n\t\t\tif err == nil {\n\t\t\t\tif len(strings) > 0 {\n\t\t\t\t\t// we got a log line\n\t\t\t\t\ts.strings = strings\n\t\t\t\t\ts.buf = rest\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t\t// there was no content that could be extracted\n\t\t\t\t// so we need more data, just get rid of the useless spaces\n\t\t\t\ts.buf = rest\n\t\t\t} else if err != ErrNoEndline && err != ErrQuoteLeftOpen {\n\t\t\t\t// parsing error\n\t\t\t\ts.err = err\n\t\t\t\treturn false\n\t\t\t} else if s.err == io.EOF && err == ErrNoEndline {\n\t\t\t\t// there is no more available data to read\n\t\t\t\t// just output the last content\n\t\t\t\tif len(strings) > 0 {\n\t\t\t\t\ts.strings = strings\n\t\t\t\t\ts.buf = rest\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t\treturn false\n\t\t\t} else if s.err == io.EOF && err == ErrQuoteLeftOpen {\n\t\t\t\t// there is no more available data to read\n\t\t\t\t// but the last content is not valid\n\t\t\t\ts.err = err\n\t\t\t\treturn false\n\t\t\t}\n\t\t\t// here, at the end of the if/elseif, we know that err is a\n\t\t\t// \"incomplete line\" error, and that we can try to read more data\n\t\t}\n\t\t// there was not enough data to generate new content\n\t\tif s.err != nil {\n\t\t\treturn false\n\t\t}\n\t\t// if there is no more space on the right side of s.buf, or if there is\n\t\t// much space on the left side of s.buf, then copy the data to the\n\t\t// beginning of 
s.origbuf\n\t\tif cap(s.buf) < 65536 && (len(s.buf) == cap(s.buf) || cap(s.buf) < 32768) {\n\t\t\tcopy(s.origbuf[:len(s.buf)], s.buf)\n\t\t\ts.buf = s.origbuf[:len(s.buf)]\n\t\t}\n\t\tif len(s.buf) == 65536 {\n\t\t\t// the line to parse is too long\n\t\t\ts.err = bufio.ErrTooLong\n\t\t\treturn false\n\t\t}\n\t\t// read some more data into the free space on the right side of s.buf\n\t\tn, err = s.reader.Read(s.buf[len(s.buf):cap(s.buf)])\n\t\tif err == io.EOF {\n\t\t\tif s.err == nil {\n\t\t\t\ts.err = io.EOF\n\t\t\t}\n\t\t} else if err != nil {\n\t\t\ts.err = err\n\t\t\treturn false\n\t\t} else if n == 0 {\n\t\t\t// err == nil but n == 0\n\t\t\ts.err = io.ErrNoProgress\n\t\t\treturn false\n\t\t}\n\t\ts.buf = s.buf[:len(s.buf)+n]\n\t}\n}", "func (r *rows) Next() bool {\n\treturn r.rows.Next()\n}", "func (result *BufferedResult) HasNext() bool {\n\treturn result.index < len(result.values)\n}", "func (iter *Iterator) Next() bool { return iter.impl.Next() }", "func (i *Iterator) Next() (bool, error) {\n\treturn false, errors.New(\"next failure\")\n}", "func (self *Cursor) HasNext() bool {\n\tif self.curr != nil {\n\t\treturn true\n\t}\n\tfor {\n\t\tif self.stack.len() == 0 || self.opts.Limit == 0 {\n\t\t\treturn false\n\t\t}\n\t\tself.curr = self.stack.pop()\n\t\tself.stack.push(self.curr.next[1&^int(self.opts.Dir)])\n\t\tself.stack.push(self.curr.next[int(self.opts.Dir)])\n\t\tif self.curr.leaf != nil {\n\t\t\tself.opts.Limit--\n\t\t\treturn true\n\t\t}\n\t}\n}", "func (r *linesIterator) Next(line *string) bool {\n\n\tfor r.scanner.Scan() {\n\t\t*line = r.scanner.Text()\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (iter *TypesIterator) Next() bool {\n\tif len(iter.spec.types) <= iter.index {\n\t\treturn false\n\t}\n\n\titer.Type = iter.spec.types[iter.index]\n\titer.index++\n\treturn true\n}", "func (i *IndexIterator) Next() bool {\n\tfor i.err == nil && i.steps < i.nslots {\n\t\tslot, err := i.readCurrent()\n\n\t\tif err != nil {\n\t\t\ti.err = err\n\t\t\tbreak\n\t\t}\n\n\t\tif slot.lpos == 0 {\n\t\t\ti.steps = i.nslots\n\t\t\tbreak\n\t\t}\n\n\t\ti.steps++\n\t\tif i.cursor++; i.cursor == i.nslots {\n\t\t\ti.cursor = 0\n\t\t}\n\n\t\tif slot.cksum == i.cksum {\n\t\t\ti.current = slot\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (s *sensorPackage) Next() bool {\n\ts.currentIndex++\n\tif s.currentIndex < s.length {\n\t\treturn true\n\t}\n\ts.currentIndex = 0\n\treturn false\n}", "func (iterator *Iterator) Next() bool {\n\treturn iterator.iterator.Next()\n}", "func (iter *SyncFolderIterator) Next() bool {\n\treturn len(iter.fileInfos) > 0\n}", "func (t *Tail) Scan() bool {\n\tvar err error\n\tif t.done {\n\t\treturn false\n\t}\n\tif t.init {\n\t\t// there is no pos file Start reading from the end of the file\n\t\tif t.InitialReadPositionEnd && t.isCreatePosFile {\n\t\t\tt.fileFd.Seek(0, os.SEEK_END)\n\t\t}\n\t\tt.data = make(chan []byte, 1)\n\t\tt.scanner = bufio.NewScanner(t.fileFd)\n\t\tt.init = false\n\t}\n\n\tfor {\n\t\tif t.scanner.Scan() {\n\t\t\tt.data <- t.scanner.Bytes()\n\t\t\treturn true\n\t\t}\n\n\t\tif err := t.scanner.Err(); err != nil {\n\t\t\tt.err = err\n\t\t\treturn false\n\t\t}\n\n\t\tt.Stat.Offset, err = t.fileFd.Seek(0, os.SEEK_CUR)\n\t\tif err != nil {\n\t\t\tt.err = err\n\t\t\treturn false\n\t\t}\n\n\t\tfd, err := os.Open(t.file)\n\t\tif os.IsNotExist(err) {\n\t\t\ttime.Sleep(time.Millisecond * 10)\n\t\t} else if err != nil {\n\t\t\tt.err = err\n\t\t\treturn false\n\t\t}\n\t\tfdStat, err := fd.Stat()\n\t\tif err != nil {\n\t\t\tt.err = err\n\t\t\treturn 
false\n\t\t}\n\t\tstat := fdStat.Sys().(*syscall.Stat_t)\n\t\tif stat.Ino != t.Stat.Inode {\n\t\t\tt.Stat.Inode = stat.Ino\n\t\t\tt.Stat.Offset = 0\n\t\t\tt.Stat.Size = stat.Size\n\t\t\tt.fileFd.Close()\n\t\t\tt.fileFd = fd\n\t\t} else {\n\t\t\tif stat.Size < t.Stat.Size {\n\t\t\t\tt.fileFd.Seek(0, os.SEEK_SET)\n\t\t\t}\n\t\t\tt.Stat.Size = stat.Size\n\t\t\ttime.Sleep(time.Millisecond * 10)\n\t\t\tfd.Close()\n\t\t}\n\t\tt.scanner = bufio.NewScanner(t.fileFd)\n\n\t\terr = posUpdate(t)\n\t\tif err != nil {\n\t\t\tt.err = err\n\t\t\treturn false\n\t\t}\n\t}\n}", "func (iter *dbCacheIterator) Next() bool {\n\t// Nothing to return if cursor is exhausted.\n\tif iter.currentIter == nil {\n\t\treturn false\n\t}\n\n\t// Move the current iterator to the next entry and choose the iterator\n\t// that is both valid and has the smaller key.\n\titer.currentIter.Next()\n\treturn iter.chooseIterator(true)\n}", "func (r *Result) Next() (more bool) {\n\tif r.conn == nil {\n\t\treturn false\n\t}\n\tswitch r.conn.Status() {\n\tcase StatusResultDone:\n\t\treturn false\n\t}\n\treturn !r.val.eof\n}", "func (m *MssqlRowReader) Next() bool {\n\tif m.NextFunc != nil {\n\t\treturn m.NextFunc()\n\t}\n\tnext := m.Cursor.Next()\n\tif next {\n\t\tcolumnNames, err := m.Cursor.Columns()\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\tm.columns = make([]interface{}, len(columnNames))\n\t\tcolumnPointers := make([]interface{}, len(columnNames))\n\t\tfor i := 0; i < len(columnNames); i++ {\n\t\t\tcolumnPointers[i] = &m.columns[i]\n\t\t}\n\t\tif err := m.Cursor.Scan(columnPointers...); err != nil {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn next\n}", "func (s *QualReader) HasNext() bool {\n var err error\n\n // Grab next two lines, verify, and return false if any errors occur.\n\n s.next[0], err = s.reader.ReadBytes('\\n')\n if err != nil || len(s.next[0]) == 0 || s.next[0][0] != '>' {\n return false\n }\n\n s.next[1], err = s.reader.ReadBytes('\\n')\n if (err != nil && err != io.EOF) || len(s.next[1]) == 0 {\n return false\n }\n\n // Sanitize newlines\n s.next[0] = bytes.Trim(s.next[0], \"\\n\")\n s.next[1] = bytes.Trim(s.next[1], \"\\n\")\n\n return true\n}", "func (s Scanner) match(expected byte) bool {\n\tif s.reachedEnd() {\n\t\treturn false\n\t}\n\n\tif s.peek() == expected {\n\t\ts.current++\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (r *walReader) next() bool {\n\tif r.cur >= len(r.wal.files) {\n\t\treturn false\n\t}\n\tcf := r.wal.files[r.cur]\n\n\t// Save position after last valid entry if we have to truncate the WAL.\n\tlastOffset, err := cf.Seek(0, os.SEEK_CUR)\n\tif err != nil {\n\t\tr.err = err\n\t\treturn false\n\t}\n\n\tet, flag, b, err := r.entry(cf)\n\t// If we reached the end of the reader, advance to the next one\n\t// and close.\n\t// Do not close on the last one as it will still be appended to.\n\tif err == io.EOF {\n\t\tif r.cur == len(r.wal.files)-1 {\n\t\t\treturn false\n\t\t}\n\t\t// Current reader completed, close and move to the next one.\n\t\tif err := cf.Close(); err != nil {\n\t\t\tr.err = err\n\t\t\treturn false\n\t\t}\n\t\tr.cur++\n\t\treturn r.next()\n\t}\n\tif err != nil {\n\t\tr.err = err\n\n\t\tif _, ok := err.(walCorruptionErr); ok {\n\t\t\tr.err = r.truncate(lastOffset)\n\t\t}\n\t\treturn false\n\t}\n\n\tr.curType = et\n\tr.curFlag = flag\n\tr.curBuf = b\n\treturn r.err == nil\n}", "func (i *ResolvedProductIter) Next() bool {\n\tif i.first && i.err != nil {\n\t\ti.first = false\n\t\treturn true\n\t}\n\ti.first = false\n\ti.i++\n\treturn i.i < len(i.page)\n}", "func Next(it 
Iterator) bool {\n\treturn neogointernal.Syscall1(\"System.Iterator.Next\", it).(bool)\n}", "func (r *Rows) Next() bool {\n\treturn r.c.Next(context.TODO())\n}", "func (it *KVResultsIter) HasNext() bool {\n\tqueryResult, err := it.it.Next()\n\tif err != nil {\n\t\t// Save the error and return true. The caller will get the error when Next is called.\n\t\tit.nextErr = err\n\t\treturn true\n\t}\n\tif queryResult == nil {\n\t\treturn false\n\t}\n\tit.next = queryResult\n\treturn true\n}", "func (i *StringIterator) Next() bool {\n\ti.i++\n\treturn i.i <= i.l\n}", "func (s *IdeaScanner) Scan() bool {\n\tsbuf := bytes.NewBuffer(make([]byte, 0, 4096))\n\n\t// Scan the Header\n\tif s.nextHeader != nil {\n\t\t_, err := sbuf.Write(s.nextHeader)\n\t\tif err != nil {\n\t\t\ts.lastError = err\n\t\t\treturn false\n\t\t}\n\t} else {\n\t\tfor s.scanner.Scan() {\n\t\t\tline := s.scanner.Text()\n\n\t\t\tif i := strings.Index(line, \"## [\"); i >= 0 {\n\t\t\t\t_, err := sbuf.Write(s.scanner.Bytes())\n\t\t\t\tif err != nil {\n\t\t\t\t\ts.lastError = err\n\t\t\t\t\treturn false\n\t\t\t\t}\n\n\t\t\t\tgoto scanBody\n\t\t\t}\n\t\t}\n\n\t\t// Scanned all the lines and didn't find a header\n\t\ts.lastError = s.scanner.Err()\n\t\treturn false\n\t}\n\nscanBody:\n\ts.nextHeader = nil\n\n\tfor s.scanner.Scan() {\n\t\tline := s.scanner.Text()\n\n\t\t// Look for the start of another Idea\n\t\tif i := strings.Index(line, \"## [\"); i >= 0 {\n\t\t\ts.nextHeader = s.scanner.Bytes()\n\t\t\tbreak\n\t\t}\n\n\t\t// Look for the ClosedAt timestamp\n\t\tif _, err := time.Parse(time.UnixDate, line[:len(line)-1]); err == nil {\n\t\t\tbreak\n\t\t}\n\n\t\t// Add this line to the body\n\t\tif _, err := sbuf.Write(s.scanner.Bytes()); err != nil {\n\t\t\ts.lastError = err\n\t\t\treturn false\n\t\t}\n\t}\n\n\tpbuf := bufio.NewReader(bytes.NewReader(sbuf.Bytes()))\n\n\t// Grab the complete header line as a string\n\tlineBytes, err := pbuf.ReadBytes('\\n')\n\tif err != nil {\n\t\ts.lastError = err\n\t\treturn false\n\t}\n\tline := string(lineBytes)\n\n\t// Parse the Status, Id, Name\n\tstatus, id, name, err := parseHeader(line)\n\tif err != nil {\n\t\ts.lastError = err\n\t\treturn false\n\t}\n\n\t// Parse the Body\n\tbodyBytes, err := ioutil.ReadAll(pbuf)\n\tif err != nil {\n\t\ts.lastError = err\n\t\treturn false\n\t}\n\n\ts.lastIdea = &Idea{\n\t\tStatus: status,\n\t\tId: id,\n\t\tName: name,\n\t\tBody: string(bytes.TrimSpace(bodyBytes)) + \"\\n\",\n\t}\n\n\treturn true\n}", "func (s *Scanner) Scan() (Entry, error) {\n\tblock := make(Entry)\n\n\tfor {\n\t\tch, err := s.peek()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if unicode.IsSpace(rune(ch)) {\n\t\t\treturn block, s.scanEmptyLine()\n\t\t}\n\n\t\tkey, value, err := s.scanPair()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tblock[key] = value\n\t}\n}", "func (iter *CouchDBMockStateRangeQueryIterator) HasNext() bool {\n\tif iter.Closed {\n\t\t// previously called Close()\n\t\treturn false\n\t}\n\tif iter.CurrentIndex >= len(iter.QueryResults) {\n\t\treturn false\n\t}\n\treturn true\n}", "func (j *IterableJob) Next() bool {\n\tif !j.finished {\n\t\tj.lock.Lock()\n\t\tcurr := j.pos\n\t\tj.pos++\n\t\tj.lock.Unlock()\n\t\tj.finished = !j.theFunc(curr)\n\t}\n\treturn !j.finished\n}", "func (it *ScriptingReturnValueIterator) Next() bool {\n\tT().Debugf(\"%d return values to iterate\", len(it.values.values))\n\tit.inx++\n\tif it.inx < len(it.values.values) {\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n}", "func (mci *XMCacheIterator) Next() bool {\n\tif mci.dir 
== dirEOI || mci.err != nil {\n\t\treturn false\n\t} else if mci.dir == dirReleased {\n\t\tmci.err = iterator.ErrIterReleased\n\t\treturn false\n\t}\n\n\tswitch mci.dir {\n\tcase dirSOI:\n\t\treturn mci.First()\n\t}\n\n\tif !mci.setMciKeys(mci.index, setTypeNext) {\n\t\treturn false\n\t}\n\treturn mci.next()\n}", "func (s *Scanner) Scan() (result Token, err error) {\n\tif s.tokenQueue.Len() == 0 && s.ended {\n\t\treturn\n\t}\n\tif err = s.prepare(); err != nil {\n\t\treturn\n\t}\n\telem := s.tokenQueue.Front()\n\tresult = elem.Value.(Token)\n\ts.tokenQueue.Remove(elem)\n\ts.parsedCount++\n\treturn\n}", "func (iter *FlatIterator) Next() bool {\n\tif len(iter.db.buff) == 0 && !iter.eof {\n\t\tif err := iter.db.readChunk(); err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\titer.eof = true\n\t\t\t\treturn false\n\t\t\t} else {\n\t\t\t\titer.err = err\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\tvar offset int\n\tx, n := binary.Uvarint(iter.db.buff)\n\toffset += n\n\tif n <= 0 {\n\t\treturn false\n\t}\n\tkey := iter.db.buff[offset : offset+int(x)]\n\toffset += int(x)\n\tx, n = binary.Uvarint(iter.db.buff[offset:])\n\toffset += n\n\tif n <= 0 {\n\t\treturn false\n\t}\n\tval := iter.db.buff[offset : offset+int(x)]\n\toffset += int(x)\n\n\titer.key = key\n\titer.val = val\n\titer.db.buff = iter.db.buff[offset:]\n\treturn true\n}", "func (tr *Tree) Scan(iter func(cell store_pb.RecordID, data unsafe.Pointer, extra uint64) bool) {\n\tif tr.root == nil {\n\t\treturn\n\t}\n\ttr.scan(tr.root, iter)\n}", "func (i *Iter) Next() (b bool) {\n\tif len(i.v) == 0 {\n\t\treturn false\n\t}\n\n\ti.c, i.v = i.v[0], i.v[1:]\n\treturn len(i.v) >= 0\n}", "func (it *FilteringInt32Iterator) HasNext() bool {\n\tif it.hasNext {\n\t\treturn true\n\t}\n\tfor it.preparedInt32Item.HasNext() {\n\t\tnext := it.base.Next()\n\t\tisFilterPassed, err := it.filter.Check(next)\n\t\tif err != nil {\n\t\t\tif !isEndOfInt32Iterator(err) {\n\t\t\t\terr = errors.Wrap(err, \"filtering iterator: check\")\n\t\t\t}\n\t\t\tit.err = err\n\t\t\treturn false\n\t\t}\n\n\t\tif !isFilterPassed {\n\t\t\tcontinue\n\t\t}\n\n\t\tit.hasNext = true\n\t\tit.next = next\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (i *ArrayIterator) Next() bool {\n\ti.i++\n\treturn i.i <= i.l\n}", "func (D DiskIterator) HasNext() bool {\n\treturn D.recordInx > 0\n}", "func (iter *MockStateRangeQueryIterator) HasNext() bool {\n\tif iter.Closed {\n\t\t// previously called Close()\n\t\treturn false\n\t}\n\n\tif iter.Current == nil {\n\t\treturn false\n\t}\n\n\tcurrent := iter.Current\n\tfor current != nil {\n\t\t// if this is an open-ended query for all keys, return true\n\t\tif iter.StartKey == \"\" && iter.EndKey == \"\" {\n\t\t\treturn true\n\t\t}\n\t\tcomp1 := strings.Compare(current.Value.(string), iter.StartKey)\n\t\tcomp2 := strings.Compare(current.Value.(string), iter.EndKey)\n\t\tif comp1 >= 0 {\n\t\t\tif comp2 < 0 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t\tcurrent = current.Next()\n\t}\n\treturn false\n}", "func (cca *cookedSyncCmdArgs) scanningComplete() bool {\n\treturn atomic.LoadUint32(&cca.atomicScanningStatus) > 0\n}", "func (c *ViewSchema) NextRow() bool {\n\tif c.rowNum >= len(c.rows)-1 {\n\t\tc.done = true\n\t}\n\tc.rowNum = c.rowNum + 1\n\treturn !c.done\n}", "func (it *eventsIterator) Next() bool {\n\tfor {\n\t\tif ok := it.inner.Next(); !ok {\n\t\t\treturn false\n\t\t}\n\n\t\tevent, err := it.keys.KeyToEvent(it.inner.Key())\n\t\tif err != nil {\n\t\t\tlogrus.WithError(err).Error(\"could not iterate over events: corrupted 
database state\")\n\t\t\treturn false\n\t\t}\n\n\t\tif event.GetTopic().GetId() == it.topic {\n\t\t\treturn true\n\t\t}\n\t}\n}", "func (this *Database) MaybeScan(row Row, vars ...interface{}) bool {\n\terr := row.Scan(vars...)\n\tif err == sql.ErrNoRows {\n\t\treturn false\n\t}\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn true\n}", "func (this *Database) MaybeScan(row Row, vars ...interface{}) bool {\n\terr := row.Scan(vars...)\n\tif err == sql.ErrNoRows {\n\t\treturn false\n\t}\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn true\n}", "func (qr *queryResult) HasNextResultSet() bool { return false }", "func (lx *Lexer) hasNextRune() bool {\n\treturn lx.unicodeQueue.Len() > 0 || lx.Scanner.HasNext()\n}", "func (i *iterator) HasNext() bool {\n\treturn i.index < len(i.sortArr) && i.sortArr[i.index] == i.currentSequence\n}", "func (a *arguments) Scan() bool {\n\tif a.err != nil {\n\t\treturn false\n\t}\n\n\ts, err := a.reader.ReadString('\\n')\n\tif err != nil {\n\t\ta.err = err\n\t\tif err == io.EOF && len(s) > 0 {\n\t\t\ta.argument = s\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\tl := len(s)\n\tif l >= 2 && s[l-2] == '\\r' {\n\t\ta.argument = s[:l-2]\n\t} else {\n\t\ta.argument = s[:l-1]\n\t}\n\treturn true\n}", "func (c *FunctionSchema) NextRow() bool {\n\tif c.rowNum >= len(c.rows)-1 {\n\t\tc.done = true\n\t}\n\tc.rowNum = c.rowNum + 1\n\treturn !c.done\n}", "func (set *ContentTypeSet) Next() bool {\n\tif set == nil {\n\t\treturn false\n\t}\n\tset.pos++\n\treturn set.pos < len(set.set)\n}", "func (it *EnumFilteringInt32Iterator) HasNext() bool {\n\tif it.hasNext {\n\t\treturn true\n\t}\n\tfor it.preparedInt32Item.HasNext() {\n\t\tnext := it.base.Next()\n\t\tisFilterPassed, err := it.filter.Check(it.count, next)\n\t\tif err != nil {\n\t\t\tif !isEndOfInt32Iterator(err) {\n\t\t\t\terr = errors.Wrap(err, \"enum filtering iterator: check\")\n\t\t\t}\n\t\t\tit.err = err\n\t\t\treturn false\n\t\t}\n\t\tit.count++\n\n\t\tif !isFilterPassed {\n\t\t\tcontinue\n\t\t}\n\n\t\tit.hasNext = true\n\t\tit.next = next\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (i *Iterator) Next() bool {\n\treturn i.i.Next()\n}", "func (rs *rowSets) HasNextResultSet() bool {\n\treturn rs.pos+1 < len(rs.sets)\n}", "func (r *hashRange) Next() bool {\n\tnext, overflow := new(uint256.Int).AddOverflow(r.current, r.step)\n\tif overflow {\n\t\treturn false\n\t}\n\tr.current = next\n\treturn true\n}", "func (db *calcDatabase) next() bool {\n\tmatched, rows := false, len(db.database)\n\tfor !matched && db.row < rows {\n\t\tif db.row++; db.row < rows {\n\t\t\tmatched = db.criteriaEval()\n\t\t}\n\t}\n\treturn matched\n}", "func (qi *QueryIterator) HasNext() bool {\n\treturn qi.processedRows < qi.totalRows\n}", "func (r *Reader) Next() bool {\n\tif r.err != nil || r.finished {\n\t\treturn false\n\t}\n\n\tdefer func() {\n\t\tif r.err != nil {\n\t\t\tr.Close()\n\t\t}\n\t}()\n\n\t// skip newlines between eleemnts\n\tt, err := getTokenIgnoreCharData(r.d)\n\tif err != nil {\n\t\tr.err = err\n\t\treturn false\n\t}\n\n\tif isEndElement(t, \"row\") {\n\t\tt, r.err = getTokenIgnoreCharData(r.d)\n\t\tif r.err != nil {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif isEndElement(t, r.typ) {\n\t\tr.Close()\n\t\treturn false\n\t}\n\n\tif !isStartElement(t, \"row\") {\n\t\tr.err = fmt.Errorf(\"unexpected token: %#v, wanted xml.StartElement 'row'\", t)\n\t\treturn false\n\t}\n\tswitch r.typ {\n\tcase typeBadges:\n\t\tr.err = decodeBadgeRow(t, &r.Badge)\n\tcase typeComments:\n\t\tr.err = decodeCommentRow(t, &r.Comment)\n\tcase 
typePosts:\n\t\tr.err = decodePostRow(t, &r.Post)\n\tcase typePostHistory:\n\t\tr.err = decodePostHistoryRow(t, &r.PostHistory)\n\tcase typePostLinks:\n\t\tr.err = decodePostLinkRow(t, &r.PostLink)\n\tcase typeTags:\n\t\tr.err = decodeTagRow(t, &r.Tag)\n\tcase typeUsers:\n\t\tr.err = decodeUserRow(t, &r.User)\n\tcase typeVotes:\n\t\tr.err = decodeVoteRow(t, &r.Vote)\n\t}\n\tif r.err != nil {\n\t\treturn false\n\t}\n\treturn true\n}", "func (it *EnumHandlingInt32Iterator) HasNext() bool {\n\tif it.hasNext {\n\t\treturn true\n\t}\n\tif it.preparedInt32Item.HasNext() {\n\t\tnext := it.base.Next()\n\t\terr := it.handler.Handle(it.count, next)\n\t\tif err != nil {\n\t\t\tif !isEndOfInt32Iterator(err) {\n\t\t\t\terr = errors.Wrap(err, \"enum handling iterator: check\")\n\t\t\t}\n\t\t\tit.err = err\n\t\t\treturn false\n\t\t}\n\t\tit.count++\n\n\t\tit.hasNext = true\n\t\tit.next = next\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (result *Result) HasNext() bool {\n\treturn result.index < len(result.pageValues) || result.pageToken != nil\n}", "func (it *HashSetIterator) HasNext() bool {\n\tif it.index+1 < len(it.values) {\n\t\treturn true\n\t}\n\treturn false\n}", "func (batcher *UploadObjectsIterator) Next() bool {\n\tif batcher.inc {\n\t\tbatcher.index++\n\t} else {\n\t\tbatcher.inc = true\n\t}\n\treturn batcher.index < len(batcher.Objects)\n}", "func (r *Reader) NextRow() bool {\n\tif r.cols == nil || r.err != nil {\n\t\treturn false\n\t}\n\trow, err := r.readRow()\n\tr.row = row\n\tif row == nil {\n\t\tr.err = err\n\t\tr.cols = nil\n\t\treturn false\n\t}\n\treturn true\n}", "func (it *HandlingInt32Iterator) HasNext() bool {\n\tif it.hasNext {\n\t\treturn true\n\t}\n\tif it.preparedInt32Item.HasNext() {\n\t\tnext := it.base.Next()\n\t\terr := it.handler.Handle(next)\n\t\tif err != nil {\n\t\t\tif !isEndOfInt32Iterator(err) {\n\t\t\t\terr = errors.Wrap(err, \"handling iterator: check\")\n\t\t\t}\n\t\t\tit.err = err\n\t\t\treturn false\n\t\t}\n\n\t\tit.hasNext = true\n\t\tit.next = next\n\t\treturn true\n\t}\n\n\treturn false\n}", "func Scan(after Job, fn func(job Job, jobFn func()) bool) (ok bool) {\n\treturn DefaultScheduler.Scan(after, fn)\n}", "func (s *Scheduler) Scan(\n\tafter Job,\n\tfn func(job Job, jobFn func()) bool,\n) (ok bool) {\n\ts.lock.RLock()\n\tdefer s.lock.RUnlock()\n\n\treturn s.queue.Scan(\n\t\tksuid.KSUID(after),\n\t\tfunc(id ksuid.KSUID, job func()) bool {\n\t\t\treturn fn(Job(id), job)\n\t\t},\n\t)\n}", "func (iter *Iterator) HasNext() bool {\n\thasNext := false\n\tif iter.index < iter.list.size {\n\t\titer.index++\n\t\thasNext = true\n\t}\n\tif iter.index == 1 {\n\t\titer.currentNode = iter.list.head\n\t} else {\n\t\titer.currentNode = iter.currentNode.next\n\t}\n\treturn hasNext\n}", "func (cr *callResult) HasNextResultSet() bool { return false }", "func (it *ConvertingInt32Iterator) HasNext() bool {\n\tif it.hasNext {\n\t\treturn true\n\t}\n\tif it.preparedInt32Item.HasNext() {\n\t\tnext := it.base.Next()\n\t\tnext, err := it.converter.Convert(next)\n\t\tif err != nil {\n\t\t\tif !isEndOfInt32Iterator(err) {\n\t\t\t\terr = errors.Wrap(err, \"converting iterator: check\")\n\t\t\t}\n\t\t\tit.err = err\n\t\t\treturn false\n\t\t}\n\n\t\tit.hasNext = true\n\t\tit.next = next\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (iter *DeleteObjectsIterator) Next() bool {\n\tif iter.inc {\n\t\titer.index++\n\t} else {\n\t\titer.inc = true\n\t}\n\treturn iter.index < len(iter.Objects)\n}", "func (it *EnumConvertingInt32Iterator) HasNext() bool {\n\tif it.hasNext 
{\n\t\treturn true\n\t}\n\tif it.preparedInt32Item.HasNext() {\n\t\tnext := it.base.Next()\n\t\tnext, err := it.converter.Convert(it.count, next)\n\t\tif err != nil {\n\t\t\tif !isEndOfInt32Iterator(err) {\n\t\t\t\terr = errors.Wrap(err, \"converting iterator: check\")\n\t\t\t}\n\t\t\tit.err = err\n\t\t\treturn false\n\t\t}\n\t\tit.count++\n\n\t\tit.hasNext = true\n\t\tit.next = next\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (it *EnumDoingUntilInt32Iterator) HasNext() bool {\n\tif it.hasNext {\n\t\treturn true\n\t}\n\tfor it.preparedInt32Item.HasNext() {\n\t\tnext := it.base.Next()\n\t\tisUntilPassed, err := it.until.Check(it.count, next)\n\t\tif err != nil {\n\t\t\tif !isEndOfInt32Iterator(err) {\n\t\t\t\terr = errors.Wrap(err, \"doing until iterator: until\")\n\t\t\t}\n\t\t\tit.err = err\n\t\t\treturn false\n\t\t}\n\t\tit.count++\n\n\t\tif isUntilPassed {\n\t\t\tit.err = EndOfInt32Iterator\n\t\t}\n\n\t\tit.hasNext = true\n\t\tit.next = next\n\t\treturn true\n\t}\n\n\treturn false\n}" ]
[ "0.75100935", "0.73737305", "0.70878965", "0.67532665", "0.6742028", "0.662504", "0.6594801", "0.6585331", "0.6568856", "0.65153414", "0.6491675", "0.64756805", "0.6306259", "0.6269406", "0.6056796", "0.60155344", "0.598887", "0.5958128", "0.5958128", "0.5942838", "0.59334344", "0.59284574", "0.59059936", "0.5873167", "0.58452344", "0.5839931", "0.58238107", "0.5807261", "0.57827455", "0.57657003", "0.57645494", "0.5753485", "0.57411796", "0.57357836", "0.5734297", "0.56931436", "0.5683271", "0.5682269", "0.56820256", "0.5676033", "0.56594557", "0.5651128", "0.563654", "0.56328875", "0.5624626", "0.5621126", "0.5617486", "0.5616038", "0.56087583", "0.5601322", "0.55996734", "0.55765784", "0.5576064", "0.55726516", "0.55645734", "0.5562726", "0.55579215", "0.5555068", "0.55365914", "0.5515038", "0.55060476", "0.5499774", "0.549345", "0.5492214", "0.5491064", "0.5486088", "0.54836434", "0.5460191", "0.54490036", "0.54456544", "0.5443583", "0.5439993", "0.5439993", "0.5437683", "0.54274905", "0.5416676", "0.541666", "0.54144293", "0.54140013", "0.5404923", "0.54037964", "0.53876805", "0.5379629", "0.5378911", "0.53740364", "0.53727496", "0.5367797", "0.5351169", "0.53478223", "0.53454417", "0.5343358", "0.53429675", "0.53383493", "0.5329829", "0.5318971", "0.52940744", "0.52852005", "0.5284776", "0.5277705", "0.5277323" ]
0.6287024
13
Path returns the path of the currently scanned entry.
func (s *DirScanner) Path() string {
	return s.path
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (e *entry) Path() string {\n\treturn e.path\n}", "func (fi *fileInfo) Path() string {\n\treturn fi.fullPath\n}", "func Path() string {\n\treturn c.Path\n}", "func (r *readerWithStats) Path() string {\n\treturn r.r.Path()\n}", "func (i *Instance) Path() (string, error) {\n\tref, _, _, err := i.GetAsAny(WmiPathKey)\n\treturn ref.(string), err\n}", "func (i *Image) Path() string {\n\treturn i.p\n}", "func (i *Index) Path() string { return i.path }", "func (d *InfoOutput) Path() string {\n\tval := d.reply[\"path\"]\n\n\treturn val.(string)\n\n}", "func (i *Inode) Path() string {\n\treturn i.path\n}", "func (f *File) Path() string {\n\treturn \"/\" + f.key\n}", "func (t *TreeEntry) GetPath() string {\n\tif t == nil || t.Path == nil {\n\t\treturn \"\"\n\t}\n\treturn *t.Path\n}", "func (tb *Table) Path() string {\n\treturn tb.path\n}", "func (c *FileConfigReader) Path() string {\n\treturn c.path\n}", "func (me *Image) Path() string {\n\treturn me.key.path\n}", "func (f *IndexFile) Path() string { return f.path }", "func (f *File) Path() string {\n\treturn f.path\n}", "func Path() string { return out }", "func (s *SimpleHealthCheck) Path() string {\n\treturn s.path\n}", "func (p *SeriesPartition) Path() string { return p.path }", "func (s *Store) Path() string { return s.path }", "func (s *Store) Path() string { return s.path }", "func (l *Clog) Path() string {\n\treturn l.path\n}", "func (fc *FileCache) Path() string {\n\treturn fc.path\n}", "func (o ApplicationStatusHistorySourcePtrOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ApplicationStatusHistorySource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Path\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ArgoCDSpecPrometheusIngressPtrOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ArgoCDSpecPrometheusIngress) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Path\n\t}).(pulumi.StringPtrOutput)\n}", "func (r *ManagerResource) Path() string {\n\treturn r.path\n}", "func (f File) Path() string {\n\treturn string(f)\n}", "func (f *LogFile) Path() string { return f.path }", "func (o ApplicationSpecSourcePtrOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ApplicationSpecSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Path\n\t}).(pulumi.StringPtrOutput)\n}", "func (d *Document) Path() string {\n\treturn d.path\n}", "func (f *Flock) Path() string {\n\treturn f.path\n}", "func (j *Jail) Path() string {\n\treturn j.path\n}", "func (id ID) Path() string {\n\treturn id.path\n}", "func (s *switchPortAllocations) Path() string {\n\treturn s.path\n}", "func (s *Store) Path() string {\n\treturn s.path\n}", "func (s *Store) Path() string {\n\treturn s.path\n}", "func (o ApplicationStatusSyncComparedToSourcePtrOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ApplicationStatusSyncComparedToSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Path\n\t}).(pulumi.StringPtrOutput)\n}", "func (m *Resource) Path() string {\n\treturn m.path\n}", "func (a ArithmeticPage) GetPath() string {\n\treturn a.Common.Path\n}", "func (m *mountPoint) Path() string {\n\tif m.Volume != nil {\n\t\treturn m.Volume.Path()\n\t}\n\n\treturn m.Source\n}", "func (o ApplicationStatusHistorySourceOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusHistorySource) *string { return v.Path }).(pulumi.StringPtrOutput)\n}", "func (ge *GollumEvent) Path() string {\n\treturn \"\"\n}", "func (ctx 
*Context) Path() string {\r\n\treturn ctx.R.URL.Path\r\n}", "func (a *Address) Path() string {\n\treturn a.path\n}", "func (item *baseItem) Path() *proto.Path {\n\treturn &item.path\n}", "func (jm JSONMeta) Path() string {\n\tsb := make([]string, 0)\n\tvar c JSONMetaNode = jm\n\tfor c != nil {\n\t\tsb = append([]string{c.Key()}, sb...)\n\t\t// Prepend a \".\" for non-index segments.\n\t\tif _, ok := c.Parent().(JSONMetaContainerNode); ok {\n\t\t\tsb = append([]string{\".\"}, sb...)\n\t\t}\n\t\tc = c.Parent()\n\t}\n\n\treturn strings.TrimLeft(strings.Join(sb, \"\"), \".\")\n}", "func (t *Tailer) getPath() string {\n\treturn t.path\n}", "func (o ArgoCDSpecPrometheusIngressOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ArgoCDSpecPrometheusIngress) *string { return v.Path }).(pulumi.StringPtrOutput)\n}", "func (app *Application) Path() dbus.ObjectPath {\n\treturn app.config.ObjectPath\n}", "func (c ArtifactConfig) Path() string {\n\treturn c.File\n}", "func (metadata EventMetadata) GetPath() (string, error) {\n\tpath, err := os.Readlink(\n\t\tfilepath.Join(\n\t\t\tProcFsFdInfo,\n\t\t\tstrconv.FormatUint(\n\t\t\t\tuint64(metadata.Fd),\n\t\t\t\t10,\n\t\t\t),\n\t\t),\n\t)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"fanotify: %w\", err)\n\t}\n\n\treturn path, nil\n}", "func (s *Database) Path() string {\n\treturn s.dbFile\n}", "func (b *Bucket) Path() (string, error) {\n\tconf := b.conf.Viper.ConfigFileUsed()\n\tif conf == \"\" {\n\t\treturn b.cwd, nil\n\t}\n\treturn filepath.Dir(filepath.Dir(conf)), nil\n}", "func (db *DB) Path() string {\n\treturn db.path\n}", "func (db *DB) Path() string {\n\treturn db.path\n}", "func (e *Entry) realPath() string {\n\tparts := make([]string, 1, len(e.path)+1)\n\tparts[0] = e.root.realPath\n\tparts = append(parts, e.path...)\n\treturn filepath.Join(parts...)\n}", "func (o ApplicationStatusSyncComparedToSourceOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusSyncComparedToSource) *string { return v.Path }).(pulumi.StringPtrOutput)\n}", "func (o LocalCopyPtrOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *LocalCopy) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Path\n\t}).(pulumi.StringPtrOutput)\n}", "func (blt Bolt) Path() string {\n\treturn blt.path\n}", "func (i Import) Path() string {\n\treturn i.path\n}", "func (s *K8s) Path() string {\n\treturn s.moduleDir\n}", "func (o ArgoCDSpecGrafanaIngressPtrOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ArgoCDSpecGrafanaIngress) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Path\n\t}).(pulumi.StringPtrOutput)\n}", "func (me *inode) GetPath() (path string, mount *mountData) {\n\tme.mount.treeLock.RLock()\n\tdefer me.mount.treeLock.RUnlock()\n\t\t\n\tif me.NodeId != FUSE_ROOT_ID && me.Parent == nil {\n\t\t// Deleted node. 
Treat as if the filesystem was unmounted.\n\t\treturn \".deleted\", nil\n\t}\n\n\trev_components := make([]string, 0, 10)\n\tinode := me\n\n\tfor ; inode != nil && inode.mountPoint == nil; inode = inode.Parent {\n\t\trev_components = append(rev_components, inode.Name)\n\t}\n\tif inode == nil {\n\t\tpanic(fmt.Sprintf(\"did not find parent with mount: %v\", rev_components))\n\t}\n\tmount = inode.mountPoint\n\n\tif mount.unmountPending {\n\t\treturn \"\", nil\n\t}\n\treturn ReverseJoin(rev_components, \"/\"), mount\n}", "func (o ApplicationSpecSourceOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationSpecSource) *string { return v.Path }).(pulumi.StringPtrOutput)\n}", "func (c *CodeResult) GetPath() string {\n\tif c == nil || c.Path == nil {\n\t\treturn \"\"\n\t}\n\treturn *c.Path\n}", "func (e *Element) Path() string {\n\tif e.Parent == nil {\n\t\treturn \"root\"\n\t}\n\treturn e.Parent.Path() + \".\" + e.name\n}", "func (o AppTemplateContainerStartupProbeOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v AppTemplateContainerStartupProbe) *string { return v.Path }).(pulumi.StringPtrOutput)\n}", "func (s *sysCC) Path() string {\n\treturn s.path\n}", "func (o ArgoCDSpecServerIngressPtrOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ArgoCDSpecServerIngress) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Path\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ApplicationStatusSyncComparedToSourceHelmFileParametersOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusSyncComparedToSourceHelmFileParameters) *string { return v.Path }).(pulumi.StringPtrOutput)\n}", "func (l *logPipe) Path() string {\n\treturn l.path\n}", "func (d *device) GetPath() string {\n\tif d.parent == nil {\n\t\treturn d.class.ToString() + \"=\" + d.name\n\n\t}\n\treturn d.parent.GetPath() + \",\" + d.class.ToString() + \"=\" + d.name\n}", "func (h Hashicorp) GetPath() string {\n\treturn h.FilePath\n}", "func (i *Item) GetPath() string {\n\treturn i.folder.GetPath()\n}", "func (o ApplicationStatusOperationStateSyncResultSourceHelmFileParametersOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusOperationStateSyncResultSourceHelmFileParameters) *string { return v.Path }).(pulumi.StringPtrOutput)\n}", "func (server *SingleInstance) Path() string {\n\treturn server.path\n}", "func (o AppTemplateContainerLivenessProbeOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v AppTemplateContainerLivenessProbe) *string { return v.Path }).(pulumi.StringPtrOutput)\n}", "func (k *Kluster) Path() string {\n\treturn k.path\n}", "func (o IopingSpecVolumeVolumeSourceProjectedSourcesSecretItemsOutput) Path() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceProjectedSourcesSecretItems) string { return v.Path }).(pulumi.StringOutput)\n}", "func (o ApplicationStatusOperationStateSyncResultSourcePtrOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ApplicationStatusOperationStateSyncResultSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Path\n\t}).(pulumi.StringPtrOutput)\n}", "func (o LookupContentResultOutput) Path() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupContentResult) string { return v.Path }).(pulumi.StringOutput)\n}", "func (file *File) Path() string {\n\treturn filepath.Join(file.dir, file.name)\n}", "func (o ApplicationStatusOperationStateSyncResultSourceOutput) Path() pulumi.StringPtrOutput {\n\treturn 
o.ApplyT(func(v ApplicationStatusOperationStateSyncResultSource) *string { return v.Path }).(pulumi.StringPtrOutput)\n}", "func (o GetAppTemplateContainerStartupProbeOutput) Path() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetAppTemplateContainerStartupProbe) string { return v.Path }).(pulumi.StringOutput)\n}", "func (o ApplicationStatusHistorySourceHelmFileParametersOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusHistorySourceHelmFileParameters) *string { return v.Path }).(pulumi.StringPtrOutput)\n}", "func (t reqInfo) Path() string {\n\treturn t.path\n}", "func (d Document) Path() string { return d.path }", "func (tc *TrafficCapture) Path() (string, error) {\n\ttc.RLock()\n\tdefer tc.RUnlock()\n\n\treturn tc.writer.Path()\n}", "func (o ArgoCDSpecServerGrpcIngressPtrOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ArgoCDSpecServerGrpcIngress) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Path\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ArgoCDSpecGrafanaIngressOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ArgoCDSpecGrafanaIngress) *string { return v.Path }).(pulumi.StringPtrOutput)\n}", "func (o ArgoCDSpecServerIngressOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ArgoCDSpecServerIngress) *string { return v.Path }).(pulumi.StringPtrOutput)\n}", "func (k *Key) path() (string, error) {\n\tusr, err := user.Current()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to get current user\")\n\t}\n\treturn path.Join(usr.HomeDir, *identity), nil\n}", "func (o ApplicationSpecSourceHelmFileParametersOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationSpecSourceHelmFileParameters) *string { return v.Path }).(pulumi.StringPtrOutput)\n}", "func (o ApplicationStatusOperationStateOperationSyncSourceHelmFileParametersOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusOperationStateOperationSyncSourceHelmFileParameters) *string { return v.Path }).(pulumi.StringPtrOutput)\n}", "func (l *ActivityLogger) GetPath() string {\n\treturn l.fullName\n}", "func (o FioSpecVolumeVolumeSourceProjectedSourcesSecretItemsOutput) Path() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceProjectedSourcesSecretItems) string { return v.Path }).(pulumi.StringOutput)\n}", "func (o GetAppTemplateContainerLivenessProbeOutput) Path() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetAppTemplateContainerLivenessProbe) string { return v.Path }).(pulumi.StringOutput)\n}", "func (a Asset) Path() string {\n\treturn a.path\n}", "func (c *Client) Path() string {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\treturn c.path\n}", "func (u *URL) Path() string {\n\treturn u.URL.Path\n}" ]
[ "0.814329", "0.7101329", "0.6913344", "0.69013375", "0.6842367", "0.67836183", "0.673894", "0.67302", "0.6706463", "0.66323745", "0.6622492", "0.66114634", "0.6606461", "0.6588219", "0.6560654", "0.6534897", "0.65255404", "0.64959705", "0.6493998", "0.6484852", "0.6484852", "0.6480952", "0.64791065", "0.6474006", "0.6472362", "0.6470495", "0.6456069", "0.64504117", "0.64485806", "0.6438598", "0.64326", "0.6429744", "0.6428604", "0.6427308", "0.6417613", "0.6417613", "0.639873", "0.6393157", "0.63913786", "0.6385903", "0.638221", "0.6381541", "0.6375266", "0.6371166", "0.6358777", "0.6340626", "0.63370097", "0.63345754", "0.6323069", "0.63151795", "0.6309286", "0.62993133", "0.6289948", "0.6289426", "0.6289426", "0.62867504", "0.6279521", "0.6267063", "0.6264103", "0.6263455", "0.62629133", "0.62521684", "0.62502146", "0.62500405", "0.62463236", "0.6244957", "0.6239437", "0.6239019", "0.6238636", "0.622883", "0.6220031", "0.62171376", "0.6213033", "0.621124", "0.620621", "0.61993486", "0.6196375", "0.6193965", "0.61857116", "0.6182239", "0.61813706", "0.61771613", "0.61744237", "0.6174001", "0.6152367", "0.6151474", "0.6148273", "0.61421144", "0.61358565", "0.61280173", "0.6125839", "0.61179817", "0.61144024", "0.6106682", "0.6105979", "0.6091786", "0.60851574", "0.6082825", "0.60772157", "0.60766715" ]
0.7185758
1
File returns the file of the currently scanned entry.
func (s *DirScanner) File() reflow.File {
	return s.contents[s.path]
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m Matcher) File() File { return File{} }", "func (cfg *Configuration) File() string {\n\treturn cfg.FilePath\n}", "func (c *Config) File() string {\n\tif c == nil || c.mx == nil {\n\t\treturn \"\"\n\t}\n\n\treturn c.file\n}", "func (pos *Position) File() string {\n\treturn *pos.file\n}", "func GetFile() string {\n\tfile, _ := GetFileNoByDepth(1)\n\treturn file\n}", "func (ref FileView) File() resource.ID {\n\treturn ref.file\n}", "func (err ErrPermission) File() File {\n\treturn err.file\n}", "func (k Key) File() string {\n\tif k.Type == PrivateKey {\n\t\treturn PrivateKeyFile(k.Usage, k.Version)\n\t}\n\treturn PublicKeyFile(k.Usage, k.IA, k.Version)\n}", "func (p *Post) File() string {\n\treturn fmt.Sprintf(\"%d%s\", p.Name, p.Extenstion)\n}", "func (c *CallerInfo) GetFile() string {\n\tif c.file != \"\" {\n\t\treturn c.file\n\t}\n\n\tc.file = trimGoPath(c.rawFile)\n\treturn c.file\n}", "func (c *Config) File() string { return c.viper.GetString(configFile) }", "func (sf *Associate) File() (f *os.File, err error) {\n\treturn sf.getUnderAssociate().File()\n}", "func (r *LocalRegistry) file() string {\n\treturn filepath.Join(core.RegistryPath(r.ArtHome), \"repository.json\")\n}", "func (p Pos) File() *File {\n\tif p.index() == 0 {\n\t\treturn nil\n\t}\n\treturn p.file\n}", "func (f Frame) File() string {\n\treturn f.tr.getStringDefault(f.file)\n}", "func (s *Store) getFile() string {\n\treturn s.File\n}", "func (f *Function) File() *File {\n\treturn f.file\n}", "func File(pass *analysis.Pass, pos token.Pos) *ast.File {\n\tfor _, f := range pass.Files {\n\t\tif f.Pos() <= pos && pos <= f.End() {\n\t\t\treturn f\n\t\t}\n\t}\n\treturn nil\n}", "func File(pass *analysis.Pass, pos token.Pos) *ast.File {\n\tfor _, f := range pass.Files {\n\t\tif f.Pos() <= pos && pos <= f.End() {\n\t\t\treturn f\n\t\t}\n\t}\n\treturn nil\n}", "func (f *Files) File(name string) string {\n\treturn filepath.Join(f.OutDir, name)\n}", "func GetFile() string {\n\treturn filename\n}", "func (err ErrAlreadyExists) File() File {\n\treturn err.file\n}", "func (o *ResourceVersion) GetFile() string {\n\tif o == nil || o.File == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.File\n}", "func (r *PebbleFileRegistry) GetFileEntry(filename string) *enginepb.FileEntry {\n\tfilename = r.tryMakeRelativePath(filename)\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\treturn r.mu.currProto.Files[filename]\n}", "func (o *InlineObject36) GetFile() map[string]interface{} {\n\tif o == nil {\n\t\tvar ret map[string]interface{}\n\t\treturn ret\n\t}\n\n\treturn o.File\n}", "func (err ErrIsNotDirectory) File() File {\n\treturn err.file\n}", "func (d *Download) File() *os.File {\r\n\treturn d.file\r\n}", "func (err ErrIsDirectory) File() File {\n\treturn err.file\n}", "func (conf blah) File() string {\n\treturn filepath.Join(configPath, defConfigFile)\n}", "func (c *common) GetFile() *FileDescriptor { return c.file }", "func (c *Client) File(name, version, path string) (string, error) {\n\tfile := \"\"\n\tfound := false\n\terr := c.read(name, version, func(name string, contents io.Reader) error {\n\t\tif !found && strings.TrimPrefix(name, \"package/\") == path {\n\t\t\tbuf := new(bytes.Buffer)\n\t\t\t_, err := buf.ReadFrom(contents)\n\t\t\tif err != nil {\n\t\t\t\treturn xerrors.Errorf(\"copy contents: %w\", err)\n\t\t\t}\n\t\t\tfile = buf.String()\n\t\t\tfound = true\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn \"\", xerrors.Errorf(\"read package contents: %w\", err)\n\t}\n\tif !found {\n\t\tlog.Printf(\"ERROR 
standard registry: file: not found: %v:%v@%v/%v\", c.Host, name, version, path)\n\t\treturn \"\", registry.ErrNotFound\n\t}\n\n\treturn file, nil\n}", "func (e Entry) Filename() (string, error) {\n\treturn e.df.Filename(e.name)\n}", "func (repo Repository) File(fileID resource.ID) drivestream.FileReference {\n\treturn File{\n\t\tdb: repo.db,\n\t\tfile: fileID,\n\t}\n}", "func FILE() string {\n\t_, file, _, _ := runtime.Caller(1)\n\treturn file\n}", "func (o *InlineObject1) GetFile() *os.File {\n\tif o == nil || o.File == nil {\n\t\tvar ret *os.File\n\t\treturn ret\n\t}\n\treturn *o.File\n}", "func (err ErrDoesNotExist) File() (file File, ok bool) {\n\tfile, ok = err.file.(File)\n\treturn file, ok\n}", "func (metadata EventMetadata) File() *os.File {\n\treturn os.NewFile(uintptr(metadata.Fd), \"\")\n}", "func (c *Changes) GetFile() string {\n\treturn c.File\n}", "func (s *SerializeFuncs) File() *gothicgo.File {\n\treturn s.F\n}", "func (l *logger) GetFile() fileLogger {\n\tl.RLock()\n\tdefer l.RUnlock()\n\treturn l.File\n}", "func (file BaseFile) getFile() interface{} {\n\treturn file.File\n}", "func (is *InputStream) GetFile() string {\n\treturn is.file\n}", "func (s *FileSet) File(p token.Pos) (f *File) {\n\tif p != token.NoPos {\n\t\tinnerf := s.FileSet.File(p)\n\t\tf = s.filemap[innerf]\n\t}\n\treturn\n}", "func (m *Magic) File(file string) (string, error) {\n\tif m.ptr == nil {\n\t\treturn \"\", ConnectionError\n\t}\n\n\tcf := C.CString(file)\n\tdefer C.free(unsafe.Pointer(cf))\n\n\tcr := C.magic_file(m.ptr, cf)\n\tif cr == nil {\n\t\treturn \"\", m.check()\n\t}\n\n\tr := C.GoString(cr)\n\tC.free(unsafe.Pointer(cr))\n\treturn r, nil\n}", "func (c *config) File(u *model.User, r *model.Repo, b *model.Build, f string) ([]byte, error) {\n\treturn c.FileRef(u, r, b.Commit, f)\n}", "func (fe *fileEntry) Name() string { return fe.name }", "func (*FileEntry) Kind() string {\n\treturn \"File\"\n}", "func (e *Entry) String() string {\n\treturn \"file.Entry(\" + e.fullPath() + \")\"\n}", "func (o EncryptionServicesPtrOutput) File() EncryptionServicePtrOutput {\n\treturn o.ApplyT(func(v *EncryptionServices) *EncryptionService {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.File\n\t}).(EncryptionServicePtrOutput)\n}", "func (f frame) file() string {\n\tfn := runtime.FuncForPC(f.pc())\n\tif fn == nil {\n\t\treturn \"unknown\"\n\t}\n\tfile, _ := fn.FileLine(f.pc())\n\treturn file\n}", "func mainFile(table *gosym.Table) (string, error) {\n\tmain := table.LookupFunc(\"main.main\")\n\tif main == nil {\n\t\treturn \"\", fmt.Errorf(\"not found\")\n\t}\n\tfile, _, fn := table.PCToLine(main.Entry)\n\tif fn == nil {\n\t\treturn \"\", fmt.Errorf(\"not found\")\n\t}\n\treturn file, nil\n}", "func (f Frame) file() string {\n\tfn := runtime.FuncForPC(f.pc())\n\tif fn == nil {\n\t\treturn \"unknown\"\n\t}\n\tfile, _ := fn.FileLine(f.pc())\n\treturn file\n}", "func File(flagset *flag.FlagSet) flag.Value {\n\treturn flagfile.File(flagset, loader)\n}", "func file(path string) string {\n\tf, err := filepath.Abs(slash(path))\n\tkit.E(err)\n\treturn f\n}", "func (t *FileTree) File(path file.Path) *file.Reference {\n\tif value, ok := t.pathToFileRef[path.ID()]; ok {\n\t\treturn &value\n\t}\n\treturn nil\n}", "func (f File) Path() string {\n\treturn string(f)\n}", "func (c *Config) File(stream string) (string, error) {\n\tkey, err := keyName(stream, \"file\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tt, err := template.New(\"filename\").Parse(c.v.GetString(key))\n\tif err != nil {\n\t\treturn \"\", 
err\n\t}\n\n\tvar tpl bytes.Buffer\n\tif err := t.Execute(&tpl, c); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn tpl.String(), nil\n}", "func Get(file string) *File {\n\tf := allFiles[file]\n\treturn &f\n}", "func (s *Scanner) CacheFile() string {\n\treturn filepath.Join(s.Tempdir, \"finder.cache\")\n}", "func GetFileForPatternMatch(ap *PagePattern, pm *PatternMatch) (*File, error) {\n\tif ap == nil {\n\t\treturn nil, errors.New(\"page pattern object cannot be nil\")\n\t}\n\n\tif pm == nil {\n\t\treturn nil, errors.New(\"pattern match cannot be nil\")\n\t}\n\n\tfile := File{}\n\tfile.EditOnGithub = ap.EditOnGithub\n\tfile.AddToMenu = ap.AddToMenu\n\tfile.MenuGroup = ap.MenuGroup\n\tfile.Template = ap.Template\n\tfile.SourceFile = pm.Path\n\n\tvar err error\n\n\tif file.Name, err = stringFromTemplate(ap.Name, pm); err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to parse name template: \\n\\t%s\", err.Error())\n\t} else if file.Path, err = stringFromTemplate(ap.Path, pm); err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to parse path template: \\n\\t%s\", err.Error())\n\t}\n\n\treturn &file, nil\n}", "func (o EncryptionServicesResponsePtrOutput) File() EncryptionServiceResponsePtrOutput {\n\treturn o.ApplyT(func(v *EncryptionServicesResponse) *EncryptionServiceResponse {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.File\n\t}).(EncryptionServiceResponsePtrOutput)\n}", "func (t *Table) Filename() string { return t.fd.Name() }", "func (f *IndexFile) Path() string { return f.path }", "func (o EndpointsResponsePtrOutput) File() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *EndpointsResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.File\n\t}).(pulumi.StringPtrOutput)\n}", "func (fv *FileView) SelectedFile() string {\n\treturn filepath.Join(fv.DirPath, fv.SelFile)\n}", "func (r *Root) File(ctx context.Context, s blob.CAS) (*file.File, error) {\n\tif r.FileKey == \"\" {\n\t\treturn nil, ErrNoData\n\t}\n\tif s == nil {\n\t\ts = r.cas\n\t}\n\treturn file.Open(ctx, s, r.FileKey)\n}", "func (o EncryptionServicesOutput) File() EncryptionServicePtrOutput {\n\treturn o.ApplyT(func(v EncryptionServices) *EncryptionService { return v.File }).(EncryptionServicePtrOutput)\n}", "func (e *entry) Path() string {\n\treturn e.path\n}", "func (f *File) String() string {\n\treturn f.URI()\n}", "func getFile(chart *chart.Chart, name string) *chart.File {\n\tfor _, file := range chart.Files {\n\t\tif file.Name == name {\n\t\t\treturn file\n\t\t}\n\t}\n\treturn nil\n}", "func (e FileEvent) Object() File {\n\treturn e.File\n}", "func (c *client) File(u *model.User, r *model.Repo, b *model.Build, f string) ([]byte, error) {\n\tclient := c.newClientToken(u.Token)\n\tcfg, err := client.GetFile(r.Owner, r.Name, b.Commit, f)\n\treturn cfg, err\n}", "func (g *GitChartProvider) GetIndexFile() (*helmrepo.IndexFile, error) {\n\treturn g.index.IndexFile, nil\n}", "func (fdf ExfatFileDirectoryEntry) TypeName() string {\n\treturn \"File\"\n}", "func (o operator) GetFile(filename string) (types.FileInfo, error) {\n\treturn types.FileInfo{}, fmt.Errorf(\"%s doesn't implement GetFile yet\", operatorName)\n}", "func (w *fileLogger) currentFile() (*os.File, error) {\r\n\tif w.curFile == nil {\r\n\t\t//log.Println(\"Reopening file\")\r\n\t\tfile, err := os.OpenFile(w.file, os.O_APPEND|os.O_CREATE, 0777)\r\n\t\t//defer file.Close()\r\n\t\tif err != nil {\r\n\t\t\treturn nil, err\r\n\t\t}\r\n\t\tw.curFile = file\r\n\t}\r\n\treturn w.curFile, nil\r\n}", "func (e *Event) File() *File 
{\n\tif e.Type != \"file\" {\n\t\treturn nil\n\t}\n\tvar f File\n\n\tf.Event = *e\n\tif err := json.Unmarshal(e.ContentType, &f.ContentType); err != nil {\n\t\treturn nil\n\t}\n\tif err := json.Unmarshal(e.Name, &f.Name); err != nil {\n\t\treturn nil\n\t}\n\tif err := json.Unmarshal(e.URL, &f.URL); err != nil {\n\t\treturn nil\n\t}\n\tif err := internal.UnmarshalOptionalRawField(e.ThumbnailURL, &f.ThumbnailURL); err != nil {\n\t\treturn nil\n\t}\n\tif err := internal.UnmarshalOptionalRawField(e.Thumbnail2xURL, &f.Thumbnail2xURL); err != nil {\n\t\treturn nil\n\t}\n\tif err := internal.UnmarshalOptionalRawField(e.Width, &f.Width); err != nil {\n\t\treturn nil\n\t}\n\tif err := internal.UnmarshalOptionalRawField(e.Height, &f.Height); err != nil {\n\t\treturn nil\n\t}\n\tif err := internal.UnmarshalOptionalRawField(e.Size, &f.Size); err != nil {\n\t\treturn nil\n\t}\n\tif err := internal.UnmarshalOptionalRawField(e.AlternativeText, &f.AlternativeText); err != nil {\n\t\treturn nil\n\t}\n\n\treturn &f\n}", "func (e *Event) File() *File {\n\tif e.Type != \"file\" {\n\t\treturn nil\n\t}\n\tvar f File\n\n\tf.Event = *e\n\tif err := json.Unmarshal(e.ContentType, &f.ContentType); err != nil {\n\t\treturn nil\n\t}\n\tif err := json.Unmarshal(e.Name, &f.Name); err != nil {\n\t\treturn nil\n\t}\n\tif err := json.Unmarshal(e.URL, &f.URL); err != nil {\n\t\treturn nil\n\t}\n\tif err := internal.UnmarshalOptionalRawField(e.ThumbnailURL, &f.ThumbnailURL); err != nil {\n\t\treturn nil\n\t}\n\tif err := internal.UnmarshalOptionalRawField(e.Thumbnail2xURL, &f.Thumbnail2xURL); err != nil {\n\t\treturn nil\n\t}\n\tif err := internal.UnmarshalOptionalRawField(e.Width, &f.Width); err != nil {\n\t\treturn nil\n\t}\n\tif err := internal.UnmarshalOptionalRawField(e.Height, &f.Height); err != nil {\n\t\treturn nil\n\t}\n\tif err := internal.UnmarshalOptionalRawField(e.Size, &f.Size); err != nil {\n\t\treturn nil\n\t}\n\tif err := internal.UnmarshalOptionalRawField(e.AlternativeText, &f.AlternativeText); err != nil {\n\t\treturn nil\n\t}\n\n\treturn &f\n}", "func (game *Game) CurrentTrnFile() (string, error) {\n\tvar possibleMatches []string\n\n\tfiles, err := ioutil.ReadDir(game.Directory)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor _, f := range files {\n\t\tif ValidTrnFileName(f.Name()) {\n\t\t\tpossibleMatches = append(possibleMatches, f.Name())\n\t\t}\n\t}\n\n\tswitch {\n\tcase len(possibleMatches) == 0:\n\t\treturn \"\", errors.New(fmt.Sprintf(\"Could not find a trn file for %s\", game.Name))\n\tcase len(possibleMatches) > 1:\n\t\t// Take the shortest filename. 
Ugly, but we have no way to check for valid nation names yet\n\t\tsort.Sort(utility.ByLength(possibleMatches))\n\t}\n\n\treturn possibleMatches[0], nil\n}", "func (s *Drive) GetFile(sha256sum []byte) ([]byte, error) {\n\treturn nil, nil\n}", "func getInfile(filename string) *os.File {\n\tf, err := os.Open(filename)\n\tcheck(err)\n\treturn f\n}", "func (c *Contents) FilePath() string {\n\treturn c.filePath\n}", "func (f File) GetFile() (string, error) {\n\tpw := Password{Title: f.Title, Password: f.Password} // Password Struct\n\terr := pw.GetFileName() // Reading FileName from Password\n\n\t// If No Error\n\tif err == nil {\n\t\treturn IsExist(path.Join(FileLoc, pw.FileName))\n\t}\n\n\t// if err.Error() == \"wrong:password\" {\n\t// \treturn \"\", err\n\t// } // I can just comment it ou, as eventually it returns the same Error\n\treturn \"\", err\n}", "func (o EncryptionServicesResponseOutput) File() EncryptionServiceResponsePtrOutput {\n\treturn o.ApplyT(func(v EncryptionServicesResponse) *EncryptionServiceResponse { return v.File }).(EncryptionServiceResponsePtrOutput)\n}", "func GetSnapshotFile() string {\n\n\t_file := Get(\"SnapshotFile\")\n\tif _file == \"\" {\n\t\t_file = \"snapshot.bin\"\n\t}\n\n\t_absFile, err := filepath.Abs(_file)\n\tif err != nil {\n\t\tlog.Fatalf(\"[!] Failed to find real path of `%s` : %s\\n\", _file, err.Error())\n\t}\n\n\treturn _absFile\n\n}", "func (fp *OrgLinkParser) FilePath() string {\n\treturn fp.file\n}", "func (l *listener) ListenerFile() (*os.File, error) {\n\treturn l.rawl.File()\n}", "func (s *Store) File(id string) *File {\n\treturn NewFile(s, id)\n}", "func getFile(fileName string) string {\n\twd, _ := os.Getwd()\n\n\tif !strings.HasSuffix(wd, \"file\") {\n\t\twd += \"\"\n\t}\n\n\treturn wd + \"/\" + fileName\n}", "func (d *Directory) Entry(name string) *DirectoryEntry {\n\tfor _, e := range d.Entries {\n\t\tif e.Path == name {\n\t\t\treturn e\n\t\t}\n\t}\n\treturn nil\n}", "func (f *File) Path() string {\n\treturn f.path\n}", "func (f *File) Path() string {\n\treturn \"/\" + f.key\n}", "func (b *baseBuilder) GetFile() *FileBuilder {\n\tp := b.parent\n\tfor p != nil {\n\t\tif fb, ok := p.(*FileBuilder); ok {\n\t\t\treturn fb\n\t\t}\n\t\tp = p.GetParent()\n\t}\n\treturn nil\n}", "func mainfile() string {\n\tif m := stack.Main(); m != nil {\n\t\treturn m.File\n\t}\n\treturn \"\"\n}", "func (fi *fileInfo) Path() string {\n\treturn fi.fullPath\n}", "func Get() (*File, error) {\n\treturn GetSpecific(Path())\n}", "func (_this *InterventionReportBody) SourceFile() *string {\n\tvar ret *string\n\tvalue := _this.Value_JS.Get(\"sourceFile\")\n\tif value.Type() != js.TypeNull && value.Type() != js.TypeUndefined {\n\t\t__tmp := (value).String()\n\t\tret = &__tmp\n\t}\n\treturn ret\n}", "func (fi *fileInfo) Name() string { return fi.name }", "func (c Config) GetFile(flag int, perm os.FileMode, section, key string) (*os.File, error) {\n\ts, err := c.GetString(section, key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn os.OpenFile(s, flag, perm)\n}", "func (v *View) GetFile(uri protocol.DocumentURI) *File {\n\tv.mu.Lock()\n\tf, found := v.files[uri]\n\tif !found {\n\t\tf := &File{URI: uri}\n\t\tv.files[f.URI] = f\n\t}\n\tv.mu.Unlock()\n\treturn f\n}" ]
[ "0.67540044", "0.67068243", "0.6687292", "0.66767365", "0.66577923", "0.66560775", "0.66108453", "0.65874934", "0.65597", "0.65354407", "0.6456274", "0.64512724", "0.641237", "0.64050615", "0.63878703", "0.63815755", "0.6373724", "0.6353302", "0.6353302", "0.63308656", "0.632238", "0.63106114", "0.62690663", "0.625717", "0.62380433", "0.61436063", "0.61333", "0.61273396", "0.6125658", "0.6122991", "0.61161864", "0.6091069", "0.60845673", "0.6081382", "0.6068328", "0.60626817", "0.60019374", "0.5992227", "0.5986684", "0.5977449", "0.5946669", "0.59253716", "0.5919013", "0.5912656", "0.5910336", "0.5902827", "0.59025806", "0.58987755", "0.58714074", "0.58607066", "0.5847885", "0.58420944", "0.5841182", "0.57753104", "0.5731122", "0.5686133", "0.5673483", "0.566243", "0.5652536", "0.5645846", "0.5634637", "0.56319094", "0.5613513", "0.56006366", "0.55998594", "0.55879974", "0.55838954", "0.5559093", "0.5558037", "0.5551181", "0.5545485", "0.5467709", "0.5466408", "0.5451453", "0.5449991", "0.5442953", "0.54316777", "0.54316777", "0.54111594", "0.5409323", "0.5403663", "0.5394158", "0.5389561", "0.53875834", "0.53834015", "0.53811204", "0.5377937", "0.5375824", "0.5371819", "0.5370738", "0.5360627", "0.5335314", "0.53346926", "0.5330492", "0.5326442", "0.53219676", "0.5311891", "0.53079414", "0.5307171", "0.53061223" ]
0.7276619
0
Equal compares the file names and digests in the directory.
func (d Dir) Equal(e Dir) bool {
	if d.Len() != e.Len() {
		return false
	}
	for lk, lv := range d.contents {
		if rv, ok := e.contents[lk]; !ok || !rv.Equal(lv) {
			return false
		}
	}
	return true
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func EqualFiles(a, b string) (bool, error) {\n\tam, as, err := sha256sum(a)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"could not hash %s: %w\", a, err)\n\t}\n\tbm, bs, err := sha256sum(b)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"could not hash %s: %w\", b, err)\n\t}\n\treturn as == bs && am == bm, nil\n}", "func Compare(d1, d2 *DirScan) []*File {\n\tinfof(\"Comparing %s to %s\", d1, d2)\n\n\t// 0) Create map with name and normalized name as keys\n\tfileMap := make(map[string]*File)\n\tfor _, f := range d2.Files {\n\t\tfileMap[f.Name] = f\n\t\tfileMap[f.AbsolutePath] = f\n\t\tfileMap[Normalize(f.Name)] = f\n\t}\n\n\t// 1) Find filename matches\n\tcount := 0\n\tfor _, f := range d1.Files {\n\t\tvar match *File\n\n\t\t// 1a) Did file already match?\n\t\texisting := f.Metadata[\"match\"]\n\t\tif existing != \"\" {\n\t\t\tmatch = fileMap[existing]\n\t\t}\n\n\t\t// 1b) Look for exact name match\n\t\tif match == nil {\n\t\t\tmatch = fileMap[f.Name]\n\t\t\tif match != nil && f.Size == match.Size {\n\t\t\t\tdebugf(\"Matched: %s to %s\", f.AbsolutePath, match.AbsolutePath)\n\t\t\t} else {\n\t\t\t\tmatch = nil\n\t\t\t}\n\t\t}\n\n\t\t// 1c) Failing that, look for close enough match\n\t\tif match == nil {\n\t\t\tmatch = fileMap[Normalize(f.Name)]\n\t\t\tif match != nil && f.Size == match.Size {\n\t\t\t\tdebugf(\"Matched: %s to %s\", f.AbsolutePath, match.AbsolutePath)\n\t\t\t} else {\n\t\t\t\tmatch = nil\n\t\t\t}\n\t\t}\n\n\t\t// 1d) Track matches\n\t\tif match != nil {\n\t\t\tf.Metadata[\"match\"] = match.AbsolutePath\n\t\t\tmatch.Metadata[\"match\"] = f.AbsolutePath\n\t\t} else {\n\t\t\tcount++\n\t\t}\n\t}\n\n\t// 2) If there are files without name matches, calculate hashes for all files that didn't match to anything\n\tif count > 0 {\n\t\tinfof(\"Calculating file hashes ...\")\n\t\tgetHashes(d1, nil)\n\n\t\tcb := func(f *File, hash string) {\n\t\t\tfileMap[hash] = f\n\t\t}\n\t\tgetHashes(d2, cb)\n\t}\n\n\t// 3) Match files based on hash\n\tdiff := make([]*File, 0)\n\tfor _, f := range d1.Files {\n\t\tif f.Metadata[\"match\"] == \"\" {\n\t\t\thash := f.Metadata[\"hash\"]\n\t\t\tif hash != \"\" {\n\t\t\t\tmatch := fileMap[hash]\n\t\t\t\tif match != nil {\n\t\t\t\t\tdebugf(\"Matched: %s to %s\", f.AbsolutePath, match.AbsolutePath)\n\t\t\t\t\tf.Metadata[\"match\"] = match.AbsolutePath\n\t\t\t\t\tmatch.Metadata[\"match\"] = f.AbsolutePath\n\t\t\t\t} else {\n\t\t\t\t\tdiff = append(diff, f)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tdiff = append(diff, f)\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(diff) > 0 {\n\t\tinfof(\"%d files in %s are not in %s\", len(diff), d1.Name, d2.Name)\n\t} else {\n\t\tinfof(\"All files in %s found in %s\", d1.Name, d2.Name)\n\t}\n\n\treturn diff\n}", "func EqualFileContents(a, b string) (bool, error) {\n\t_, as, err := sha256sum(a)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"could not hash %s: %w\", a, err)\n\t}\n\t_, bs, err := sha256sum(b)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"could not hash %s: %w\", b, err)\n\t}\n\treturn as == bs, nil\n}", "func TestIsSameFile(t *testing.T) {\n\tabsPath, err := filepath.Abs(\"../tests/files/\")\n\n\tassert.NotNil(t, absPath)\n\tassert.Nil(t, err)\n\n\tfileInfo1, err := os.Stat(absPath + \"/logs/test.log\")\n\tfileInfo2, err := os.Stat(absPath + \"/logs/system.log\")\n\n\tassert.Nil(t, err)\n\tassert.NotNil(t, fileInfo1)\n\tassert.NotNil(t, fileInfo2)\n\n\tfile1 := &File{\n\t\tFileInfo: fileInfo1,\n\t}\n\n\tfile2 := &File{\n\t\tFileInfo: fileInfo2,\n\t}\n\n\tfile3 := &File{\n\t\tFileInfo: 
fileInfo2,\n\t}\n\n\tassert.False(t, file1.IsSameFile(file2))\n\tassert.False(t, file2.IsSameFile(file1))\n\n\tassert.True(t, file1.IsSameFile(file1))\n\tassert.True(t, file2.IsSameFile(file2))\n\n\tassert.True(t, file3.IsSameFile(file2))\n\tassert.True(t, file2.IsSameFile(file3))\n}", "func AssertFileNamesEqual(t *testing.T, dirPath string, fileNames ...string) {\n\tfiles, err := ioutil.ReadDir(dirPath)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, len(files), len(fileNames))\n\n\tfileNamesMap := map[string]bool{}\n\n\tfor _, fileInfo := range files {\n\t\tfileNamesMap[fileInfo.Name()] = true\n\t}\n\n\tfor _, fileName := range fileNames {\n\t\trequire.True(t, fileNamesMap[fileName], fileName+\" does not exist.\")\n\t}\n}", "func IdenticalDirContents(ctx context.Context, dirA, dirB File, recursive bool) (identical bool, err error) {\n\tif SameFile(dirA, dirB) {\n\t\treturn true, nil\n\t}\n\n\tfileInfosA := make(map[string]FileInfo)\n\terr = dirA.ListDirInfoContext(ctx, func(info FileInfo) error {\n\t\tif !info.IsDir || recursive {\n\t\t\tfileInfosA[info.Name] = info\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"IdenticalDirContents: error listing dirA %q: %w\", dirA, err)\n\t}\n\n\tfileInfosB := make(map[string]FileInfo, len(fileInfosA))\n\thasDiff := errors.New(\"hasDiff\")\n\terr = dirB.ListDirInfoContext(ctx, func(info FileInfo) error {\n\t\tif !info.IsDir || recursive {\n\t\t\tinfoA, found := fileInfosA[info.Name]\n\t\t\tif !found || info.Size != infoA.Size || info.IsDir != infoA.IsDir {\n\t\t\t\treturn hasDiff\n\t\t\t}\n\t\t\tfileInfosB[info.Name] = info\n\t\t}\n\t\treturn nil\n\t})\n\tif errors.Is(err, hasDiff) || len(fileInfosB) != len(fileInfosA) {\n\t\treturn false, nil\n\t}\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"IdenticalDirContents: error listing dirB %q: %w\", dirB, err)\n\t}\n\n\tfor filename, infoA := range fileInfosA {\n\t\tif recursive && infoA.IsDir {\n\t\t\tidentical, err = IdenticalDirContents(ctx, dirA.Join(filename), dirB.Join(filename), true)\n\t\t\tif !identical {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t} else {\n\t\t\thashA, err := dirA.Join(filename).ContentHash()\n\t\t\tif err != nil {\n\t\t\t\treturn false, fmt.Errorf(\"IdenticalDirContents: error content hashing %q: %w\", filename, err)\n\t\t\t}\n\t\t\thashB, err := dirB.Join(filename).ContentHash()\n\t\t\tif err != nil {\n\t\t\t\treturn false, fmt.Errorf(\"IdenticalDirContents: error content hashing %q: %w\", filename, err)\n\t\t\t}\n\t\t\tif hashA != hashB {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn true, nil\n}", "func filesEqual(a, b string) error {\n\taBytes, err := os.ReadFile(a)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbBytes, err := os.ReadFile(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Normalize newlines.\n\taBytes = bytes.Replace(aBytes, []byte{13, 10} /* \\r\\n */, []byte{10} /* \\n */, -1)\n\tbBytes = bytes.Replace(bBytes, []byte{13, 10}, []byte{10}, -1)\n\n\td, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{\n\t\tA: difflib.SplitLines(string(aBytes)),\n\t\tB: difflib.SplitLines(string(bBytes)),\n\t})\n\tif d != \"\" {\n\t\treturn errors.Errorf(\"a != b\\ndiff = %s\", d)\n\t}\n\n\treturn nil\n}", "func AssertEqualFiles(t *testing.T, fn0, fn1 string) {\n\tcmp := equalfile.New(nil, equalfile.Options{}) // compare using single mode\n\tok, err := cmp.CompareFile(fn0, fn1)\n\tassert.Nil(t, err)\n\tassert.True(t, ok)\n}", "func IsDirSame(a, b string) bool {\n\taFiles, err := GetAllFiles(a, false)\n\tif err != nil 
{\n\t\treturn false\n\t}\n\tbFiles, err := GetAllFiles(b, false)\n\tif err != nil {\n\t\treturn false\n\t}\n\tif len(aFiles) != len(bFiles) {\n\t\treturn false\n\t}\n\tsort.Strings(aFiles)\n\tsort.Strings(bFiles)\n\tfor i := range aFiles {\n\t\tif aFiles[i] != bFiles[i] {\n\t\t\tfmt.Println(aFiles[i])\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func checkFile(f string, storedFiles []files.File) (tracked bool, updated bool) {\n\tfor _, storedFile := range storedFiles {\n\t\tif storedFile.Name == f {\n\t\t\ttracked = true\n\t\t\tmd5sum, err := crypto.MD5(f)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"checkFile | crypto.MD5 [%s]\", err)\n\t\t\t}\n\t\t\tif storedFile.MD5 != md5sum {\n\t\t\t\tupdated = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func (r *Repo) CheckIntegrity() error {\n\tif r.sett == nil {\n\t\tpanic(\"settings not loaded in CheckIntegrity\")\n\t}\n\n\t// l1 is the list of elements in repo/files.\n\t// It should contain folders named from 00 to ff.\n\tl1, err := utils.ListDir(r.filesFolder)\n\tif err != nil {\n\t\treturn errors.New(\"cannot access files\")\n\t}\n\n\tmHasher, err := hasher.NewMultiHasher(r.sett.HashAlgorithm)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tallFiles := make([]string, 0, 1000)\n\n\tpkg.Log.Info(\"Listing files\")\n\terrsFound := false\n\t// c1 represent a given Child of the list l1\n\tfor _, c1 := range l1 {\n\t\tc1Path := filepath.Join(r.filesFolder, c1.Name())\n\t\tif !c1.IsDir() {\n\t\t\tpkg.Log.Debugf(\"%s is not a directory. Skipping...\", c1Path)\n\t\t\tcontinue\n\t\t}\n\n\t\t// l2 is the list of elements in repo/files/c1.\n\t\t// It should contain the files named like <hash_hex>-<size_bytes>\n\t\tl2, err := utils.ListDir(c1Path)\n\t\tif err != nil {\n\t\t\tpkg.Log.Errorf(\"error listing \\\"%s\\\": %s\\n\", c1Path, err.Error())\n\t\t\terrsFound = true\n\t\t\tcontinue\n\t\t}\n\t\t// c2 represents a given Child of the list l2,\n\t\t// that means, a file in repo/files/c1\n\t\tfor _, c2 := range l2 {\n\t\t\tc2Path := filepath.Join(c1Path, c2.Name())\n\t\t\tif !c2.Mode().IsRegular() {\n\t\t\t\tpkg.Log.Debugf(\"%s is not a file. 
Skipping...\", c2Path)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tallFiles = append(allFiles, c2Path)\n\t\t}\n\t}\n\n\tpkg.Log.Info(\"Checking file integrity\")\n\tif mHasher.CheckFiles(allFiles) || errsFound {\n\t\treturn errors.New(\"some errors were found\")\n\t}\n\treturn nil\n}", "func TestVerifyComposefileDiffDigests(t *testing.T) {\n\tt.Parallel()\n\tlPath := filepath.Join(verifyComposeBaseDir, \"diffdigests\", \"docker-lock.json\")\n\tflags := []string{fmt.Sprintf(\"--lockfile-path=%s\", lPath)}\n\tshouldFail := true\n\ttestVerify(t, flags, shouldFail)\n}", "func TestDiff_identical(t *testing.T) {\n\ts := t.TempDir()\n\td := t.TempDir()\n\n\terr := os.Mkdir(filepath.Join(s, \"a1\"), 0700)\n\tassert.NoError(t, err)\n\terr = os.WriteFile(\n\t\tfilepath.Join(s, \"a1\", \"f.yaml\"), []byte(`a`), 0600)\n\tassert.NoError(t, err)\n\n\terr = os.Mkdir(filepath.Join(d, \"a1\"), 0700)\n\tassert.NoError(t, err)\n\terr = os.WriteFile(\n\t\tfilepath.Join(d, \"a1\", \"f.yaml\"), []byte(`a`), 0600)\n\tassert.NoError(t, err)\n\n\tdiff, err := Diff(s, d)\n\tassert.NoError(t, err)\n\tassert.Empty(t, diff.List())\n}", "func compareFiles(file1 string, file2 string) error {\n\ts1, err := os.Stat(file1)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can't stat %s: %v\", file1, err)\n\t}\n\ts2, err := os.Stat(file2)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can't stat %s: %v\", file2, err)\n\t}\n\tif s1.Size() != s2.Size() {\n\t\treturn fmt.Errorf(\"files %s and %s have different sizes: %d vs %d\", file1, file2, s1.Size(), s2.Size())\n\t}\n\tif s1.Mode() != s2.Mode() {\n\t\treturn fmt.Errorf(\"files %s and %s have different permissions: %#4o vs %#4o\", file1, file2, s1.Mode(), s2.Mode())\n\t}\n\tf1bytes, err := ioutil.ReadFile(file1)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can't read %s: %v\", file1, err)\n\t}\n\tf2bytes, err := ioutil.ReadFile(file2)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can't read %s: %v\", file2, err)\n\t}\n\tif !bytes.Equal(f1bytes, f2bytes) {\n\t\treturn fmt.Errorf(\"files %s and %s have different contents\", file1, file2)\n\t}\n\treturn nil\n}", "func IsSame(a, bFileId) bool {}", "func CompareFiles(file1, file2 string) (bool, error) {\n\tsize := 1024 * 4\n\tbuffer1, buffer2 := make([]byte, size), make([]byte, size)\n\n\tf1, err := os.Open(file1)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer utils.CloseQuietly(f1)\n\n\tf2, err := os.Open(file2)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer utils.CloseQuietly(f2)\n\n\tfor {\n\t\t// Si hay error, se retorna\n\t\tn1, err := f1.Read(buffer1)\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn false, err\n\t\t}\n\t\tn2, err2 := f2.Read(buffer2)\n\t\tif err2 != nil && err2 != io.EOF {\n\t\t\treturn false, err2\n\t\t}\n\n\t\t// Se llegó al final de ambos archivos.\n\t\tif err == io.EOF && err == err2 && n1 == n2 {\n\t\t\treturn true, nil\n\t\t}\n\n\t\t// Se leyeron distintas cantidades, se llegó al final de uno.\n\t\tif n1 != n2 {\n\t\t\treturn false, nil\n\t\t}\n\n\t\tresult := bytes.Equal(buffer1, buffer2)\n\t\tif !result {\n\t\t\treturn false, nil\n\t\t}\n\t}\n}", "func FilesEqual(name1, name2 string) (bool, error) {\n\tf1Info, err := os.Stat(name1)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tf2Info, err := os.Stat(name2)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif f1Info.Size() != f2Info.Size() {\n\t\treturn false, nil\n\t}\n\n\tf1, err := os.Open(name1)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer f1.Close()\n\n\tf2, err := os.Open(name2)\n\tif err != nil {\n\t\treturn false, 
err\n\t}\n\tdefer f2.Close()\n\n\t// 64 KiB buffers seem to be most performant with larger files\n\treturn Equal(f1, f2, 64*1024)\n}", "func TestVerifyDockerfileDiffDigests(t *testing.T) {\n\tt.Parallel()\n\tlPath := filepath.Join(verifyDockerBaseDir, \"diffdigests\", \"docker-lock.json\")\n\tflags := []string{fmt.Sprintf(\"--lockfile-path=%s\", lPath)}\n\tshouldFail := true\n\ttestVerify(t, flags, shouldFail)\n}", "func (t *FileTree) Equal(other *FileTree) bool {\n\tif len(t.pathToFileRef) != len(other.pathToFileRef) {\n\t\treturn false\n\t}\n\n\textra, missing := t.PathDiff(other)\n\n\treturn len(extra) == 0 && len(missing) == 0\n}", "func HashFiles(files []string) string {\n\thasher := md5.New()\n\n\tfor i := 0; i < len(files); i++ {\n\t\tfp, err := os.Open(files[i])\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tfor {\n\t\t\tbuffer := make([]byte, 8192, 8192)\n\t\t\tbytes_read, err := fp.Read(buffer)\n\t\t\tif bytes_read > 0 {\n\t\t\t\thasher.Write(buffer)\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\ts := make([]byte, 0)\n\n\treturn hex.EncodeToString(hasher.Sum(s))\n\n}", "func TestFileOrder(t *testing.T) {\n\tfileSize := HashSize*5 + 1\n\ttree := NewTree().(*treeDigest)\n\tt1 := *tree\n\tt2 := *tree\n\tfile := NewFile2(HashSize, &t1, &t2)\n\tbuf := make([]byte, fileSize)\n\ttree.Write(buf)\n\ttsum := tree.Sum(nil)\n\tfile.Write(buf)\n\tfsum := file.Sum(nil)\n\tif !bytes.Equal(fsum, tsum) {\n\t\tt.Fatalf(\" %x != %x\", fsum, tsum)\n\t}\n}", "func SameFile(fi1, fi2 os.FileInfo,) bool", "func SameFile(a, b File) bool {\n\taFS, aPath := a.ParseRawURI()\n\tbFS, bPath := b.ParseRawURI()\n\treturn aFS == bFS && aPath == bPath\n}", "func compareFiles(t *testing.T) {\n\tactualCursor, err := files.Find(ctx, emptyDoc)\n\ttesthelpers.RequireNil(t, err, \"error running Find for files: %s\", err)\n\texpectedCursor, err := expectedFiles.Find(ctx, emptyDoc)\n\ttesthelpers.RequireNil(t, err, \"error running Find for expected files: %s\", err)\n\n\tfor expectedCursor.Next(ctx) {\n\t\tif !actualCursor.Next(ctx) {\n\t\t\tt.Fatalf(\"files has fewer documents than expectedFiles\")\n\t\t}\n\n\t\tvar actualFile bsonx.Doc\n\t\tvar expectedFile bsonx.Doc\n\n\t\terr = actualCursor.Decode(&actualFile)\n\t\ttesthelpers.RequireNil(t, err, \"error decoding actual file: %s\", err)\n\t\terr = expectedCursor.Decode(&expectedFile)\n\t\ttesthelpers.RequireNil(t, err, \"error decoding expected file: %s\", err)\n\n\t\tcompareGfsDoc(t, expectedFile, actualFile, primitive.ObjectID{})\n\t}\n}", "func compareFiles(src, dst string) (bool, error) {\n\tb1, err := ioutil.ReadFile(src)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tb2, err := ioutil.ReadFile(dst)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif bytes.Equal(b1, b2) {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}", "func filesDigest(fs afero.Fs, paths []string) []*PackageFilesDigest {\n\treturn mapPaths(fs, paths, pathToOperator)\n}", "func IsSameFile(a, b string) bool {\n\ti1, err1 := getInode(a)\n\ti2, err2 := getInode(b)\n\treturn err1 == nil && err2 == nil && i1 == i2\n}", "func cmpTarFilesHelper(c1, c2 []byte) {\n\tr1, r2 := bytes.NewBuffer(c1), bytes.NewBuffer(c2)\n\tset1, err := untarToFileSet(r1)\n\tExpectWithOffset(1, err).ToNot(HaveOccurred())\n\tset2, err := untarToFileSet(r2)\n\tExpectWithOffset(1, err).ToNot(HaveOccurred())\n\n\tfor fileName, contents1 := range set1 {\n\t\tcontents2, hasFileName := set2[fileName]\n\t\tExpectWithOffset(1, hasFileName).To(BeTrue(), \"second tarball does not have file %s\", 
fileName)\n\t\tExpectWithOffset(1, contents1.String()).To(Equal(contents2.String()),\n\t\t\t\"contents of file %s differ in first and second tarballs\", fileName)\n\t\tdelete(set2, fileName)\n\t}\n\tExpectWithOffset(1, set2).To(BeEmpty(), \"second tarball has files not in the first\")\n}", "func Comparehashes(hash1 string, hash2 string) (result, error) {\n var r result\n var err error\n r.s1 = hash1\n r.s2 = hash2\n r.score, err = ssdeep.Compare(hash1, hash2)\n if err != nil {\n return r, err_sscomp \n }\n if r.score == 100 { //100 spotted in the wild for non-identifcal files\n if strings.Compare(r.s1, r.s2) != 0 {\n r.strflag = true\n }\n }\n return r, nil\n}", "func sameFile(x, y string) bool {\n\tif x == y {\n\t\t// It could be the case that y doesn't exist.\n\t\t// For instance, it may be an overlay file that\n\t\t// hasn't been written to disk. To handle that case\n\t\t// let x == y through. (We added the exact absolute path\n\t\t// string to the CompiledGoFiles list, so the unwritten\n\t\t// overlay case implies x==y.)\n\t\treturn true\n\t}\n\tif strings.EqualFold(filepath.Base(x), filepath.Base(y)) { // (optimisation)\n\t\tif xi, err := os.Stat(x); err == nil {\n\t\t\tif yi, err := os.Stat(y); err == nil {\n\t\t\t\treturn os.SameFile(xi, yi)\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func TestPathMatcher(t *testing.T) {\n\tdirEntries, _ :=path.ReadDir(\"files\")\n\t for _, entry := range dirEntries {\n\t\tif !entry.IsDir() {\n\t\t\tfmt.Println(entry.Name())\n\t\t\tfile, _ := path.ReadFile(\"files/\" + entry.Name())\n\t\t\tfmt.Println(string(file)) //! convert ke string karena file nya binary atau slice of byte []byte\n\t\t}\n\t }\n}", "func Diff(sourceDir, destDir string) (sets.String, error) {\n\t// get set of filenames in the package source\n\tupstreamFiles := sets.String{}\n\terr := filepath.Walk(sourceDir, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// skip git repo if it exists\n\t\tif IsDotGitFolder(path) {\n\t\t\treturn nil\n\t\t}\n\n\t\tupstreamFiles.Insert(strings.TrimPrefix(strings.TrimPrefix(path, sourceDir), string(filepath.Separator)))\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn sets.String{}, err\n\t}\n\n\t// get set of filenames in the cloned package\n\tlocalFiles := sets.String{}\n\terr = filepath.Walk(destDir, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// skip git repo if it exists\n\t\tif IsDotGitFolder(path) {\n\t\t\treturn nil\n\t\t}\n\n\t\tlocalFiles.Insert(strings.TrimPrefix(strings.TrimPrefix(path, destDir), string(filepath.Separator)))\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn sets.String{}, err\n\t}\n\n\t// verify the source and cloned packages have the same set of filenames\n\tdiff := upstreamFiles.SymmetricDifference(localFiles)\n\n\t// verify file contents match\n\tfor _, f := range upstreamFiles.Intersection(localFiles).List() {\n\t\tfi, err := os.Stat(filepath.Join(destDir, f))\n\t\tif err != nil {\n\t\t\treturn diff, err\n\t\t}\n\t\tif fi.Mode().IsDir() {\n\t\t\t// already checked that this directory exists in the local files\n\t\t\tcontinue\n\t\t}\n\n\t\t// compare upstreamFiles\n\t\tb1, err := os.ReadFile(filepath.Join(destDir, f))\n\t\tif err != nil {\n\t\t\treturn diff, err\n\t\t}\n\t\tb2, err := os.ReadFile(filepath.Join(sourceDir, f))\n\t\tif err != nil {\n\t\t\treturn diff, err\n\t\t}\n\t\tif !bytes.Equal(b1, b2) {\n\t\t\tfmt.Println(PrettyFileDiff(string(b1), 
string(b2)))\n\t\t\tdiff.Insert(f)\n\t\t}\n\t}\n\t// return the differing files\n\treturn diff, nil\n}", "func ChecksumFilesFS(fs fs.FileSystem, groups ...[]File) {\n\tstart := time.Now()\n\n\ttotalFiles := 0\n\ttotalBytes := util.ByteCount(0)\n\tfor _, files := range groups {\n\t\tfor _, f := range files {\n\t\t\tif (f.IsRegular() || f.IsSymlink()) && !f.HasChecksum() {\n\t\t\t\ttotalFiles += 1\n\t\t\t\ttotalBytes += util.ByteCount(f.Size)\n\t\t\t}\n\t\t}\n\t}\n\n\tif totalFiles == 0 {\n\t\treturn // no files to hash\n\t}\n\n\tlog.Printf(\"check: calculating hashes for %d files (%s).\\n\", totalFiles,\n\t\ttotalBytes)\n\n\tdoneFiles := 0\n\tdoneBytes := util.ByteCount(0)\n\n\ttimer := util.NewTimer(1800, func() {\n\t\tprogress := doneBytes / totalBytes * 100\n\t\tlog.Printf(\"check: busy. %d files (%s, %.1f%%) hashed.\\n\", doneFiles,\n\t\t\tdoneBytes, progress)\n\t})\n\n\tfor i, files := range groups {\n\t\tfor j, f := range files {\n\t\t\tvar hash [sha1.Size]byte\n\t\t\tvar length int\n\t\t\tif f.IsRegular() {\n\t\t\t\thash, length = checksumFile(fs, f.Path())\n\t\t\t} else if f.IsSymlink() {\n\t\t\t\thash, length = checksumSymlink(fs, f.Path())\n\t\t\t} else {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t//log.Printf(\"check: read %q\\n\", f.Path())\n\t\t\tgroups[i][j].SHA1 = hash\n\t\t\tdoneFiles += 1\n\t\t\tdoneBytes += util.ByteCount(length)\n\t\t}\n\t}\n\n\ttimer.Stop()\n\n\telapsed := time.Since(start)\n\tlog.Printf(\"check: done. %d files (%s) hashed. took %s.\\n\", doneFiles,\n\t\tdoneBytes, elapsed)\n}", "func assertFilesEqualIgnoreNL(t *testing.T, path1, path2 string) {\n\tinfo1, err := os.Stat(path1)\n\tassert.NoError(t, err)\n\n\tinfo2, err := os.Stat(path2)\n\tassert.NoError(t, err)\n\n\t// If both are directories, just return\n\t// Make sure if one name corresponds to a directory, the other one is a directory too\n\tif info1.IsDir() {\n\t\tassert.True(t, info2.IsDir())\n\t\treturn\n\t}\n\t// First file is not a directory, so the second one should not be too\n\tassert.False(t, info2.IsDir())\n\n\tcontent1, err := ioutil.ReadFile(path1)\n\tassert.NoError(t, err)\n\n\tcontent1 = normalizeNL(content1)\n\n\tcontent2, err := ioutil.ReadFile(path2)\n\tassert.NoError(t, err)\n\n\tcontent2 = normalizeNL(content2)\n\n\tr := bytes.Compare(content1, content2)\n\tassert.Equal(t, 0, r, \"No Match - '%s', '%s'\", path1, path2)\n}", "func Comparefiles(file1 string, file2 string) (result, error) {\n var r result\n r.paths = true\n var err error\n f1, _ := fileExists(file1)\n f2, _ := fileExists(file2)\n if !f1 || !f2 {\n return r, fmt.Errorf(\"Warning: Cannot find file.\\n\")\n }\n r.s1, err = createfilehash(file1)\n if err != nil {\n return r, err\n }\n r.s2, err = createfilehash(file2)\n if err != nil {\n return r, err\n }\n r.score, err = ssdeep.Compare(r.s1, r.s2)\n if err != nil {\n return r, err\n }\n if r.score == 100 { //100 spotted in the wild for non-identifcal files\n if strings.Compare(r.s1, r.s2) != 0 {\n r.strflag = true\n }\n shaval1, err := hashfile(file1)\n if err != nil {\n return r, err_sha1_file1\n } \n shaval2, err := hashfile(file2)\n if err != nil {\n return r, err_sha1_file2\n }\n if strings.Compare(shaval1, shaval2) != 0 {\n r.shaflag = true\n }\n }\n return r, nil \n}", "func Equal(d1, d2 *repb.Digest) bool {\n\treturn proto.Equal(d1, d2)\n}", "func checkFileHash(file *os.File, hashlist []string, verbose bool) bool {\n\t// Create a new MD5 hash\n\thash := md5.New()\n\n\t// Read the contents of the file into the hash\n\tif _, err := io.Copy(hash, file); err != nil 
{\n\t\tlog.Println(\"File could not be hashed: %s\", err)\n\t\treturn false\n\t}\n\n\t// Iterate through the list of exclusion hashes\n\t// Return true if file hash matches an exclusion hash.\n\tfor _, s := range hashlist {\n\t\tif s == fmt.Sprintf(\"%x\", hash.Sum(nil)) {\n\t\t\tif verbose {\n\t\t\t\tlog.Printf(\"[IGNORING] File hash (%s) was found in MD5 exclusion list: %s.\", fmt.Sprintf(\"%x\", hash.Sum(nil)), file.Name())\n\t\t\t}\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func TestCheckDir(t *testing.T) {\n\ttestPaths, err := filepath.Glob(filepath.FromSlash(\"testdata/check_dir/*.txt\"))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor _, testPath := range testPaths {\n\t\ttestPath := testPath\n\t\tname := strings.TrimSuffix(filepath.Base(testPath), \".txt\")\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\t// Load the test and extract the files to a temporary directory.\n\t\t\ttest, err := readTest(testPath)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tvar want string\n\t\t\tfor i, f := range test.archive.Files {\n\t\t\t\tif f.Name == \"want\" {\n\t\t\t\t\twant = string(f.Data)\n\t\t\t\t\ttest.archive.Files = append(test.archive.Files[:i], test.archive.Files[i+1:]...)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\ttmpDir, err := extractTxtarToTempDir(t, test.archive)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\t// Check the directory.\n\t\t\tcf, err := modzip.CheckDir(tmpDir)\n\t\t\tif err != nil && err.Error() != cf.Err().Error() {\n\t\t\t\t// I/O error\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\trep := strings.NewReplacer(tmpDir, \"$work\", `'\\''`, `'\\''`, string(os.PathSeparator), \"/\")\n\t\t\tgot := rep.Replace(formatCheckedFiles(cf))\n\t\t\tif got != want {\n\t\t\t\tt.Errorf(\"got:\\n%s\\n\\nwant:\\n%s\", got, want)\n\t\t\t}\n\n\t\t\t// Check that the error (if any) is just a list of invalid files.\n\t\t\t// SizeError is not covered in this test.\n\t\t\tvar gotErr, wantErr string\n\t\t\tif len(cf.Invalid) > 0 {\n\t\t\t\twantErr = modzip.FileErrorList(cf.Invalid).Error()\n\t\t\t}\n\t\t\tif err := cf.Err(); err != nil {\n\t\t\t\tgotErr = err.Error()\n\t\t\t}\n\t\t\tif gotErr != wantErr {\n\t\t\t\tt.Errorf(\"got error:\\n%s\\n\\nwant error:\\n%s\", gotErr, wantErr)\n\t\t\t}\n\t\t})\n\t}\n}", "func hashDir(hc hashConstructor, dirname string) ([]byte, error) {\n\t// ReadDir returns the entries sorted by filename\n\tdirEntries, err := os.ReadDir(dirname)\n\tif err != nil {\n\t\treturn nil, errs.FileError(err, dirname)\n\t}\n\tst, err := os.Stat(dirname)\n\tif err != nil {\n\t\treturn nil, errs.FileError(err, dirname)\n\t}\n\n\tvar sum []byte\n\tmode := make([]byte, 4)\n\n\t// calculate sum of contents and mode\n\th := hc()\n\tbinary.LittleEndian.PutUint32(mode, uint32(st.Mode()))\n\th.Write(mode)\n\tfor _, dirEntry := range dirEntries {\n\t\tfi, err := dirEntry.Info()\n\t\tif err != nil {\n\t\t\treturn nil, errs.FileError(err, dirEntry.Name())\n\t\t}\n\t\tname := path.Join(dirname, fi.Name())\n\t\tswitch {\n\t\tcase fi.IsDir():\n\t\t\tsum, err = hashDir(hc, name)\n\t\tcase fi.Mode()&os.ModeSymlink != 0:\n\t\t\tbinary.LittleEndian.PutUint32(mode, uint32(fi.Mode()))\n\t\t\th.Write(mode)\n\t\t\tsum, err = hashSymlink(hc, name)\n\t\tdefault:\n\t\t\tbinary.LittleEndian.PutUint32(mode, uint32(fi.Mode()))\n\t\t\th.Write(mode)\n\t\t\tsum, err = hashFile(hc(), name)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\th.Write(sum)\n\t}\n\n\treturn h.Sum(nil), nil\n}", "func (f File) Equal(f2 File) bool {\n\treturn f.Name == 
f2.Name && f.Path == f2.Path && f.Type == f2.Type\n}", "func (flogs *fileLogs) Equal(config dvid.StoreConfig) bool {\n\tpath, _, err := parseConfig(config)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn path == flogs.path\n}", "func DirHash(path string) (finalHash string, err error) {\n\thash := md5.New()\n\n\terr = filepath.Walk(path, func(path string, info os.FileInfo, err error) (err2 error) {\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tio.WriteString(hash, path)\n\n\t\tfmt.Fprintf(hash, \"%v\", info.IsDir())\n\t\tfmt.Fprintf(hash, \"%v\", info.ModTime())\n\t\tfmt.Fprintf(hash, \"%v\", info.Mode())\n\t\tfmt.Fprintf(hash, \"%v\", info.Name())\n\t\tfmt.Fprintf(hash, \"%v\", info.Size())\n\t\treturn\n\t})\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfinalHash = fmt.Sprintf(\"%x\", hash.Sum(nil))\n\treturn\n}", "func (suite *DigestTreeTestSuite) TestDigestTree() {\n\tt := suite.T()\n\n\tfor n := uint(1); n <= MaxTestSize; n++ {\n\t\tleaves := suite.randomDigests(n)\n\n\t\trootDigest, trees, err := NewDigestTree(leaves)\n\t\t_, _, _ = rootDigest, trees, err\n\n\t\tif !assert.Nil(t, err) {\n\t\t\tbreak\n\t\t}\n\n\t\tok := assert.Equal(t, int(n), len(trees))\n\t\tok = ok && assert.Equal(t, sha512.Size384, len(rootDigest))\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\n\t\tfor i, tree := range trees {\n\t\t\trecomputedRootDigest := tree.RootDigest()\n\t\t\tok = ok && assert.Equal(t, rootDigest, recomputedRootDigest, fmt.Sprintf(\"path %v produced incorrect root digest\", i))\n\t\t}\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t}\n}", "func (v *VersionFile) equals(vp *EtcdVersionPair) (bool, error) {\n\texists, err := v.Exists()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif !exists {\n\t\treturn false, nil\n\t}\n\tcvp, err := v.Read()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn vp.Equals(cvp), nil\n}", "func TestDiff_srcDestContentsDifferInDirs(t *testing.T) {\n\ts := t.TempDir()\n\td := t.TempDir()\n\n\terr := os.Mkdir(filepath.Join(s, \"a1\"), 0700)\n\tassert.NoError(t, err)\n\terr = os.WriteFile(\n\t\tfilepath.Join(s, \"a1\", \"f.yaml\"), []byte(`a`), 0600)\n\tassert.NoError(t, err)\n\n\terr = os.Mkdir(filepath.Join(d, \"b1\"), 0700)\n\tassert.NoError(t, err)\n\terr = os.WriteFile(\n\t\tfilepath.Join(d, \"b1\", \"f.yaml\"), []byte(`a`), 0600)\n\tassert.NoError(t, err)\n\n\tdiff, err := Diff(s, d)\n\tassert.NoError(t, err)\n\tassert.ElementsMatch(t, diff.List(), []string{\n\t\t\"a1\",\n\t\tfmt.Sprintf(\"a1%sf.yaml\", string(filepath.Separator)),\n\t\tfmt.Sprintf(\"b1%sf.yaml\", string(filepath.Separator)),\n\t\t\"b1\",\n\t})\n}", "func ImageVerifierFilenames(infile, sha256, tmpID string) (string, string, string) {\n\tverifierDirname, verifiedDirname := getVerifierDir(), getVerifiedDir()\n\t// Handle names which are paths\n\tverified := tmpID + \".\" + sha256\n\treturn infile, path.Join(verifierDirname, verified), path.Join(verifiedDirname, sha256)\n}", "func dDiffDigests() (*test, error) {\n\tlPath := filepath.Join(dTestDir, \"diffdigests\", \"docker-lock.json\")\n\n\tflags, err := NewFlags(lPath, defaultConfigPath(), \".env\", false, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &test{flags: flags, shouldFail: true}, nil\n}", "func TestCheckFiles(t *testing.T) {\n\ttestPaths, err := filepath.Glob(filepath.FromSlash(\"testdata/check_files/*.txt\"))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor _, testPath := range testPaths {\n\t\ttestPath := testPath\n\t\tname := strings.TrimSuffix(filepath.Base(testPath), \".txt\")\n\t\tt.Run(name, func(t *testing.T) 
{\n\t\t\tt.Parallel()\n\n\t\t\t// Load the test.\n\t\t\ttest, err := readTest(testPath)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tfiles := make([]modzip.File, 0, len(test.archive.Files))\n\t\t\tvar want string\n\t\t\tfor _, tf := range test.archive.Files {\n\t\t\t\tif tf.Name == \"want\" {\n\t\t\t\t\twant = string(tf.Data)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfiles = append(files, fakeFile{\n\t\t\t\t\tname: tf.Name,\n\t\t\t\t\tsize: uint64(len(tf.Data)),\n\t\t\t\t\tdata: tf.Data,\n\t\t\t\t})\n\t\t\t}\n\n\t\t\t// Check the files.\n\t\t\tcf, _ := modzip.CheckFiles(files)\n\t\t\tgot := formatCheckedFiles(cf)\n\t\t\tif got != want {\n\t\t\t\tt.Errorf(\"got:\\n%s\\n\\nwant:\\n%s\", got, want)\n\t\t\t}\n\n\t\t\t// Check that the error (if any) is just a list of invalid files.\n\t\t\t// SizeError is not covered in this test.\n\t\t\tvar gotErr, wantErr string\n\t\t\tif len(cf.Invalid) > 0 {\n\t\t\t\twantErr = modzip.FileErrorList(cf.Invalid).Error()\n\t\t\t}\n\t\t\tif err := cf.Err(); err != nil {\n\t\t\t\tgotErr = err.Error()\n\t\t\t}\n\t\t\tif gotErr != wantErr {\n\t\t\t\tt.Errorf(\"got error:\\n%s\\n\\nwant error:\\n%s\", gotErr, wantErr)\n\t\t\t}\n\t\t})\n\t}\n}", "func (b Base) SameFile(sum, path string) bool {\n\treturn file.SameFile(b.blobPath(sum), path)\n}", "func (gc *GitCommit) SameDiffStat(b *GitCommit) bool {\n\tif len(gc.Files) != len(b.Files) {\n\t\treturn false\n\t}\n\tfor i, af := range gc.Files {\n\t\tbf := b.Files[i]\n\t\tif af == nil || bf == nil {\n\t\t\treturn false\n\t\t}\n\t\tif *af != *bf {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (b *Binary) CompareChecksum() error {\n\tif err := b.checksumList.Get(); err != nil {\n\t\treturn errors.Wrapf(err, \"%s get checksum failed\", b.Name())\n\t}\n\n\tsum := b.file.rootFs.Fs().SHA256Sum(b.LocalPath())\n\tif sum != b.checksumList.Value() {\n\t\treturn errors.Errorf(\"SHA256 no match. 
file: %s sha256: %s not equal checksum: %s\", b.Name(), sum, b.checksumList.Value())\n\t}\n\treturn nil\n}", "func TestFileOvverideDir(t *testing.T) {\n\tdir, err := ioutil.TempDir(\"\", tstprefix)\n\tif err != nil {\n\t\tt.Fatalf(\"error creating tempdir: %v\", err)\n\t}\n\tdefer os.RemoveAll(dir)\n\tds := NewTestStore()\n\n\timj := `\n\t\t{\n\t\t \"acKind\": \"ImageManifest\",\n\t\t \"acVersion\": \"0.1.1\",\n\t\t \"name\": \"example.com/test01\"\n\t\t}\n\t`\n\n\tentries := []*testTarEntry{\n\t\t{\n\t\t\tcontents: imj,\n\t\t\theader: &tar.Header{\n\t\t\t\tName: \"manifest\",\n\t\t\t\tSize: int64(len(imj)),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\theader: &tar.Header{\n\t\t\t\tName: \"rootfs/a/b\",\n\t\t\t\tTypeflag: tar.TypeDir,\n\t\t\t\tMode: 0700,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\theader: &tar.Header{\n\t\t\t\tName: \"rootfs/a/b/c\",\n\t\t\t\tTypeflag: tar.TypeDir,\n\t\t\t\tMode: 0700,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tcontents: \"hello\",\n\t\t\theader: &tar.Header{\n\t\t\t\tName: \"rootfs/a/b/c/file01\",\n\t\t\t\tSize: 5,\n\t\t\t},\n\t\t},\n\t}\n\n\tkey1, err := newTestACI(entries, dir, ds)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tim, err := createImageManifest(imj)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\timage1 := Image{Im: im, Key: key1, Level: 1}\n\n\timj = `\n\t\t{\n\t\t \"acKind\": \"ImageManifest\",\n\t\t \"acVersion\": \"0.1.1\",\n\t\t \"name\": \"example.com/test02\"\n\t\t}\n\t`\n\n\tk1, _ := types.NewHash(key1)\n\timj, err = addDependencies(imj,\n\t\ttypes.Dependency{\n\t\t\tImageName: \"example.com/test01\",\n\t\t\tImageID: k1},\n\t)\n\n\tentries = []*testTarEntry{\n\t\t{\n\t\t\tcontents: imj,\n\t\t\theader: &tar.Header{\n\t\t\t\tName: \"manifest\",\n\t\t\t\tSize: int64(len(imj)),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tcontents: \"hellohello\",\n\t\t\theader: &tar.Header{\n\t\t\t\tName: \"rootfs/a/b\",\n\t\t\t\tSize: 10,\n\t\t\t},\n\t\t},\n\t}\n\n\texpectedFiles := []*fileInfo{\n\t\t&fileInfo{path: \"manifest\", typeflag: tar.TypeReg},\n\t\t&fileInfo{path: \"rootfs/a/b\", typeflag: tar.TypeReg, size: 10},\n\t}\n\n\tkey2, err := newTestACI(entries, dir, ds)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tim, err = createImageManifest(imj)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\timage2 := Image{Im: im, Key: key2, Level: 0}\n\n\timages := Images{image2, image1}\n\terr = checkRenderACIFromList(images, expectedFiles, ds)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\terr = checkRenderACI(\"example.com/test02\", expectedFiles, ds)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n}", "func compareSHA(src, want string) error {\n\tf, err := os.Open(src)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to open file\")\n\t}\n\tdefer f.Close()\n\n\th := sha256.New()\n\tif _, err := io.Copy(h, f); err != nil {\n\t\treturn errors.Wrapf(err, \"sha256 hashing of file %s failed\", src)\n\t}\n\tif got := hex.EncodeToString(h.Sum(nil)); strings.Compare(got, want) != 0 {\n\t\treturn fmt.Errorf(\"sha256 hex code of file %s did not match, got: %s want: %s\", src, got, want)\n\t}\n\n\treturn nil\n}", "func Matches(l, r string) bool {\n\tli, err := os.Stat(l)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tvar ri os.FileInfo\n\tif ri, err = os.Stat(r); err != nil {\n\t\treturn false\n\t}\n\n\treturn li.Name() == ri.Name() &&\n\t\tli.Size() == ri.Size() &&\n\t\tli.Mode() == ri.Mode() &&\n\t\tli.ModTime() == ri.ModTime() &&\n\t\tli.IsDir() == 
ri.IsDir()\n\n}", "func (metadata MetaFile) CompareHash(hash, filename string) (bool, error) {\n\tfor _, v := range metadata.Verification.Hashes {\n\t\tswitch {\n\t\tcase v.Type == \"md5\" && v.Type == hash && len(v.Text) > 2:\n\t\t\tres, err := ComputeMd5(filename)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tresstr := hex.EncodeToString(res)\n\t\t\tif v.Text == resstr {\n\t\t\t\treturn true, nil\n\t\t\t} else {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\tcase v.Type == \"sha1\" && v.Type == hash && len(v.Text) > 2:\n\t\t\tres, err := ComputeSha1(filename)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tresstr := hex.EncodeToString(res)\n\t\t\tif v.Text == resstr {\n\t\t\t\treturn true, nil\n\t\t\t} else {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\tcase v.Type == \"sha256\" && v.Type == hash && len(v.Text) > 2:\n\t\t\tres, err := ComputeSha256(filename)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tresstr := hex.EncodeToString(res)\n\t\t\tif v.Text == resstr {\n\t\t\t\treturn true, nil\n\t\t\t} else {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\tcase v.Type == \"sha512\" && v.Type == hash && len(v.Text) > 2:\n\t\t\tres, err := ComputeSha256(filename)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tresstr := hex.EncodeToString(res)\n\t\t\tif v.Text == resstr {\n\t\t\t\treturn true, nil\n\t\t\t} else {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\tdefault:\n\t\t\t;\n\t\t}\n\t}\n\treturn false, fmt.Errorf(\"no \\\"%v\\\" hash present in the metafile\", hash)\n}", "func completeDigest(prefix string) (digest string) {\n\tif len(prefix) == DigestStdLen {\n\t\treturn prefix\n\t}\n\tlist, err := ioutil.ReadDir(containerPath)\n\tif err != nil {\n\t\treturn\n\t}\n\tfor _, file := range list {\n\t\tif file.IsDir() && strings.HasPrefix(file.Name(), prefix) {\n\t\t\treturn file.Name()\n\t\t}\n\t}\n\treturn\n}", "func equalBubbledMetadata(md1, md2 siadir.Metadata) error {\n\t// Check AggregateHealth\n\tif md1.AggregateHealth != md2.AggregateHealth {\n\t\treturn fmt.Errorf(\"AggregateHealth not equal, %v and %v\", md1.AggregateHealth, md2.AggregateHealth)\n\t}\n\t// Check AggregateNumFiles\n\tif md1.AggregateNumFiles != md2.AggregateNumFiles {\n\t\treturn fmt.Errorf(\"AggregateNumFiles not equal, %v and %v\", md1.AggregateNumFiles, md2.AggregateNumFiles)\n\t}\n\t// Check Size\n\tif md1.AggregateSize != md2.AggregateSize {\n\t\treturn fmt.Errorf(\"aggregate sizes not equal, %v and %v\", md1.AggregateSize, md2.AggregateSize)\n\t}\n\t// Check Health\n\tif md1.Health != md2.Health {\n\t\treturn fmt.Errorf(\"healths not equal, %v and %v\", md1.Health, md2.Health)\n\t}\n\t// Check LastHealthCheckTimes\n\tif md2.LastHealthCheckTime != md1.LastHealthCheckTime {\n\t\treturn fmt.Errorf(\"LastHealthCheckTimes not equal %v and %v\", md2.LastHealthCheckTime, md1.LastHealthCheckTime)\n\t}\n\t// Check MinRedundancy\n\tif md1.MinRedundancy != md2.MinRedundancy {\n\t\treturn fmt.Errorf(\"MinRedundancy not equal, %v and %v\", md1.MinRedundancy, md2.MinRedundancy)\n\t}\n\t// Check Mod Times\n\tif md2.ModTime != md1.ModTime {\n\t\treturn fmt.Errorf(\"ModTimes not equal %v and %v\", md2.ModTime, md1.ModTime)\n\t}\n\t// Check NumFiles\n\tif md1.NumFiles != md2.NumFiles {\n\t\treturn fmt.Errorf(\"NumFiles not equal, %v and %v\", md1.NumFiles, md2.NumFiles)\n\t}\n\t// Check NumStuckChunks\n\tif md1.NumStuckChunks != md2.NumStuckChunks {\n\t\treturn fmt.Errorf(\"NumStuckChunks not equal, %v and %v\", md1.NumStuckChunks, md2.NumStuckChunks)\n\t}\n\t// Check 
NumSubDirs\n\tif md1.NumSubDirs != md2.NumSubDirs {\n\t\treturn fmt.Errorf(\"NumSubDirs not equal, %v and %v\", md1.NumSubDirs, md2.NumSubDirs)\n\t}\n\t// Check StuckHealth\n\tif md1.StuckHealth != md2.StuckHealth {\n\t\treturn fmt.Errorf(\"stuck healths not equal, %v and %v\", md1.StuckHealth, md2.StuckHealth)\n\t}\n\treturn nil\n}", "func auditFile(str *strings.Builder, res Result) bool {\n\tstr.WriteString(\"\\n\")\n\n\tidentifiedByHash := true\n\tfound := findByHashes(res)\n\tif found == \"\" {\n\t\t_, found = filepath.Split(res.File)\n\t\tidentifiedByHash = false\n\t}\n\n\tvalid := true\n\n\tif val, ok := hashDatabase[found]; ok {\n\t\tif identifiedByHash {\n\t\t\tstr.WriteString(fmt.Sprintf(\"%s (identified by hash)\\n\", res.File))\n\t\t} else {\n\t\t\tstr.WriteString(fmt.Sprintf(\"%s (identified by filename)\\n\", res.File))\n\t\t}\n\n\t\tstr.WriteString(fmt.Sprintf(\"description %s\\n\", val.Description))\n\t\tstr.WriteString(fmt.Sprintf(\" version %s\\n\", val.Version))\n\t\tstr.WriteString(fmt.Sprintf(\" date %s\\n\", val.Date))\n\t\tstr.WriteString(\"\\n\")\n\n\t\tif hasHash(HashNames.MD5) && val.MD5 != \"\" {\n\t\t\tif res.MD5 == val.MD5 {\n\t\t\t\tstr.WriteString(\" MD5 \" + val.MD5 + \" pass\\n\")\n\t\t\t} else {\n\t\t\t\tstr.WriteString(\" MD5 \" + val.MD5 + \" fail\\n\")\n\t\t\t\tvalid = false\n\t\t\t}\n\t\t}\n\n\t\tif hasHash(HashNames.SHA1) && val.SHA1 != \"\" {\n\t\t\tif res.SHA1 == val.SHA1 {\n\t\t\t\tstr.WriteString(\" SHA1 \" + val.SHA1 + \" pass\\n\")\n\t\t\t} else {\n\t\t\t\tstr.WriteString(\" SHA1 \" + val.SHA1 + \" fail\\n\")\n\t\t\t\tvalid = false\n\t\t\t}\n\t\t}\n\n\t\tif hasHash(HashNames.SHA256) && val.SHA256 != \"\" {\n\t\t\tif res.SHA256 == val.SHA256 {\n\t\t\t\tstr.WriteString(\" SHA256 \" + val.SHA256 + \" pass\\n\")\n\t\t\t} else {\n\t\t\t\tstr.WriteString(\" SHA256 \" + val.SHA256 + \" fail\\n\")\n\t\t\t\tvalid = false\n\t\t\t}\n\t\t}\n\n\t\tif hasHash(HashNames.SHA512) && val.SHA512 != \"\" {\n\t\t\tif res.SHA512 == val.SHA512 {\n\t\t\t\tstr.WriteString(\" SHA512 \" + val.SHA512 + \" pass\\n\")\n\t\t\t} else {\n\t\t\t\tstr.WriteString(\" SHA512 \" + val.SHA512 + \" fail\\n\")\n\t\t\t\tvalid = false\n\t\t\t}\n\t\t}\n\t} else {\n\t\tstr.WriteString(fmt.Sprintf(\"%s (unknown file cannot audit)\\n\", res.File))\n\t}\n\n\treturn valid\n}", "func compareDirectoryInfoAndMetadata(di modules.DirectoryInfo, siaDir *siadir.SiaDirSetEntry) error {\n\t_, stuckHealth, lastHealthCheckTime := siaDir.Health()\n\tif di.HyperspacePath != siaDir.HyperspacePath() {\n\t\treturn fmt.Errorf(\"HyperspacePaths not equal %v and %v\", di.HyperspacePath, siaDir.HyperspacePath())\n\t}\n\tif di.Health != stuckHealth {\n\t\treturn fmt.Errorf(\"Healths not equal %v and %v\", di.HyperspacePath, stuckHealth)\n\t}\n\tif di.LastHealthCheckTime != lastHealthCheckTime {\n\t\treturn fmt.Errorf(\"LastHealthCheckTimes not equal %v and %v\", di.LastHealthCheckTime, lastHealthCheckTime)\n\t}\n\treturn nil\n}", "func Match(files1 []*File, files2 []*File, mapper func(*File) string) []*File {\n\tfileMap := make(map[string]*File)\n\tfor _, f := range files2 {\n\t\tkey := mapper(f)\n\t\tfileMap[key] = f\n\t}\n\n\tdiff := make([]*File, 0)\n\n\tfor _, f := range files1 {\n\t\tkey := mapper(f)\n\n\t\tmatch := fileMap[key]\n\t\tif match != nil {\n\t\t\t// Have similar files - are they really the same file?\n\t\t\tif f.Size != match.Size {\n\t\t\t\tdiff = append(diff, f)\n\t\t\t} else {\n\t\t\t\tdebugf(\"Matched: %s to %s\", f.AbsolutePath, match.AbsolutePath)\n\t\t\t}\n\t\t} else {\n\t\t\tdiff = 
append(diff, f)\n\t\t}\n\t}\n\n\treturn diff\n}", "func cDiffDigests() (*test, error) {\n\tlPath := filepath.Join(cTestDir, \"diffdigests\", \"docker-lock.json\")\n\n\tflags, err := NewFlags(lPath, defaultConfigPath(), \".env\", false, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &test{flags: flags, shouldFail: true}, nil\n}", "func fileCompare(p1, p2 string) (bool, error) {\n\ta, err := ioutil.ReadFile(p1)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tb, err := ioutil.ReadFile(p2)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn bytes.Equal(a, b), nil\n\t// return bytes.Compare(a, b) == 0, nil\n}", "func TestEvaluateDuplicates(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"no duplicates\", func(t *testing.T) {\n\t\tdestDir, err := ioutil.TempDir(\"\", \"destDir_unarchive\")\n\t\trequire.NoError(t, err)\n\t\tdefer os.RemoveAll(destDir)\n\n\t\tfileA, err := ioutil.TempFile(destDir, \"fileA.txt\")\n\t\trequire.NoError(t, err)\n\t\tdefer os.Remove(fileA.Name())\n\n\t\tfileB, err := ioutil.TempFile(destDir, \"fileB.txt\")\n\t\trequire.NoError(t, err)\n\t\tdefer os.Remove(fileB.Name())\n\n\t\tddInfo, err := os.Open(destDir)\n\t\trequire.NoError(t, err)\n\t\tdefer ddInfo.Close()\n\n\t\tfetchDir, err := ioutil.TempDir(\"\", \"fetchDir_unarchive\")\n\t\trequire.NoError(t, err)\n\t\tdefer os.RemoveAll(fetchDir)\n\n\t\tfileC, err := ioutil.TempFile(fetchDir, \"fileC.txt\")\n\t\trequire.NoError(t, err)\n\t\tdefer os.Remove(fileC.Name())\n\n\t\tfileD, err := ioutil.TempFile(fetchDir, \"fileD.txt\")\n\t\trequire.NoError(t, err)\n\t\tdefer os.Remove(fileD.Name())\n\n\t\tfdInfo, err := os.Open(fetchDir)\n\t\trequire.NoError(t, err)\n\t\tdefer fdInfo.Close()\n\n\t\tu := &Unarchive{\n\t\t\tdestDir: ddInfo,\n\t\t\tfetchDir: fdInfo,\n\t\t}\n\n\t\terr = filepath.Walk(u.destDir.Name(), func(path string, f os.FileInfo, err error) error {\n\t\t\tu.destContents = append(u.destContents, path)\n\t\t\treturn nil\n\t\t})\n\t\trequire.NoError(t, err)\n\n\t\terr = filepath.Walk(u.fetchDir.Name(), func(path string, f os.FileInfo, err error) error {\n\t\t\tu.fetchContents = append(u.fetchContents, path)\n\t\t\treturn nil\n\t\t})\n\t\trequire.NoError(t, err)\n\n\t\terr = u.evaluateDuplicates()\n\n\t\tassert.NoError(t, err)\n\t})\n\n\tt.Run(\"duplicates\", func(t *testing.T) {\n\t\tdestDir, err := ioutil.TempDir(\"\", \"destDir_unarchive\")\n\t\trequire.NoError(t, err)\n\t\tdefer os.RemoveAll(destDir)\n\n\t\tfileADest, err := os.Create(destDir + \"/fileA.txt\")\n\t\trequire.NoError(t, err)\n\t\tdefer os.Remove(fileADest.Name())\n\n\t\tddInfo, err := os.Open(destDir)\n\t\trequire.NoError(t, err)\n\t\tdefer ddInfo.Close()\n\n\t\tfetchDir, err := ioutil.TempDir(\"\", \"fetchDir_unarchive\")\n\t\trequire.NoError(t, err)\n\t\tdefer os.RemoveAll(fetchDir)\n\n\t\tfileAFetch, err := os.Create(fetchDir + \"/fileA.txt\")\n\t\trequire.NoError(t, err)\n\t\tdefer os.Remove(fileAFetch.Name())\n\n\t\tfdInfo, err := os.Open(fetchDir)\n\t\trequire.NoError(t, err)\n\t\tdefer fdInfo.Close()\n\n\t\tt.Run(\"checksum match\", func(t *testing.T) {\n\t\t\tu := &Unarchive{\n\t\t\t\tdestDir: ddInfo,\n\t\t\t\tfetchDir: fdInfo,\n\t\t\t}\n\n\t\t\tchecksumDest, err := u.getChecksum(destDir + \"/fileA.txt\")\n\t\t\trequire.NoError(t, err)\n\t\t\tchecksumFetch, err := u.getChecksum(fetchDir + \"/fileA.txt\")\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, checksumDest, checksumFetch)\n\n\t\t\terr = filepath.Walk(u.destDir.Name(), func(path string, f os.FileInfo, err error) error {\n\t\t\t\tu.destContents = 
append(u.destContents, path)\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = filepath.Walk(u.fetchDir.Name(), func(path string, f os.FileInfo, err error) error {\n\t\t\t\tu.fetchContents = append(u.fetchContents, path)\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = u.evaluateDuplicates()\n\n\t\t\tassert.NoError(t, err)\n\t\t})\n\n\t\tt.Run(\"checksum mismatch\", func(t *testing.T) {\n\t\t\tt.Run(\"different size\", func(t *testing.T) {\n\t\t\t\tfileBDest, err := os.Create(destDir + \"/fileB.txt\")\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tdefer os.Remove(fileBDest.Name())\n\n\t\t\t\tfileBFetch, err := os.Create(fetchDir + \"/fileB.txt\")\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tdefer os.Remove(fileBFetch.Name())\n\n\t\t\t\t_, err = fileBFetch.Write([]byte{1})\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tdStat, _ := os.Stat(destDir + \"/fileB.txt\")\n\t\t\t\tfStat, _ := os.Stat(fetchDir + \"/fileB.txt\")\n\t\t\t\trequire.NotEqual(t, dStat.Size(), fStat.Size())\n\n\t\t\t\tu := &Unarchive{\n\t\t\t\t\tDestination: destDir,\n\t\t\t\t\tdestDir: ddInfo,\n\t\t\t\t\tfetchDir: fdInfo,\n\t\t\t\t}\n\n\t\t\t\tchecksumDest, err := u.getChecksum(destDir + \"/fileB.txt\")\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tchecksumFetch, err := u.getChecksum(fetchDir + \"/fileB.txt\")\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\trequire.NotEqual(t, checksumDest, checksumFetch)\n\n\t\t\t\terr = filepath.Walk(u.destDir.Name(), func(path string, f os.FileInfo, err error) error {\n\t\t\t\t\tu.destContents = append(u.destContents, path)\n\t\t\t\t\treturn nil\n\t\t\t\t})\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\terr = filepath.Walk(u.fetchDir.Name(), func(path string, f os.FileInfo, err error) error {\n\t\t\t\t\tu.fetchContents = append(u.fetchContents, path)\n\t\t\t\t\treturn nil\n\t\t\t\t})\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\terr = u.evaluateDuplicates()\n\n\t\t\t\tassert.EqualError(t, err, fmt.Sprintf(\"will not replace, \\\"/fileB.txt\\\" exists at %q: checksum mismatch\", u.Destination))\n\t\t\t})\n\n\t\t\tt.Run(\"same size\", func(t *testing.T) {\n\t\t\t\tfileBDest, err := os.Create(destDir + \"/fileB.txt\")\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tdefer os.Remove(fileBDest.Name())\n\n\t\t\t\t_, err = fileBDest.WriteString(\"a\")\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tfileBFetch, err := os.Create(fetchDir + \"/fileB.txt\")\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tdefer os.Remove(fileBFetch.Name())\n\n\t\t\t\t_, err = fileBFetch.WriteString(\"b\")\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tdStat, _ := os.Stat(destDir + \"/fileB.txt\")\n\t\t\t\tfStat, _ := os.Stat(fetchDir + \"/fileB.txt\")\n\t\t\t\trequire.Equal(t, dStat.Size(), fStat.Size())\n\n\t\t\t\tu := &Unarchive{\n\t\t\t\t\tDestination: destDir,\n\t\t\t\t\tdestDir: ddInfo,\n\t\t\t\t\tfetchDir: fdInfo,\n\t\t\t\t}\n\n\t\t\t\tchecksumDest, err := u.getChecksum(destDir + \"/fileB.txt\")\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tchecksumFetch, err := u.getChecksum(fetchDir + \"/fileB.txt\")\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\trequire.NotEqual(t, checksumDest, checksumFetch)\n\n\t\t\t\terr = filepath.Walk(u.destDir.Name(), func(path string, f os.FileInfo, err error) error {\n\t\t\t\t\tu.destContents = append(u.destContents, path)\n\t\t\t\t\treturn nil\n\t\t\t\t})\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\terr = filepath.Walk(u.fetchDir.Name(), func(path string, f os.FileInfo, err error) error {\n\t\t\t\t\tu.fetchContents = append(u.fetchContents, path)\n\t\t\t\t\treturn 
nil\n\t\t\t\t})\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\terr = u.evaluateDuplicates()\n\n\t\t\t\tassert.EqualError(t, err, fmt.Sprintf(\"will not replace, \\\"/fileB.txt\\\" exists at %q: checksum mismatch\", u.Destination))\n\t\t\t})\n\t\t})\n\t})\n\n\tt.Run(\"recurse\", func(t *testing.T) {\n\t\tdestDir, err := ioutil.TempDir(\"\", \"destDir_unarchive\")\n\t\trequire.NoError(t, err)\n\t\tdefer os.RemoveAll(destDir)\n\n\t\tstat, err := os.Stat(destDir)\n\t\trequire.NoError(t, err)\n\n\t\terr = os.Mkdir(destDir+\"/dirA\", stat.Mode().Perm())\n\t\trequire.NoError(t, err)\n\n\t\terr = os.Mkdir(destDir+\"/dirA/dirB\", stat.Mode().Perm())\n\t\trequire.NoError(t, err)\n\n\t\tfileBDest, err := os.Create(destDir + \"/dirA/dirB/fileB.txt\")\n\t\trequire.NoError(t, err)\n\t\tdefer os.Remove(fileBDest.Name())\n\n\t\tddInfo, err := os.Open(destDir)\n\t\trequire.NoError(t, err)\n\t\tdefer ddInfo.Close()\n\n\t\tfetchDir, err := ioutil.TempDir(\"\", \"fetchDir_unarchive\")\n\t\trequire.NoError(t, err)\n\t\tdefer os.RemoveAll(fetchDir)\n\n\t\tstat, err = os.Stat(fetchDir)\n\t\trequire.NoError(t, err)\n\n\t\terr = os.Mkdir(fetchDir+\"/dirA\", stat.Mode().Perm())\n\t\trequire.NoError(t, err)\n\n\t\terr = os.Mkdir(fetchDir+\"/dirA/dirB\", stat.Mode().Perm())\n\t\trequire.NoError(t, err)\n\n\t\tfileBFetch, err := os.Create(fetchDir + \"/dirA/dirB/fileB.txt\")\n\t\trequire.NoError(t, err)\n\t\tdefer os.Remove(fileBFetch.Name())\n\n\t\tfdInfo, err := os.Open(fetchDir)\n\t\trequire.NoError(t, err)\n\t\tdefer fdInfo.Close()\n\n\t\tt.Run(\"checksum match\", func(t *testing.T) {\n\t\t\tu := &Unarchive{\n\t\t\t\tDestination: destDir,\n\t\t\t\tdestDir: ddInfo,\n\t\t\t\tfetchDir: fdInfo,\n\t\t\t}\n\n\t\t\tchecksumDest, err := u.getChecksum(destDir + \"/dirA/dirB/fileB.txt\")\n\t\t\trequire.NoError(t, err)\n\t\t\tchecksumFetch, err := u.getChecksum(fetchDir + \"/dirA/dirB/fileB.txt\")\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, checksumDest, checksumFetch)\n\n\t\t\terr = filepath.Walk(u.destDir.Name(), func(path string, f os.FileInfo, err error) error {\n\t\t\t\tu.destContents = append(u.destContents, path)\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = filepath.Walk(u.fetchDir.Name(), func(path string, f os.FileInfo, err error) error {\n\t\t\t\tu.fetchContents = append(u.fetchContents, path)\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = u.evaluateDuplicates()\n\n\t\t\tassert.NoError(t, err)\n\t\t})\n\n\t\tt.Run(\"checksum mismatch\", func(t *testing.T) {\n\t\t\tfileCDest, err := os.Create(destDir + \"/dirA/dirB/fileC.txt\")\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer os.Remove(fileCDest.Name())\n\n\t\t\tfileCFetch, err := os.Create(fetchDir + \"/dirA/dirB/fileC.txt\")\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer os.Remove(fileCFetch.Name())\n\n\t\t\t_, err = fileCFetch.Write([]byte{2})\n\t\t\trequire.NoError(t, err)\n\n\t\t\tu := &Unarchive{\n\t\t\t\tDestination: destDir,\n\t\t\t\tdestDir: ddInfo,\n\t\t\t\tfetchDir: fdInfo,\n\t\t\t}\n\n\t\t\tchecksumDest, err := u.getChecksum(destDir + \"/dirA/dirB/fileC.txt\")\n\t\t\trequire.NoError(t, err)\n\t\t\tchecksumFetch, err := u.getChecksum(fetchDir + \"/dirA/dirB/fileC.txt\")\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NotEqual(t, checksumDest, checksumFetch)\n\n\t\t\terr = filepath.Walk(u.destDir.Name(), func(path string, f os.FileInfo, err error) error {\n\t\t\t\tu.destContents = append(u.destContents, path)\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = 
filepath.Walk(u.fetchDir.Name(), func(path string, f os.FileInfo, err error) error {\n\t\t\t\tu.fetchContents = append(u.fetchContents, path)\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = u.evaluateDuplicates()\n\n\t\t\tassert.EqualError(t, err, fmt.Sprintf(\"will not replace, \\\"/dirA/dirB/fileC.txt\\\" exists at %q: checksum mismatch\", u.Destination))\n\t\t})\n\t})\n}", "func TestDiff_additionalSourceFiles(t *testing.T) {\n\ts := t.TempDir()\n\td := t.TempDir()\n\n\terr := os.Mkdir(filepath.Join(s, \"a1\"), 0700)\n\tassert.NoError(t, err)\n\n\terr = os.Mkdir(filepath.Join(s, \"a2\"), 0700)\n\tassert.NoError(t, err)\n\n\terr = os.Mkdir(filepath.Join(d, \"a1\"), 0700)\n\tassert.NoError(t, err)\n\n\tdiff, err := Diff(s, d)\n\tassert.NoError(t, err)\n\tassert.ElementsMatch(t, diff.List(), []string{\"a2\"})\n}", "func EqualTextFiles(t *testing.T, fSys *afero.Afero, fn0, fn1 string) {\n\n\t// Read entire file content, giving us little control but\n\t// making it very simple. No need to close the file.\n\tb0, e0 := fSys.ReadFile(fn0)\n\tassert.Nil(t, e0)\n\tb1, e1 := fSys.ReadFile(fn1)\n\tassert.Nil(t, e1)\n\n\t// Convert []byte to string and print to screen\n\ts0 := string(b0)\n\ts1 := string(b1)\n\n\tEqualLines(t, s0, s1)\n}", "func SortEqual(o, n []map[string][]string) bool {\n\tif len(o) != len(n) {\n\t\treturn false\n\t}\n\tfor i := range n {\n\t\tf1 := o[i][\"file\"][0]\n\t\tf2 := n[i][\"file\"][0]\n\t\tif f1 != f2 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func TestSyncFile(t *testing.T) {\n\td1 := t.TempDir()\n\td2 := t.TempDir()\n\tf1Name := d1 + \"/temp.txt\"\n\tf2Name := d2 + \"/temp.txt\"\n\terr := os.WriteFile(f1Name, []byte(\"abc\"), 0600)\n\tassert.NoError(t, err)\n\terr = os.WriteFile(f2Name, []byte(\"def\"), 0644)\n\texpectedFileInfo, _ := os.Stat(f2Name)\n\tassert.NoError(t, err)\n\terr = SyncFile(f1Name, f2Name)\n\tassert.NoError(t, err)\n\tactual, err := os.ReadFile(f2Name)\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"abc\", string(actual))\n\tdstFileInfo, _ := os.Stat(f2Name)\n\tassert.Equal(t, expectedFileInfo.Mode().String(), dstFileInfo.Mode().String())\n}", "func (r *remoteFile) Compare(name string) (bool, error) {\n\tf, err := os.Open(name)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer f.Close()\n\tfi, err := f.Stat()\n\t// b, err := ioutil.ReadFile(name) // size := len(b)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\t// if r.Length == -1 {\n\t// \treturn false, fmt.Errorf(\"%s: no content length\", r.URL)\n\t// }\n\tif fi.Size() != r.Length {\n\t\t// fmt.Println(\"DIFFERENT SIZE\", fi.Size(), \"->\", r.Length)\n\t\treturn false, nil\n\t}\n\tif fi.ModTime().After(r.Date) {\n\t\t// fmt.Println(\"DIFFERENT DATE\", fi.ModTime(), \"->\", r.Date)\n\t\treturn false, nil\n\t}\n\t// fmt.Println(\"Etag\", r.Etag)\n\t// h := sha1.New()\n\t// if _, err = io.Copy(h, f); err != nil {\n\t// \treturn false, err\n\t// }\n\t// hash := hex.EncodeToString(h.Sum(nil))\n\t// fmt.Println(\"SHA1\", hash)\n\treturn true, nil\n}", "func TestDiff_srcDestContentsDiffer(t *testing.T) {\n\ts := t.TempDir()\n\td := t.TempDir()\n\n\terr := os.Mkdir(filepath.Join(s, \"a1\"), 0700)\n\tassert.NoError(t, err)\n\terr = os.WriteFile(\n\t\tfilepath.Join(s, \"a1\", \"f.yaml\"), []byte(`a`), 0600)\n\tassert.NoError(t, err)\n\n\terr = os.Mkdir(filepath.Join(d, \"a1\"), 0700)\n\tassert.NoError(t, err)\n\terr = os.WriteFile(\n\t\tfilepath.Join(d, \"a1\", \"f.yaml\"), []byte(`b`), 0600)\n\tassert.NoError(t, err)\n\n\tdiff, err := Diff(s, 
d)\n\tassert.NoError(t, err)\n\tassert.ElementsMatch(t, diff.List(), []string{\n\t\tfmt.Sprintf(\"a1%sf.yaml\", string(filepath.Separator)),\n\t})\n}", "func IdenticalFileContents(ctx context.Context, files ...FileReader) (identical bool, err error) {\n\tif len(files) < 2 {\n\t\treturn false, fmt.Errorf(\"need at least 2 files to compare, got %d\", len(files))\n\t}\n\tif !files[0].Exists() {\n\t\treturn false, NewErrDoesNotExistFileReader(files[0])\n\t}\n\tsize := files[0].Size()\n\tfor _, file := range files[1:] {\n\t\tif !file.Exists() {\n\t\t\treturn false, NewErrDoesNotExistFileReader(file)\n\t\t}\n\t\tif file.Size() != size {\n\t\t\treturn false, nil\n\t\t}\n\t}\n\tif ctx.Err() != nil {\n\t\treturn false, ctx.Err()\n\t}\n\n\t// Compare bytes directly in memory up to compareContentHashSizeThreshold\n\t// use content hash for larger files to not take up too much RAM\n\tif size <= compareContentHashSizeThreshold {\n\t\tref, err := files[0].ReadAllContext(ctx)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tfor _, file := range files[1:] {\n\t\t\tcomp, err := file.ReadAllContext(ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tif bytes.Compare(comp, ref) != 0 {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t} else {\n\t\tref, err := files[0].ContentHash()\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tfor _, file := range files[1:] {\n\t\t\tif ctx.Err() != nil {\n\t\t\t\treturn false, ctx.Err()\n\t\t\t}\n\t\t\tcomp, err := file.ContentHash()\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tif comp != ref {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn true, nil\n}", "func checkFiles(destDir string, files fileMap) error {\n\t// We are going to remove elements from the map, but we don't want to modify the map used by the caller\n\tfiles = copyFileMap(files)\n\n\terr := filepath.Walk(destDir, func(path string, info os.FileInfo, err error) error {\n\t\tlogging.Debugf(\"Walking %s\", path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif info.IsDir() {\n\t\t\tlogging.Debugf(\"Skipping directory %s\", path)\n\t\t\treturn nil\n\t\t}\n\t\tlogging.Debugf(\"Checking file %s\", path)\n\t\tarchivePath, err := filepath.Rel(destDir, path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\texpectedContent, found := files[archivePath]\n\t\tif !found {\n\t\t\treturn fmt.Errorf(\"Unexpected extracted file '%s'\", path)\n\t\t}\n\t\tdelete(files, archivePath)\n\n\t\tdata, err := os.ReadFile(path) // #nosec G304\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif string(data) != expectedContent {\n\t\t\treturn fmt.Errorf(\"Unexpected content for '%s': expected [%s], got [%s]\", path, expectedContent, string(data))\n\t\t}\n\t\tlogging.Debugf(\"'%s' successfully checked\", path)\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(files) != 0 {\n\t\treturn fmt.Errorf(\"Some expected files were not extracted: %v\", files)\n\t}\n\n\treturn nil\n}", "func diffFiles(t *testing.T, expectedPath, actual string) error {\n\tt.Helper()\n\twriteTmp := func(content string) (string, error) {\n\t\ttmp, err := ioutil.TempFile(\"\", \"*.yaml\")\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tdefer func() {\n\t\t\ttmp.Close()\n\t\t}()\n\t\tif _, err := tmp.Write([]byte(content)); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn tmp.Name(), nil\n\t}\n\n\tactualTmp, err := writeTmp(actual)\n\tif err != nil {\n\t\treturn xerrors.Errorf(\"write actual yaml to temp file failed: %w\", err)\n\t}\n\tt.Logf(\"Wrote actual 
to %s\", actualTmp)\n\n\t// pls to use unified diffs, kthxbai?\n\tcmd := exec.Command(\"diff\", \"-u\", expectedPath, actualTmp)\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn xerrors.Errorf(\"set up stdout pipe from diff failed: %w\", err)\n\t}\n\n\tif err := cmd.Start(); err != nil {\n\t\treturn xerrors.Errorf(\"start command failed: %w\", err)\n\t}\n\n\tdiff, err := ioutil.ReadAll(stdout)\n\tif err != nil {\n\t\treturn xerrors.Errorf(\"read from diff stdout failed: %w\", err)\n\t}\n\n\tif err := cmd.Wait(); err != nil {\n\t\texitErr, ok := err.(*exec.ExitError)\n\t\tif !ok {\n\t\t\treturn xerrors.Errorf(\"wait for command to finish failed: %w\", err)\n\t\t}\n\t\tt.Logf(\"Diff exited %s\", exitErr)\n\t}\n\n\texpectedAbs, err := filepath.Abs(expectedPath)\n\tif err != nil {\n\t\tt.Logf(\"getting absolute path for %s failed: %s\", expectedPath, err)\n\t\texpectedAbs = expectedPath\n\t}\n\n\tt.Logf(\"View diff: meld %s %s\", expectedAbs, actualTmp)\n\tt.Logf(\"Diff: expected - + actual\\n%s\", diff)\n\treturn nil\n}", "func equalsDS(a, b *dns.DS) bool {\n\tif a.Algorithm == b.Algorithm &&\n\t\tstrings.ToLower(a.Digest) == strings.ToLower(b.Digest) &&\n\t\ta.DigestType == b.DigestType &&\n\t\ta.KeyTag == b.KeyTag {\n\t\treturn true\n\t}\n\treturn false\n}", "func hash(fn string) (res string) {\n\th := sha256.New()\n\n\tfi, err := os.Stat(fn)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tf, err := os.Open(fn)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdefer f.Close()\n\n\tif fi.IsDir() {\n\t\tns, err := f.Readdirnames(0)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tfor _, e := range ns {\n\t\t\th.Write([]byte(e))\n\t\t}\n\t} else {\n\t\tif _, err := io.Copy(h, f); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn string(h.Sum(nil))\n}", "func GetFilesMetadataHash(paths []string) (string, error) {\n\thashData := make([]string, 0, len(paths))\n\n\tfor _, path := range paths {\n\t\tmeta, err := os.Stat(path)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"couldn't stat '%s': %v\", path, err)\n\t\t}\n\n\t\thashData = append(hashData, meta.Name()+strconv.FormatInt(meta.Size(), 10)+meta.ModTime().String())\n\t}\n\n\t// sort the strings so that the same paths in different order still generate the same hash\n\tsort.Strings(hashData)\n\n\th := sha256.Sum256([]byte(strings.Join(hashData, \"\")))\n\treturn base64.StdEncoding.EncodeToString(h[:]), nil\n}", "func TestDiff_additionalDestFiles(t *testing.T) {\n\ts := t.TempDir()\n\td := t.TempDir()\n\n\terr := os.Mkdir(filepath.Join(s, \"a1\"), 0700)\n\tassert.NoError(t, err)\n\n\terr = os.Mkdir(filepath.Join(d, \"a1\"), 0700)\n\tassert.NoError(t, err)\n\n\terr = os.Mkdir(filepath.Join(d, \"a2\"), 0700)\n\tassert.NoError(t, err)\n\n\tdiff, err := Diff(s, d)\n\tassert.NoError(t, err)\n\tassert.ElementsMatch(t, diff.List(), []string{\"a2\"})\n}", "func (fOpenCfg *FileOpenConfig) Equal(fOpStat2 *FileOpenConfig) bool {\n\n if fOpenCfg.fileOpenModes == nil {\n fOpenCfg.fileOpenModes = make([]FileOpenMode, 0)\n }\n\n if fOpStat2.fileOpenModes == nil {\n fOpStat2.fileOpenModes = make([]FileOpenMode, 0)\n }\n\n if fOpenCfg.isInitialized != fOpStat2.isInitialized {\n return false\n }\n\n lenfOpStat1 := len(fOpenCfg.fileOpenModes)\n\n lenfOpStat2 := len(fOpStat2.fileOpenModes)\n\n if lenfOpStat1 != lenfOpStat2 {\n return false\n }\n\n if fOpenCfg.fileOpenType != fOpStat2.fileOpenType {\n return false\n }\n\n for i := 0; i < lenfOpStat1; i++ {\n isFound := false\n\n for j := 0; j < lenfOpStat1; j++ {\n if fOpStat2.fileOpenModes[j] == 
fOpenCfg.fileOpenModes[i] {\n isFound = true\n }\n }\n\n if !isFound {\n return false\n }\n }\n\n return true\n}", "func TestDirectorySize(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\n\t// Create a test directory with sub folders\n\t//\n\t// root/ file\n\t// root/SubDir1/\n\t// root/SubDir1/SubDir2/ file\n\n\t// Create test renter\n\trt, err := newRenterTesterWithDependency(t.Name(), &dependencies.DependencyDisableRepairAndHealthLoops{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer rt.Close()\n\n\t// Create directory tree\n\tsubDir1, err := modules.NewSiaPath(\"SubDir1\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsubDir2, err := modules.NewSiaPath(\"SubDir2\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := rt.renter.CreateDir(subDir1); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsubDir1_2, err := subDir1.Join(subDir2.String())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := rt.renter.CreateDir(subDir1_2); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Add files\n\trsc, _ := siafile.NewRSCode(1, 1)\n\tup := modules.FileUploadParams{\n\t\tSource: \"\",\n\t\tSiaPath: modules.RandomSiaPath(),\n\t\tErasureCode: rsc,\n\t}\n\tfileSize := uint64(100)\n\t_, err = rt.renter.staticFileSet.NewSiaFile(up, crypto.GenerateSiaKey(crypto.RandomCipherType()), fileSize, 0777)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tup.SiaPath, err = subDir1_2.Join(hex.EncodeToString(fastrand.Bytes(8)))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t_, err = rt.renter.staticFileSet.NewSiaFile(up, crypto.GenerateSiaKey(crypto.RandomCipherType()), 2*fileSize, 0777)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Call bubble on lowest lever and confirm top level reports accurate size\n\trt.renter.managedBubbleMetadata(subDir1_2)\n\tbuild.Retry(100, 100*time.Millisecond, func() error {\n\t\tdirInfo, err := rt.renter.staticDirSet.DirInfo(modules.RootSiaPath())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif dirInfo.AggregateSize != 3*fileSize {\n\t\t\treturn fmt.Errorf(\"AggregateSize incorrect, got %v expected %v\", dirInfo.AggregateSize, 3*fileSize)\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func Equals(f string, s string) (bool, error) {\n\tif len([]byte(f)) == len([]byte(s)) {\n\t\tfor i := range []byte(f) {\n\t\t\tif f[i] != s[i] {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}", "func (s *Service) checkChecksumFiles() error {\n\tlog.Debug(context.Background(), \"ui> checking checksum files...\")\n\n\tfilesUI := filepath.Join(s.HTMLDir, \"FILES_UI\")\n\tcontent, err := os.ReadFile(filesUI)\n\tif err != nil {\n\t\treturn sdk.WrapError(err, \"error while reading file %s\", filesUI)\n\t}\n\tlines := strings.Split(string(content), \"\\n\")\n\n\tfor _, lineValues := range lines {\n\t\tline := strings.Split(lineValues, \";\")\n\t\tif len(line) != 2 {\n\t\t\tcontinue\n\t\t}\n\t\tsha512sum, err512 := sdk.FileSHA512sum(filepath.Join(s.HTMLDir, line[0]))\n\t\tif err512 != nil {\n\t\t\treturn sdk.WrapError(err512, \"error while compute sha512 on %s\", line[0])\n\t\t}\n\t\tif line[1] != sha512sum {\n\t\t\treturn fmt.Errorf(\"file %s sha512:%s computed:%s\", line[0], line[1], sha512sum)\n\t\t}\n\t}\n\tlog.Debug(context.Background(), \"ui> checking checksum files OK\")\n\treturn nil\n}", "func EqualHashes(fil FileInfoList) []EqualSize {\n\tes := SortSizeHashes(fil)\n\n\treturn equalHashes(es)\n}", "func TestChecksum(t *testing.T) {\n\tt.Parallel()\n\twinners := []parameters{\n\t\t[]string{\"md5\", 
\"d41d8cd98f00b204e9800998ecf8427e\", \"/dev/null\"},\n\t}\n\t// generate losers from all files - none of them have that checksum\n\tlosers := []parameters{}\n\tfor _, f := range fileParameters {\n\t\tloser := []string{\"md5\", \"00000000000000000000000000000000\", f[0]}\n\t\tlosers = append(losers, loser)\n\t}\n\ttestInputs(t, checksum, winners, losers)\n}", "func TestNumFiles(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\n\t// Create a test directory with sub folders\n\t//\n\t// root/ file\n\t// root/SubDir1/\n\t// root/SubDir1/SubDir2/ file\n\n\t// Create test renter\n\trt, err := newRenterTesterWithDependency(t.Name(), &dependencies.DependencyDisableRepairAndHealthLoops{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer rt.Close()\n\n\t// Create directory tree\n\tsubDir1, err := modules.NewSiaPath(\"SubDir1\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsubDir2, err := modules.NewSiaPath(\"SubDir2\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := rt.renter.CreateDir(subDir1); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsubDir1_2, err := subDir1.Join(subDir2.String())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := rt.renter.CreateDir(subDir1_2); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Add files\n\trsc, _ := siafile.NewRSCode(1, 1)\n\tup := modules.FileUploadParams{\n\t\tSource: \"\",\n\t\tSiaPath: modules.RandomSiaPath(),\n\t\tErasureCode: rsc,\n\t}\n\t_, err = rt.renter.staticFileSet.NewSiaFile(up, crypto.GenerateSiaKey(crypto.RandomCipherType()), 100, 0777)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tup.SiaPath, err = subDir1_2.Join(hex.EncodeToString(fastrand.Bytes(8)))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t_, err = rt.renter.staticFileSet.NewSiaFile(up, crypto.GenerateSiaKey(crypto.RandomCipherType()), 100, 0777)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Call bubble on lowest lever and confirm top level reports accurate number\n\t// of files and aggregate number of files\n\trt.renter.managedBubbleMetadata(subDir1_2)\n\tbuild.Retry(100, 100*time.Millisecond, func() error {\n\t\tdirInfo, err := rt.renter.staticDirSet.DirInfo(modules.RootSiaPath())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif dirInfo.NumFiles != 1 {\n\t\t\treturn fmt.Errorf(\"NumFiles incorrect, got %v expected %v\", dirInfo.NumFiles, 1)\n\t\t}\n\t\tif dirInfo.AggregateNumFiles != 2 {\n\t\t\treturn fmt.Errorf(\"AggregateNumFiles incorrect, got %v expected %v\", dirInfo.AggregateNumFiles, 2)\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func (s *Synchronizer) AreFoldersTheSame(fullFolderPath string, remoteFolderId string) (bool, error) {\n\tvar dbFilesChan = make(contracts.FilesChan)\n\tvar localFilesChan = make(chan contracts.ExtendedFileInfo)\n\tvar syncChan = make(contracts.SyncChan)\n\tvar exitChan = make(contracts.ExitChan)\n\n\tgo func() {\n\t\tif err := s.getFilesByParentRecursively(remoteFolderId, dbFilesChan, syncChan); nil != err {\n\t\t\ts.log.Error(err)\n\t\t}\n\t\tclose(dbFilesChan)\n\t}()\n\tgo func() {\n\t\terr := filepath.Walk(\n\t\t\tfullFolderPath,\n\t\t\tfunc(path string, info os.FileInfo, err error) error {\n\t\t\t\tif path != fullFolderPath {\n\t\t\t\t\tlocalFilesChan <- contracts.ExtendedFileInfo{FileInfo: info, FullPath: path}\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t},\n\t\t)\n\t\tclose(localFilesChan)\n\t\tif nil != err {\n\t\t\ts.log.Error(err)\n\t\t}\n\t}()\n\tisDirTheSame := true\n\tvar err error\n\tgo func() {\n\t\tfor {\n\t\t\tdbFile, dbChanOpened := <-dbFilesChan\n\t\t\tif dbChanOpened 
{\n\t\t\t\tsyncChan <- true\n\t\t\t} else {\n\t\t\t\tclose(syncChan)\n\t\t\t}\n\t\t\tlocalFile, localChanOpened := <-localFilesChan\n\t\t\tif !dbChanOpened || !localChanOpened {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif dbFile.CurRemoteName != localFile.FileInfo.Name() {\n\t\t\t\tisDirTheSame = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif localFile.FileInfo.IsDir() != specification.IsFolder(dbFile) {\n\t\t\t\tisDirTheSame = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif localFile.FileInfo.IsDir() || specification.IsFolder(dbFile) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvar hash string\n\t\t\thash, err = lfileHash.CalcCachedHash(localFile.FullPath)\n\t\t\tif nil != err {\n\t\t\t\tisDirTheSame = false\n\t\t\t\terr = errors.Wrap(err, \"hash calculation error while comparing folders\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif hash != dbFile.Hash {\n\t\t\t\tisDirTheSame = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tclose(exitChan)\n\t}()\n\t<-exitChan\n\n\treturn isDirTheSame, err\n}", "func DigestFile(filename string) (string, error) {\n\tb, err := DigestFileBytes(filename)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn hex.EncodeToString(b), nil\n}", "func Compare(resultPaths []string) {\n\t// m maps names like \"baseline\" and \"instrumented\" to textual data in\n\t// the Go Benchmark Data Format. See https://golang.org/issue/14313.\n\tm := make(map[string][]byte)\n\n\t// Walk each directory from resultPaths and collect benchmark data from\n\t// all result.json files.\n\tfor _, root := range resultPaths {\n\t\t_ = filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tif !d.IsDir() {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tdefer func() {\n\t\t\t\t// ignore panics from readTestResult\n\t\t\t\t_ = recover()\n\t\t\t}()\n\t\t\ttr := readTestResult(filepath.Join(path, \"result.json\"))\n\t\t\tm[d.Name()] = append(m[d.Name()], toGoBenchFormat(tr)...)\n\t\t\treturn nil\n\t\t})\n\t}\n\n\t// Sort names to make output deterministic.\n\tvar names []string\n\tfor name := range m {\n\t\tnames = append(names, name)\n\t}\n\tsort.Strings(names)\n\tvar c benchstat.Collection\n\tfor _, name := range names {\n\t\tc.AddConfig(name, m[name])\n\t}\n\n\t// Print comparison.\n\tbenchstat.FormatText(os.Stdout, c.Tables())\n}", "func EqualPathNames(x, y string) bool {\n\treturn strings.Trim(x, \"/\") == strings.Trim(y, \"/\")\n}", "func (imageName *ImageName) HasDigest() bool {\n\treturn strings.HasPrefix(imageName.Tag, \"sha256:\")\n}", "func allHashesEqual(usl []fleet.UnitStatus) (bool, error) {\n\tuhis, err := groupUnitHashInfos(usl)\n\tif err != nil {\n\t\treturn false, maskAny(err)\n\t}\n\n\tfor _, uhi1 := range uhis {\n\t\tfor _, uhi2 := range uhis {\n\t\t\tif uhi1.Base != uhi2.Base {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif uhi1.Hash != uhi2.Hash {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn true, nil\n}", "func (n Name) Equal(r Name) bool {\n\tif len(n) != len(r) {\n\t\treturn false\n\t}\n\tfor i := range n {\n\t\tif !n[i].Equal(r[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func TestNewDir(t *testing.T) {\n\tdir, err := ioutil.TempDir(\"\", tstprefix)\n\tif err != nil {\n\t\tt.Fatalf(\"error creating tempdir: %v\", err)\n\t}\n\tdefer os.RemoveAll(dir)\n\tds := NewTestStore()\n\n\timj := `\n\t\t{\n\t\t \"acKind\": \"ImageManifest\",\n\t\t \"acVersion\": \"0.1.1\",\n\t\t \"name\": \"example.com/test01\"\n\t\t}\n\t`\n\n\tentries := []*testTarEntry{\n\t\t{\n\t\t\tcontents: imj,\n\t\t\theader: &tar.Header{\n\t\t\t\tName: 
\"manifest\",\n\t\t\t\tSize: int64(len(imj)),\n\t\t\t},\n\t\t},\n\t\t// An empty dir\n\t\t{\n\t\t\theader: &tar.Header{\n\t\t\t\tName: \"rootfs/a\",\n\t\t\t\tTypeflag: tar.TypeDir,\n\t\t\t},\n\t\t},\n\t}\n\n\tkey1, err := newTestACI(entries, dir, ds)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tim, err := createImageManifest(imj)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\timage1 := Image{Im: im, Key: key1, Level: 0}\n\n\timj = `\n\t\t{\n\t\t \"acKind\": \"ImageManifest\",\n\t\t \"acVersion\": \"0.1.1\",\n\t\t \"name\": \"example.com/test02\"\n\t\t}\n\t`\n\n\tk1, _ := types.NewHash(key1)\n\timj, err = addDependencies(imj,\n\t\ttypes.Dependency{\n\t\t\tImageName: \"example.com/test01\",\n\t\t\tImageID: k1},\n\t)\n\n\tentries = []*testTarEntry{\n\t\t{\n\t\t\tcontents: imj,\n\t\t\theader: &tar.Header{\n\t\t\t\tName: \"manifest\",\n\t\t\t\tSize: int64(len(imj)),\n\t\t\t},\n\t\t},\n\t}\n\n\texpectedFiles := []*fileInfo{\n\t\t&fileInfo{path: \"manifest\", typeflag: tar.TypeReg},\n\t\t&fileInfo{path: \"rootfs/a\", typeflag: tar.TypeDir},\n\t}\n\n\tkey2, err := newTestACI(entries, dir, ds)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tim, err = createImageManifest(imj)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\timage2 := Image{Im: im, Key: key2, Level: 1}\n\n\timages := Images{image2, image1}\n\terr = checkRenderACIFromList(images, expectedFiles, ds)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\terr = checkRenderACI(\"example.com/test02\", expectedFiles, ds)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n}", "func SameFile(file1, file2 string) (bool, error) {\n\tinfo1, err := os.Stat(file1)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tinfo2, err := os.Stat(file2)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif os.SameFile(info1, info2) {\n\t\treturn true, nil\n\t}\n\n\treturn DeepCompare(file1, file2)\n}", "func TestFileHash(t *testing.T) {\n\t// Create a temporary directory\n\tdir, err := ioutil.TempDir(\"\", \"gorilla_test\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\t// Create a test server\n\tts := httptest.NewServer(router())\n\tdefer ts.Close()\n\n\t// Run the code\n\tFile(dir, ts.URL+\"/hashtest.txt\")\n\n\t// Validate the hash to confirm it was downloaded properly\n\tif !Verify(filepath.Join(dir, \"hashtest.txt\"), validHash) {\n\t\tt.Errorf(\"Hash does not match downloaded test file!\")\n\t}\n\n}", "func compareFileGraphs(expected *mojom_files.MojomFileGraph, actual *mojom_files.MojomFileGraph) error {\n\tif !reflect.DeepEqual(expected, actual) {\n\t\t// Note(rudominer) The myfmt package is a local modification of the fmt package\n\t\t// that does a deep printing that follows pointers for up to 50 levels.\n\t\t// Thus expectedString and actualString should contain enough information to\n\t\t// precisely capture the structure of expected and actual.\n\t\texpectedString := myfmt.Sprintf(\"%+v\", expected)\n\t\tactualString := myfmt.Sprintf(\"%+v\", actual)\n\t\tif expectedString != actualString {\n\t\t\tdiffPos := -1\n\t\t\tfor i := 0; i < len(expectedString) && i < len(actualString); i++ {\n\t\t\t\tif expectedString[i] != actualString[i] {\n\t\t\t\t\tdiffPos = i\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tmismatchExpected := \"\"\n\t\t\tmismatchActual := \"\"\n\t\t\tif diffPos > -1 {\n\t\t\t\tmismatchExpected = expectedString[diffPos:]\n\t\t\t\tmismatchActual = 
actualString[diffPos:]\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"*****\\nexpected=\\n*****\\n%q\\n*****\\nactual=\\n*****\\n%q\\n*****\\n\"+\n\t\t\t\t\"match failed at position %d: expected=\\n*****\\n%s\\n******\\nactual=\\n*****\\n%s\\n******\\n\",\n\t\t\t\texpectedString, actualString, diffPos, mismatchExpected, mismatchActual)\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"expected != actual but the two printed equal.\")\n\t\t}\n\t}\n\treturn nil\n}", "func TestPathMatches(t *testing.T) {\n\tzipData, err := createZip(map[string]string{\n\t\t\"a\": \"\",\n\t\t\"a/b\": \"\",\n\t\t\"a/c\": \"\",\n\t\t\"ab\": \"\",\n\t\t\"b/a\": \"\",\n\t\t\"ba\": \"\",\n\t\t\"c/d\": \"\",\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tzf, err := mockZipFile(zipData)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\trg, err := compile(&protocol.PatternInfo{\n\t\tPattern: \"\",\n\t\tIncludePatterns: []string{\"a\", \"b\"},\n\t\tPathPatternsAreRegExps: true,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfileMatches, _, err := concurrentFind(context.Background(), rg, zf, 10, true, true)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twant := []string{\"a/b\", \"ab\", \"b/a\", \"ba\"}\n\tgot := make([]string, len(fileMatches))\n\tfor i, fm := range fileMatches {\n\t\tgot[i] = fm.Path\n\t}\n\tsort.Strings(got)\n\tif !reflect.DeepEqual(got, want) {\n\t\tt.Fatalf(\"got file matches %v, want %v\", got, want)\n\t}\n}", "func TestFiles(t *testing.T) {\n\tf1 := fs.Open(\"../fs/file_test.go\")\n\tf2 := fs.Open(\"../../internal/fs/file_test.go\")\n\tassert.NotNil(t, f1)\n\tassert.NotNil(t, f2)\n\tassert.Equal(t, f1, f2)\n\n\tf1 = fs.Open(\"../fs/does_not_exist\")\n\tf2 = fs.Open(\"../../internal/fs/does_not_exist\")\n\tassert.NotNil(t, f1)\n\tassert.NotNil(t, f2)\n\tassert.Equal(t, f1, f2)\n\n\tf1 = fs.Open(\"../fs/file_test.go\")\n\tf2 = fs.Open(\"../fs/does_not_exist\")\n\tassert.NotNil(t, f1)\n\tassert.NotNil(t, f2)\n\tassert.NotEqual(t, f1, f2)\n}", "func (file *File) Verify(base string) (bool, error) {\n\tlocalized, err := file.localizeTo(base)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif stat, err := os.Lstat(localized); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn false, nil\n\t\t} else {\n\t\t\treturn false, err\n\t\t}\n\t} else if !stat.Mode().IsRegular() {\n\t\treturn false, fmt.Errorf(\"Tried to read a file with a weird mode in %s, aborting\", localized)\n\t} else if stat.Size() != int64(file.Size) {\n\t\treturn false, nil\n\t} else if hash, err := md5file(localized); err != nil {\n\t\treturn false, err\n\t} else if fmt.Sprintf(\"%x\", hash) != file.MD5 {\n\t\treturn false, nil\n\t} else {\n\t\treturn true, nil\n\t}\n}", "func (seal *Seal) Valid(path string) (valid bool, err error) {\n // calculates the digest using the zip file\n digest := seal.DSha256(path)\n // compare to the digest stored in the seal\n if seal.Digest == digest {\n return true, nil\n }\n return false, fmt.Errorf(\"downloaded package digest: %s does not match digest in manifest %s\", digest, seal.Digest)\n}", "func recursiveHash(dir string, fout string) {\n\tf, err := os.Create(fout)\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n\tdefer f.Close()\n\tfilepath.Walk(dir, func(path string, file os.FileInfo, _ error) error {\n\t\tif !file.IsDir() {\n\t\t\tfileData, err := ioutil.ReadFile(path)\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t}\n\t\t\trel, err := filepath.Rel(dir, path)\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t}\n\t\t\tfmt.Printf(\"%x *%s\\n\", md5.Sum(fileData), rel)\n\t\t\tvalue := 
md5.Sum(fileData)\n\n\t\t\ts := hex.EncodeToString(value[:]) + \" *\" + rel + \"\\n\"\n\t\t\tf.WriteString(s)\n\t\t}\n\t\treturn nil\n\t})\n\tf.Sync()\n}", "func checkDirectory(dir *Directory, fileSize uint64) error {\n\tfiles := map[string]struct{}{}\n\n\tfor filename, info := range dir.Files {\n\t\tvar err error\n\n\t\t// validate filename (not duplicate, canonical, etc)\n\t\tif _, dup := files[filename]; dup {\n\t\t\terr = fmt.Errorf(\"duplicate path %q\", filename)\n\t\t}\n\t\tfiles[filename] = struct{}{}\n\n\t\tif !path.IsAbs(filename) {\n\t\t\terr = fmt.Errorf(\"relative path %q\", filename)\n\t\t}\n\t\tif path.Clean(filename) != filename {\n\t\t\terr = fmt.Errorf(\"non-canonical path %q\", filename)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn &LoadError{\n\t\t\t\tCause: InvalidPath,\n\t\t\t\tUnderlying: err,\n\t\t\t\tPath: filename,\n\t\t\t}\n\t\t}\n\n\t\t// ensure uncompressed data is present\n\t\tif info.Uncompressed == nil {\n\t\t\treturn &LoadError{\n\t\t\t\tCause: MissingUncompressed,\n\t\t\t\tPath: filename,\n\t\t\t}\n\t\t}\n\n\t\t// validate offsets\n\t\tcheckOffset(&err, filename, info.Uncompressed, fileSize)\n\t\tcheckOffset(&err, filename, info.Gzip, fileSize)\n\t\tcheckOffset(&err, filename, info.Brotli, fileSize)\n\t\tif err != nil {\n\t\t\treturn &LoadError{\n\t\t\t\tCause: BadOffsetError,\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}" ]
[ "0.6744939", "0.6428002", "0.61854255", "0.6098924", "0.60972404", "0.59272534", "0.59265554", "0.5844345", "0.58044773", "0.578544", "0.5746756", "0.57349694", "0.5720256", "0.56391513", "0.56384194", "0.56288624", "0.5608989", "0.55871016", "0.55184263", "0.55086523", "0.5505446", "0.54935443", "0.54532", "0.54314595", "0.54125094", "0.53980404", "0.5379107", "0.5362888", "0.5359954", "0.5346391", "0.53270483", "0.53133416", "0.53051573", "0.53001565", "0.5298893", "0.5288147", "0.5287582", "0.5279252", "0.5227029", "0.52223104", "0.5210943", "0.51908684", "0.5163184", "0.51615226", "0.51531273", "0.5125874", "0.51110804", "0.5109301", "0.50914603", "0.5078268", "0.5074278", "0.50726223", "0.5053349", "0.50453407", "0.503746", "0.50354487", "0.50282365", "0.502279", "0.50026953", "0.5000128", "0.4998874", "0.49923155", "0.4966641", "0.49613956", "0.4960525", "0.4951663", "0.49390608", "0.49351573", "0.49339354", "0.49320644", "0.49042365", "0.48960578", "0.48954812", "0.48938048", "0.4890298", "0.48850566", "0.48844564", "0.4881743", "0.48737654", "0.4869772", "0.48589814", "0.4852158", "0.48463374", "0.48409206", "0.48372468", "0.48351762", "0.4834413", "0.4832758", "0.48215064", "0.48008415", "0.47966596", "0.47936165", "0.47821257", "0.47807795", "0.47797877", "0.47718602", "0.4766284", "0.47627747", "0.47525337", "0.4749727" ]
0.6108174
3
Equal tells whether values v and w are structurally equal.
func Equal(v, w T) bool {
	switch v := v.(type) {
	case reflow.File:
		l, r := v, w.(reflow.File)
		return l.Equal(r)
	default:
	}
	return reflect.DeepEqual(v, w)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (v Value) Equal(w Value) bool {\n\treturn v.v == w.v\n}", "func TestEqual(t *testing.T) {\n\tslc := make([]float64, 10)\n\tfor i := range slc {\n\t\tslc[i] = float64(i)\n\t}\n\n\tv := NewFrom(slc)\n\tw := NewFrom(slc)\n\tif !Equal(v, w) {\n\t\tt.Error(\"Equal() != true for equal vectors.\")\n\t}\n\n\tw = New(10)\n\tif Equal(v, w) {\n\t\tt.Error(\"Equal() == true for unequal vectors.\")\n\t}\n}", "func (v Version) Equal(w Version) bool {\n\treturn v.Cmp(w) == 0\n}", "func (v Vector) Equal(o Vector) bool {\n\tvDefs := v.definables()\n\toDefs := o.definables()\n\n\tfor _, metric := range order {\n\t\ta := equivalent(metric, vDefs[metric].String())\n\t\tb := equivalent(metric, oDefs[metric].String())\n\n\t\tif a != b {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func Equal(vx, vy interface{}) bool {\n\tif reflect.TypeOf(vx) != reflect.TypeOf(vy) {\n\t\treturn false\n\t}\n\n\tswitch x := vx.(type) {\n\tcase map[string]interface{}:\n\t\ty := vy.(map[string]interface{})\n\n\t\tif len(x) != len(y) {\n\t\t\treturn false\n\t\t}\n\n\t\tfor k, v := range x {\n\t\t\tval2 := y[k]\n\n\t\t\tif (v == nil) != (val2 == nil) {\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\tif !Equal(v, val2) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\tcase []interface{}:\n\t\ty := vy.([]interface{})\n\n\t\tif len(x) != len(y) {\n\t\t\treturn false\n\t\t}\n\n\t\tvar matches int\n\t\tflagged := make([]bool, len(y))\n\t\tfor _, v := range x {\n\t\t\tfor i, v2 := range y {\n\t\t\t\tif Equal(v, v2) && !flagged[i] {\n\t\t\t\t\tmatches++\n\t\t\t\t\tflagged[i] = true\n\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn matches == len(x)\n\tdefault:\n\t\treturn vx == vy\n\t}\n}", "func (vol *VolumeInfoSimple) EqualWith(volInfo *VolumeInfoSimple) bool {\n\tif len(vol.VunitLocations) != len(volInfo.VunitLocations) {\n\t\treturn false\n\t}\n\tif vol.Vid != volInfo.Vid ||\n\t\tvol.CodeMode != volInfo.CodeMode ||\n\t\tvol.Status != volInfo.Status {\n\t\treturn false\n\t}\n\tfor i := range vol.VunitLocations {\n\t\tif vol.VunitLocations[i] != volInfo.VunitLocations[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (w *Wallet) Equal(z *Wallet) bool {\n\tif w != nil && z != nil {\n\t\treturn w.Currency == z.Currency && w.Balance == z.Balance && w.Sequence == z.Sequence\n\t}\n\treturn w == z\n}", "func (self SimpleType) Equal(u convert.Equaler) bool {\n\tother, ok := u.(SimpleType)\n\tif !ok {\n\t\treturn false\n\t}\n\treturn self.Kind == other.Kind\n}", "func equal(lhsV, rhsV reflect.Value) bool {\n\tlhsNotValid, rhsVNotValid := !lhsV.IsValid(), !rhsV.IsValid()\n\tif lhsNotValid && rhsVNotValid {\n\t\treturn true\n\t}\n\tif (!lhsNotValid && rhsVNotValid) || (lhsNotValid && !rhsVNotValid) {\n\t\treturn false\n\t}\n\n\tlhsIsNil, rhsIsNil := isNil(lhsV), isNil(rhsV)\n\tif lhsIsNil && rhsIsNil {\n\t\treturn true\n\t}\n\tif (!lhsIsNil && rhsIsNil) || (lhsIsNil && !rhsIsNil) {\n\t\treturn false\n\t}\n\tif lhsV.Kind() == reflect.Interface || lhsV.Kind() == reflect.Ptr {\n\t\tlhsV = lhsV.Elem()\n\t}\n\tif rhsV.Kind() == reflect.Interface || rhsV.Kind() == reflect.Ptr {\n\t\trhsV = rhsV.Elem()\n\t}\n\n\t// Compare a string and a number.\n\t// This will attempt to convert the string to a number,\n\t// while leaving the other side alone. 
Code further\n\t// down takes care of converting ints and floats as needed.\n\tif isNum(lhsV) && rhsV.Kind() == reflect.String {\n\t\trhsF, err := tryToFloat64(rhsV)\n\t\tif err != nil {\n\t\t\t// Couldn't convert RHS to a float, they can't be compared.\n\t\t\treturn false\n\t\t}\n\t\trhsV = reflect.ValueOf(rhsF)\n\t} else if lhsV.Kind() == reflect.String && isNum(rhsV) {\n\t\t// If the LHS is a string formatted as an int, try that before trying float\n\t\tlhsI, err := tryToInt64(lhsV)\n\t\tif err != nil {\n\t\t\t// if LHS is a float, e.g. \"1.2\", we need to set lhsV to a float64\n\t\t\tlhsF, err := tryToFloat64(lhsV)\n\t\t\tif err != nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tlhsV = reflect.ValueOf(lhsF)\n\t\t} else {\n\t\t\tlhsV = reflect.ValueOf(lhsI)\n\t\t}\n\t}\n\n\tif isNum(lhsV) && isNum(rhsV) {\n\t\treturn fmt.Sprintf(\"%v\", lhsV) == fmt.Sprintf(\"%v\", rhsV)\n\t}\n\n\t// Try to compare bools to strings and numbers\n\tif lhsV.Kind() == reflect.Bool || rhsV.Kind() == reflect.Bool {\n\t\tlhsB, err := tryToBool(lhsV)\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\trhsB, err := tryToBool(rhsV)\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\treturn lhsB == rhsB\n\t}\n\n\tif lhsV.CanInterface() && rhsV.CanInterface() {\n\t\treturn reflect.DeepEqual(lhsV.Interface(), rhsV.Interface())\n\t}\n\treturn reflect.DeepEqual(lhsV, rhsV)\n}", "func Equal(expected, actual interface{}) Truth {\n\tmustBeCleanStart()\n\treturn Truth{\n\t\tValue:reflect.DeepEqual(expected, actual),\n\t\tDump:fmt.Sprintf(\"%#v vs. %#v\", expected, actual),\n\t}\n}", "func (current GoVersion) Equal(target GoVersion) bool {\n\tcurrent.Raw, target.Raw = \"\", \"\"\n\treturn current == target\n}", "func Equal(t, other Tuplelike) bool {\n\tfor idx, value := range t.Values() {\n\t\tif !inEpsilon(value, other.At(idx)) {\n\t\t\treturn false\n\t\t}\n\n\t}\n\treturn true\n}", "func (s *Scalar) Equal(u *Scalar) int {\n\treturn s.s.Equal(&u.s)\n}", "func (q1 Quat) ApproxEqual(q2 Quat) bool {\n\treturn FloatEqual(q1.W, q2.W) && q1.V.ApproxEqual(q2.V)\n}", "func equ(a *GLSQObj, b *GLSQObj) bool {\n\tif a == b {\n\t\t// If they point to the same location they obviously are equal.\n\t\treturn true\n\t}\n\n\tif a.GlsqType != b.GlsqType {\n\t\treturn false // can't be equal if types are not the same\n\t}\n\n\tswitch a.GlsqType {\n\tcase GLSQ_TYPE_INT:\n\t\treturn a.GlsqInt == b.GlsqInt\n\n\tcase GLSQ_TYPE_FLOAT:\n\t\treturn a.GlsqFloat == b.GlsqFloat\n\n\tcase GLSQ_TYPE_BOOL:\n\t\treturn a.GlsqBool == b.GlsqBool\n\t}\n\n\treturn false\n}", "func (p Pair) Equal(v Pair) bool {\n\treturn bytes.Equal(p.Key, v.Key) && bytes.Equal(p.Value, v.Value)\n}", "func Equal(a, b interface{}) bool {\n\tif reflect.TypeOf(a) == reflect.TypeOf(b) {\n\t\treturn reflect.DeepEqual(a, b)\n\t}\n\tswitch a.(type) {\n\tcase int, int8, int16, int32, int64:\n\t\tswitch b.(type) {\n\t\tcase int, int8, int16, int32, int64:\n\t\t\treturn reflect.ValueOf(a).Int() == reflect.ValueOf(b).Int()\n\t\t}\n\tcase uint, uint8, uint16, uint32, uint64:\n\t\tswitch b.(type) {\n\t\tcase uint, uint8, uint16, uint32, uint64:\n\t\t\treturn reflect.ValueOf(a).Uint() == reflect.ValueOf(b).Uint()\n\t\t}\n\tcase float32, float64:\n\t\tswitch b.(type) {\n\t\tcase float32, float64:\n\t\t\treturn reflect.ValueOf(a).Float() == reflect.ValueOf(b).Float()\n\t\t}\n\tcase string:\n\t\tswitch b.(type) {\n\t\tcase []byte:\n\t\t\treturn a.(string) == string(b.([]byte))\n\t\t}\n\tcase []byte:\n\t\tswitch b.(type) {\n\t\tcase string:\n\t\t\treturn b.(string) == 
string(a.([]byte))\n\t\t}\n\t}\n\treturn false\n}", "func (v *RelaxedVersion) Equal(u *RelaxedVersion) bool {\n\treturn v.CompareTo(u) == 0\n}", "func (v UintValue) Equal(other ValueNode) bool {\n\totherV, ok := other.(UintValue)\n\treturn ok && v == otherV\n}", "func (v *Values) Equal(other *Values) bool {\n\tv.lock.RLock()\n\tdefer v.lock.RUnlock()\n\tother.lock.RLock()\n\tdefer other.lock.RUnlock()\n\n\treturn v.root.equal(other.root)\n}", "func Equal(a, b Vector) bool {\n\tpanicLength(a, b)\n\tfor i := 0; i < a.Len(); i++ {\n\t\tif a[i] != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (a Vector) Equal(b []float64) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\n\tfor k := range a {\n\t\tif !Equal(a[k], b[k]) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (l *LabelPair) Equal(o *LabelPair) bool {\n\tswitch {\n\tcase l.Name != o.Name:\n\t\treturn false\n\tcase l.Value != o.Value:\n\t\treturn false\n\tdefault:\n\t\treturn true\n\t}\n}", "func eq(o1, o2 interface{}) bool {\n\n\tf1, ok1 := ToFloat(o1)\n\tf2, ok2 := ToFloat(o2)\n\tif ok1 && ok2 {\n\t\treturn f1 == f2\n\t}\n\n\tb1, ok1 := ToBool(o1)\n\tb2, ok1 := ToBool(o2)\n\tif ok1 && ok2 {\n\t\treturn b1 == b2\n\t}\n\n\treturn o1 == o2\n}", "func TestSetEqual(t *T) {\n\t// Degenerate case\n\ts1, s2 := NewSet(), NewSet()\n\tassert.Equal(t, true, s1.Equal(s2))\n\tassert.Equal(t, true, s2.Equal(s1))\n\n\t// False with different sizes\n\ts1, _ = s1.SetVal(1)\n\tassert.Equal(t, false, s1.Equal(s2))\n\tassert.Equal(t, false, s2.Equal(s1))\n\n\t// False with same sizes\n\ts2, _ = s2.SetVal(2)\n\tassert.Equal(t, false, s1.Equal(s2))\n\tassert.Equal(t, false, s2.Equal(s1))\n\n\t// Now true\n\ts1, _ = s1.SetVal(2)\n\ts2, _ = s2.SetVal(1)\n\tassert.Equal(t, true, s1.Equal(s2))\n\tassert.Equal(t, true, s2.Equal(s1))\n\n\t// False with embedded set\n\ts1, _ = s1.SetVal(NewSet(3))\n\tassert.Equal(t, false, s1.Equal(s2))\n\tassert.Equal(t, false, s2.Equal(s1))\n\n\t// True with embedded set\n\ts2, _ = s2.SetVal(NewSet(3))\n\tassert.Equal(t, true, s1.Equal(s2))\n\tassert.Equal(t, true, s2.Equal(s1))\n}", "func (d Dog) Equal(v interface{}) bool {\n\td2, ok := v.(Dog)\n\tif !ok {\n\t\treturn false\n\t}\n\tif d.Uuid != d2.Uuid {\n\t\treturn false\n\t}\n\treturn true\n}", "func (g *Graph) Equal(g2 *Graph, debug bool) bool {\n\n\t// Check the vertices\n\tkeys1 := g.listOfKeys()\n\tkeys2 := g2.listOfKeys()\n\n\tif !SlicesHaveSameElements(&keys1, &keys2) {\n\t\tif debug {\n\t\t\tlog.Println(\"Lists of keys are different\")\n\t\t\tlog.Printf(\"Keys1: %v\\n\", keys1)\n\t\t\tlog.Printf(\"Keys2: %v\\n\", keys2)\n\t\t}\n\t\treturn false\n\t}\n\n\t// Walk through each vertex and check its connections\n\tfor _, vertex := range keys1 {\n\t\tconns1 := g.Nodes[vertex]\n\t\tconns2 := g2.Nodes[vertex]\n\n\t\tif !SetsEqual(conns1, conns2) {\n\t\t\tif debug {\n\t\t\t\tlog.Printf(\"Connections different for vertex %v\", vertex)\n\t\t\t\tlog.Printf(\"Connections 1: %v\\n\", conns1)\n\t\t\t\tlog.Printf(\"Connections 2: %v\\n\", conns2)\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (value *Value) Equal(other *Value) bool {\n\treturn reflect.DeepEqual(value, other)\n}", "func (x *Secp256k1N) Eq(y *Secp256k1N) bool {\n\t// TODO: More efficient implementation/\n\tvar xNorm, yNorm = *x, *y\n\txNorm.Normalize()\n\tyNorm.Normalize()\n\treturn xNorm.limbs[0] == yNorm.limbs[0] &&\n\t\txNorm.limbs[1] == yNorm.limbs[1] &&\n\t\txNorm.limbs[2] == yNorm.limbs[2] &&\n\t\txNorm.limbs[3] == yNorm.limbs[3] &&\n\t\txNorm.limbs[4] 
== yNorm.limbs[4]\n}", "func (u1 *Upstream) Equal(u2 *Upstream) bool {\n\treturn reflect.DeepEqual(u1.Upstream, u2.Upstream)\n}", "func (r Representative) Equal(a, b uint64) bool {\n\tif r == nil {\n\t\treturn Equal(a, b)\n\t}\n\treturn r(a) == r(b)\n}", "func Equal(left Value, right Value) bool {\n\t// TODO: Stop-gap for now, this will need to be much more sophisticated.\n\treturn CoerceString(left) == CoerceString(right)\n}", "func (w *WindowCoder) Equals(o *WindowCoder) bool {\n\treturn w.Kind == o.Kind && w.Payload == o.Payload\n}", "func Equal(t TestingT, expected, actual interface{}, extras ...interface{}) bool {\n\tif !DeepEqual(expected, actual) {\n\t\treturn Errorf(t, \"Expect to be equal\", []labeledOutput{\n\t\t\t{\n\t\t\t\tlabel: labelMessages,\n\t\t\t\tcontent: formatExtras(extras...),\n\t\t\t},\n\t\t\t{\n\t\t\t\tlabel: \"Diff\",\n\t\t\t\tcontent: diff(expected, actual),\n\t\t\t},\n\t\t})\n\t}\n\n\treturn true\n}", "func compareEquality(expected, actual interface{}) bool {\n\n\tif expected == nil || actual == nil {\n\t\treturn expected == actual\n\t}\n\n\tif reflect.DeepEqual(expected, actual) {\n\t\treturn true\n\t}\n\n\texpectedValue := reflect.ValueOf(expected)\n\tactualValue := reflect.ValueOf(actual)\n\n\tif expectedValue == actualValue {\n\t\treturn true\n\t}\n\n\t// Attempt comparison after type conversion\n\tif actualValue.Type().ConvertibleTo(expectedValue.Type()) && expectedValue == actualValue.Convert(expectedValue.Type()) {\n\t\treturn true\n\t}\n\n\t// Last ditch effort\n\tif fmt.Sprintf(\"%#v\", expected) == fmt.Sprintf(\"%#v\", actual) {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func Equal(typ string, raw1, raw2 []byte) (bool, error) {\n\treturn EqualApprox(typ, raw1, raw2, 0)\n}", "func (j1 *JWTAuth) Equal(j2 *JWTAuth) bool {\n\treturn reflect.DeepEqual(j1.JWTAuth, j2.JWTAuth)\n}", "func (s SetValue) Equal(o attr.Value) bool {\n\tother, ok := o.(SetValue)\n\n\tif !ok {\n\t\treturn false\n\t}\n\n\tif !s.elementType.Equal(other.elementType) {\n\t\treturn false\n\t}\n\n\tif s.state != other.state {\n\t\treturn false\n\t}\n\n\tif s.state != attr.ValueStateKnown {\n\t\treturn true\n\t}\n\n\tif len(s.elements) != len(other.elements) {\n\t\treturn false\n\t}\n\n\tfor _, elem := range s.elements {\n\t\tif !other.contains(elem) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (v Set) Equal(other Set) bool {\n\tsort.Sort(other)\n\n\tfor i := 0; i < len(v); i++ {\n\t\tif v[i].Cmp(other[i]) != 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (val Value) Equal(o Value) bool {\n\tif val.Type() == nil && o.Type() == nil && val.value == nil && o.value == nil {\n\t\treturn true\n\t}\n\tif val.Type() == nil {\n\t\treturn false\n\t}\n\tif o.Type() == nil {\n\t\treturn false\n\t}\n\tif !val.Type().Equal(o.Type()) {\n\t\treturn false\n\t}\n\tdiff, err := val.Diff(o)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn len(diff) < 1\n}", "func (v Var) Equal(other Value) bool {\n\tswitch other := other.(type) {\n\tcase Var:\n\t\treturn v == other\n\tdefault:\n\t\treturn false\n\t}\n}", "func Equal(a, b uint64) bool {\n\treturn a == b\n}", "func (v DoubleValue) Equal(other ValueNode) bool {\n\totherV, ok := other.(DoubleValue)\n\treturn ok && v == otherV\n}", "func (v BytesValue) Equal(other ValueNode) bool {\n\totherV, ok := other.(BytesValue)\n\treturn ok && bytes.Equal([]byte(v), []byte(otherV))\n}", "func (t *DataType) Equal(o *DataType) bool {\n\tv := int(C.H5Tequal(t.id, o.id))\n\tif v > 0 {\n\t\treturn true\n\t}\n\treturn false\n}", "func equals(t 
types.Type, x, y value) bool {\n\tswitch x := x.(type) {\n\tcase bool:\n\t\treturn x == y.(bool)\n\tcase int:\n\t\treturn x == y.(int)\n\tcase int8:\n\t\treturn x == y.(int8)\n\tcase int16:\n\t\treturn x == y.(int16)\n\tcase int32:\n\t\treturn x == y.(int32)\n\tcase int64:\n\t\treturn x == y.(int64)\n\tcase uint:\n\t\treturn x == y.(uint)\n\tcase uint8:\n\t\treturn x == y.(uint8)\n\tcase uint16:\n\t\treturn x == y.(uint16)\n\tcase uint32:\n\t\treturn x == y.(uint32)\n\tcase uint64:\n\t\treturn x == y.(uint64)\n\tcase uintptr:\n\t\treturn x == y.(uintptr)\n\tcase float32:\n\t\treturn x == y.(float32)\n\tcase float64:\n\t\treturn x == y.(float64)\n\tcase complex64:\n\t\treturn x == y.(complex64)\n\tcase complex128:\n\t\treturn x == y.(complex128)\n\tcase string:\n\t\treturn x == y.(string)\n\tcase *value:\n\t\treturn x == y.(*value)\n\tcase chan value:\n\t\treturn x == y.(chan value)\n\tcase structure:\n\t\treturn x.eq(t, y)\n\tcase array:\n\t\treturn x.eq(t, y)\n\tcase iface:\n\t\treturn x.eq(t, y)\n\tcase rtype:\n\t\treturn x.eq(t, y)\n\t}\n\n\t// Since map, func and slice don't support comparison, this\n\t// case is only reachable if one of x or y is literally nil\n\t// (handled in eqnil) or via interface{} values.\n\tpanic(fmt.Sprintf(\"comparing uncomparable type %s\", t))\n}", "func equal(t *testing.T, expected, actual interface{}) {\n\tif !reflect.DeepEqual(expected, actual) {\n\t\tt.Errorf(\"Expected %v (type %v) - Got %v (type %v)\", expected, reflect.TypeOf(expected), actual, reflect.TypeOf(actual))\n\t}\n}", "func Veql(v1, v2 Vect) bool {\n\treturn goBool(C.cpveql(v1.c(), v2.c()))\n}", "func (p Params) Equal(p2 Params) bool {\n\treturn reflect.DeepEqual(p, p2)\n}", "func (v Vector) Equals(o Vector) bool {\n\tif math.Abs(o.X-v.X) > 1e-8 {\n\t\treturn false\n\t}\n\tif math.Abs(o.Y-v.Y) > 1e-8 {\n\t\treturn false\n\t}\n\treturn math.Abs(o.Z-v.Z) < 1e-8\n}", "func main() {\n\temp1 := Employee{}\n\temp1.Name=\"Gowtham\"\n\n\temp2 := Employee{}\n\temp2.Name=\"Gowtham\"\n\n\tprintln(\"the emp1 and emp2 are equal ?\" , emp1 == emp2)\n}", "func EqualVector(v1 map[string]SignedElement, v2 map[string]SignedElement) bool {\n\tfor _, process := range processes {\n\t\tif v1[process].Element != v2[process].Element {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (p Params) Equal(p2 Params) bool {\n\tbz1 := MsgCdc.MustMarshalBinary(&p)\n\tbz2 := MsgCdc.MustMarshalBinary(&p2)\n\treturn bytes.Equal(bz1, bz2)\n}", "func (v *Vec4) AreEqual(x *Vec4) bool {\n\treturn ((v.X < x.X+epsilon && v.X > x.X-epsilon) &&\n\t\t(v.Y < x.Y+epsilon && v.Y > x.Y-epsilon) &&\n\t\t(v.Z < x.Z+epsilon && v.Z > x.Z-epsilon) &&\n\t\t(v.W < x.W+epsilon && v.W > x.W-epsilon))\n}", "func (v Version) Equal(o Version) bool {\n\treturn string(v.Metadata) == string(o.Metadata) &&\n\t\tv.MiddlewareVersion.Value == o.MiddlewareVersion.Value &&\n\t\tv.MiddlewareVersion.Set == o.MiddlewareVersion.Set &&\n\t\tv.NodeVersion == o.NodeVersion &&\n\t\tv.RosettaVersion == o.RosettaVersion\n}", "func comparingEmptyStruct() {\n\ttype myStruct struct{ name string }\n\n\tvar s1 myStruct\n\ts2 := myStruct{}\n\tfmt.Printf(\"%p\\n\", &s1) // 0x40e128\n\tfmt.Printf(\"%p\\n\", &s2) // 0x40e130\n\tfmt.Printf(\"%t\\n\", &s1 == &s2) // false\n\tfmt.Printf(\"%t\\n\", s1 == s2) // true\n\n\ts1.name = \"max\"\n\tfmt.Printf(\"%p\\n\", &s1) // 0x40e128\n\tfmt.Printf(\"%p\\n\", &s2) // 0x40e130\n\tfmt.Printf(\"%t\\n\", &s1 == &s2) // false\n\tfmt.Printf(\"%t\\n\", s1 == s2) // false\n}", "func (s *StorageSuite) TestServersEquality(c *check.C) 
{\n\tservers := Servers{{\n\t\tAdvertiseIP: \"192.168.1.1\",\n\t\tHostname: \"node-1\",\n\t\tRole: \"worker\",\n\t}}\n\ttestCases := []struct {\n\t\tservers Servers\n\t\tresult bool\n\t\tcomment string\n\t}{\n\t\t{\n\t\t\tservers: Servers{{\n\t\t\t\tAdvertiseIP: \"192.168.1.1\",\n\t\t\t\tHostname: \"node-1\",\n\t\t\t\tRole: \"worker\",\n\t\t\t}},\n\t\t\tresult: true,\n\t\t\tcomment: \"Servers should be equal\",\n\t\t},\n\t\t{\n\t\t\tservers: Servers{\n\t\t\t\t{\n\t\t\t\t\tAdvertiseIP: \"192.168.1.1\",\n\t\t\t\t\tHostname: \"node-1\",\n\t\t\t\t\tRole: \"worker\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tAdvertiseIP: \"192.168.1.2\",\n\t\t\t\t\tHostname: \"node-2\",\n\t\t\t\t\tRole: \"worker\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tresult: false,\n\t\t\tcomment: \"Servers should not be equal: different number of servers\",\n\t\t},\n\t\t{\n\t\t\tservers: Servers{{\n\t\t\t\tAdvertiseIP: \"192.168.1.2\",\n\t\t\t\tHostname: \"node-1\",\n\t\t\t\tRole: \"worker\",\n\t\t\t}},\n\t\t\tresult: false,\n\t\t\tcomment: \"Servers should not be equal: different IPs\",\n\t\t},\n\t\t{\n\t\t\tservers: Servers{{\n\t\t\t\tAdvertiseIP: \"192.168.1.1\",\n\t\t\t\tHostname: \"node-2\",\n\t\t\t\tRole: \"worker\",\n\t\t\t}},\n\t\t\tresult: false,\n\t\t\tcomment: \"Servers should not be equal: different hostnames\",\n\t\t},\n\t\t{\n\t\t\tservers: Servers{{\n\t\t\t\tAdvertiseIP: \"192.168.1.1\",\n\t\t\t\tHostname: \"node-1\",\n\t\t\t\tRole: \"db\",\n\t\t\t}},\n\t\t\tresult: false,\n\t\t\tcomment: \"Servers should not be equal: different roles\",\n\t\t},\n\t}\n\tfor _, tc := range testCases {\n\t\tc.Assert(servers.IsEqualTo(tc.servers), check.Equals, tc.result,\n\t\t\tcheck.Commentf(tc.comment))\n\t}\n}", "func (u UInt128) Equal(o *UInt128) bool {\n\treturn u.High == o.High && u.Low == o.Low\n}", "func eq(args ...interface{}) bool {\n\tif len(args) == 0 {\n\t\treturn false\n\t}\n\tx := args[0]\n\tswitch x := x.(type) {\n\tcase string, int, int64, byte, float32, float64:\n\t\tfor _, y := range args[1:] {\n\t\t\tif x == y {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\tfor _, y := range args[1:] {\n\t\tif reflect.DeepEqual(x, y) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func eq(args ...interface{}) bool {\n\tif len(args) == 0 {\n\t\treturn false\n\t}\n\tx := args[0]\n\tswitch x := x.(type) {\n\tcase string, int, int64, byte, float32, float64:\n\t\tfor _, y := range args[1:] {\n\t\t\tif x == y {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\tfor _, y := range args[1:] {\n\t\tif reflect.DeepEqual(x, y) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (v SimpleType) Equals(rhs SimpleType) bool {\n\treturn v == rhs\n}", "func Equal(dt, dt2 DataType) bool {\n\treturn Hash(dt, false, true, true) == Hash(dt2, false, true, true)\n}", "func (s Balance) Equal(t Balance, opts ...Options) bool {\n\tif !equalPointers(s.Algorithm, t.Algorithm) {\n\t\treturn false\n\t}\n\n\tif s.HashExpression != t.HashExpression {\n\t\treturn false\n\t}\n\n\tif s.HdrName != t.HdrName {\n\t\treturn false\n\t}\n\n\tif s.HdrUseDomainOnly != t.HdrUseDomainOnly {\n\t\treturn false\n\t}\n\n\tif s.RandomDraws != t.RandomDraws {\n\t\treturn false\n\t}\n\n\tif s.RdpCookieName != t.RdpCookieName {\n\t\treturn false\n\t}\n\n\tif s.URIDepth != t.URIDepth {\n\t\treturn false\n\t}\n\n\tif s.URILen != t.URILen {\n\t\treturn false\n\t}\n\n\tif s.URIPathOnly != t.URIPathOnly {\n\t\treturn false\n\t}\n\n\tif s.URIWhole != t.URIWhole {\n\t\treturn false\n\t}\n\n\tif s.URLParam != t.URLParam {\n\t\treturn false\n\t}\n\n\tif 
s.URLParamCheckPost != t.URLParamCheckPost {\n\t\treturn false\n\t}\n\n\tif s.URLParamMaxWait != t.URLParamMaxWait {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (s *OpenAPISchema) Equal(other *OpenAPISchema) bool {\n\tif s.ModelType() != other.ModelType() {\n\t\treturn false\n\t}\n\t// perform deep equality here.\n\tswitch s.ModelType() {\n\tcase \"any\":\n\t\treturn false\n\tcase MapType:\n\t\tif len(s.Properties) != len(other.Properties) {\n\t\t\treturn false\n\t\t}\n\t\tfor prop, nested := range s.Properties {\n\t\t\totherNested, found := other.Properties[prop]\n\t\t\tif !found || !nested.Equal(otherNested) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\tif s.AdditionalProperties != nil && other.AdditionalProperties != nil &&\n\t\t\t!s.AdditionalProperties.Equal(other.AdditionalProperties) {\n\t\t\treturn false\n\t\t}\n\t\tif s.AdditionalProperties != nil && other.AdditionalProperties == nil ||\n\t\t\ts.AdditionalProperties == nil && other.AdditionalProperties != nil {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\tcase ListType:\n\t\treturn s.Items.Equal(other.Items)\n\tdefault:\n\t\treturn true\n\t}\n}", "func (v PublicKey) Equal(o PublicKey) bool {\n\treturn string(v.Bytes) == string(o.Bytes) &&\n\t\tv.CurveType == o.CurveType\n}", "func (s *TypeStruct) Equal(dt interface{}) bool {\n\tif dt == nil {\n\t\treturn false\n\t}\n\n\tv, ok := dt.(*TypeStruct)\n\tif !ok {\n\t\treturn false\n\t}\n\n\tif !s.Name.Equal(v.Name) {\n\t\treturn false\n\t}\n\n\tvar expectDoc, actualDoc []Expr\n\texpectDoc = append(expectDoc, s.DocExpr...)\n\tactualDoc = append(actualDoc, v.DocExpr...)\n\tsort.Slice(expectDoc, func(i, j int) bool {\n\t\treturn expectDoc[i].Line() < expectDoc[j].Line()\n\t})\n\n\tfor index, each := range actualDoc {\n\t\tif !each.Equal(actualDoc[index]) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif s.Struct != nil {\n\t\tif s.Struct != nil {\n\t\t\tif !s.Struct.Equal(v.Struct) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(s.Fields) != len(v.Fields) {\n\t\treturn false\n\t}\n\n\tvar expected, acual []*TypeField\n\texpected = append(expected, s.Fields...)\n\tacual = append(acual, v.Fields...)\n\n\tsort.Slice(expected, func(i, j int) bool {\n\t\treturn expected[i].DataType.Expr().Line() < expected[j].DataType.Expr().Line()\n\t})\n\tsort.Slice(acual, func(i, j int) bool {\n\t\treturn acual[i].DataType.Expr().Line() < acual[j].DataType.Expr().Line()\n\t})\n\n\tfor index, each := range expected {\n\t\tac := acual[index]\n\t\tif !each.Equal(ac) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (a Bits) Equal(b Bits) bool {\n\tif a.Num != b.Num {\n\t\tpanic(\"receiver and argument have different number of bits\")\n\t}\n\tif a.Num == 0 {\n\t\treturn true\n\t}\n\tlast := len(a.Bits) - 1\n\tfor i, w := range a.Bits[:last] {\n\t\tif w != b.Bits[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn (a.Bits[last]^b.Bits[last])<<uint(len(a.Bits)*64-a.Num) == 0\n}", "func (ob *SuInstance) Equal(other any) bool {\n\tob2, ok := other.(*SuInstance)\n\tif !ok || ob.class != ob2.class {\n\t\treturn false\n\t}\n\tif ob.useDeepEquals && ob2.useDeepEquals {\n\t\treturn deepEqual(ob, ob2)\n\t}\n\treturn ob == ob2\n}", "func (w *World) AreWorldsEqual(gb2 *World) bool {\n\tif w.xSize != gb2.xSize || w.ySize != gb2.ySize {\n\t\treturn false\n\t}\n\tif len(w.cells) != len(gb2.cells) {\n\t\treturn false\n\t}\n\tfor k := range w.cells {\n\t\tif w.cells[k] != gb2.cells[k] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (a *Secret) Equal(b *Secret) bool {\n\treturn a == nil && b == nil || 
a != nil && b != nil && *a == *b\n}", "func (t Tags) Equal(other Tags) bool {\n\tif len(t.Values()) != len(other.Values()) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(t.Values()); i++ {\n\t\tequal := t.values[i].Name.Equal(other.values[i].Name) &&\n\t\t\tt.values[i].Value.Equal(other.values[i].Value)\n\t\tif !equal {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func equal(a, b float64) bool {\n\treturn math.Abs(a-b) <= equalityThreshold\n}", "func (d DummyEqualer) Equal(u Equaler) bool {\n\t_, ok := u.(DummyEqualer)\n\tif !ok {\n\t\treturn false\n\t}\n\treturn true\n}", "func (v Vector) Equals(other Vector) bool {\n\tv = v.reduce()\n\tother = other.reduce()\n\tif len(v.data) == len(other.data) {\n\t\tfor n, d1 := range v.data {\n\t\t\tif d2, ok := other.data[n]; !ok || d1 != d2 {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (v PlainTextValue) Equal(other ValueNode) bool {\n\totherV, ok := other.(PlainTextValue)\n\treturn ok && v == otherV\n}", "func (n Name) Equal(other Name) bool {\n\treturn string(n) == string(other)\n}", "func TestCities_Equal(t *testing.T) {\n\ttype testCase struct {\n\t\tc1, c2 cities\n\t\twant bool\n\t}\n\tcases := []testCase{\n\t\t{\n\t\t\tc1: cities{},\n\t\t\tc2: cities{},\n\t\t\twant: true,\n\t\t},\n\t\t{\n\t\t\tc1: cities{},\n\t\t\tc2: cities{\n\t\t\t\tcity{name: \"Barcelona\", population: 1.6e6, cost: ReasonableCost, climate: GreatClimate}},\n\t\t\twant: false,\n\t\t},\n\t}\n\tfor _, tc := range cases {\n\t\t//\tc1 := cities{}\n\t\t//\tc2 := cities{}\n\t\t//\t\twant := true\n\t\tif tc.c1.Equal(tc.c2) != tc.want {\n\t\t\tt.Errorf(\"cities.Equal() should be %v for cities\\n%q\\nand\\n%q\\n\", tc.want, tc.c1, tc.c2)\n\t\t}\n\t}\n}", "func Equals(v1, v2 interface{}) bool {\n\t// TODO(EItanya): Should this be `proto.Equal` since these values are usually proto messages\n\treturn reflect.DeepEqual(v1, v2)\n}", "func Equal(t t, want interface{}, have interface{}) {\n\tequal(t, want, have)\n}", "func VNameEqual(v1, v2 *spb.VName) bool {\n\treturn (v1 == v2) || (v1.GetSignature() == v2.GetSignature() && v1.GetCorpus() == v2.GetCorpus() && v1.GetRoot() == v2.GetRoot() && v1.GetPath() == v2.GetPath() && v1.GetLanguage() == v2.GetLanguage())\n}", "func Equal(t *testing.T, expected, actual interface{}) {\n\tt.Helper()\n\n\tif expected != actual {\n\t\tt.Errorf(`%s: expected \"%v\" actual \"%v\"`, t.Name(), expected, actual)\n\t}\n}", "func (u OpUnion) Equal(other OpUnion) bool {\n\t// keep in sync with Pipeline.Equal as go is terrible at inlining anything with a loop\n\tif u.Type != other.Type {\n\t\treturn false\n\t}\n\n\tif u.Type == pipeline.RollupOpType {\n\t\treturn u.Rollup.Equal(other.Rollup)\n\t}\n\n\treturn u.Transformation.Type == other.Transformation.Type\n}", "func Equal(u1, u2 Unit) bool {\n\tif u1 == nil || u2 == nil {\n\t\treturn false\n\t}\n\tif !equalUnit(u1, u2) {\n\t\treturn false\n\t}\n\n\tswitch u1.Type() {\n\tcase TypeUnit:\n\t\treturn true\n\tcase TypeTextPlain:\n\t\treturn equalTextPlain(u1.(TextPlain), u2.(TextPlain))\n\tcase TypeTextMarkdown:\n\t\treturn equalTextMarkdown(u1.(TextMarkdown), u2.(TextMarkdown))\n\tcase TypeTextCode:\n\t\treturn equalTextCode(u1.(TextCode), u2.(TextCode))\n\tcase TypeTodo:\n\t\treturn equalTodo(u1.(Todo), u2.(Todo))\n\tcase TypeList:\n\t\treturn equalList(u1.(List), u2.(List))\n\t}\n\n\t// Not supported units are not equal\n\treturn false\n}", "func Equal(n1, n2 External) bool {\n\tif n1 == nil && n2 == nil {\n\t\treturn true\n\t} else if n1 == nil || n2 == nil 
{\n\t\treturn false\n\t}\n\tswitch n1 := n1.(type) {\n\tcase String:\n\t\tn2, ok := n2.(String)\n\t\tif ok {\n\t\t\treturn n1 == n2\n\t\t}\n\tcase Int:\n\t\tn2, ok := n2.(Int)\n\t\tif ok {\n\t\t\treturn n1 == n2\n\t\t}\n\tcase Uint:\n\t\tn2, ok := n2.(Uint)\n\t\tif ok {\n\t\t\treturn n1 == n2\n\t\t}\n\tcase Bool:\n\t\tn2, ok := n2.(Bool)\n\t\tif ok {\n\t\t\treturn n1 == n2\n\t\t}\n\tcase Float:\n\t\tn2, ok := n2.(Float)\n\t\tif ok {\n\t\t\treturn n1 == n2\n\t\t}\n\tcase Object:\n\t\tif n2, ok := n2.(Object); ok {\n\t\t\tif len(n1) != len(n2) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif pointerOf(n1) == pointerOf(n2) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn n1.EqualObject(n2)\n\t\t}\n\t\tif _, ok := n2.(Node); ok {\n\t\t\treturn false\n\t\t}\n\t\treturn n1.Equal(n2)\n\tcase Array:\n\t\tif n2, ok := n2.(Array); ok {\n\t\t\tif len(n1) != len(n2) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn len(n1) == 0 || &n1[0] == &n2[0] || n1.EqualArray(n2)\n\t\t}\n\t\tif _, ok := n2.(Node); ok {\n\t\t\treturn false\n\t\t}\n\t\treturn n1.Equal(n2)\n\tdefault:\n\t\tif Same(n1, n2) {\n\t\t\treturn true\n\t\t}\n\t}\n\tif n, ok := n1.(Node); ok {\n\t\treturn n.Equal(n2)\n\t} else if n, ok = n2.(Node); ok {\n\t\treturn n.Equal(n1)\n\t}\n\treturn equalExt(n1, n2)\n}", "func (a *Mtx) Equals(b *Mtx) bool {\n\tfor i := 0; i < 4; i++ {\n\t\tfor j := 0; j < 4; j++ {\n\t\t\tif a.el[i][j] != b.el[i][j] {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}", "func TypesEqual(a, b sql.Type) bool {\n\t// TODO: replace all of the Type() == Type() calls with TypesEqual\n\n\t// We can assume they have the same implementing type if this passes, so we have to check the parameters\n\tif a == nil || b == nil || a.Type() != b.Type() {\n\t\treturn false\n\t}\n\t// Some types cannot be compared structurally as they contain non-comparable types (such as slices), so we handle\n\t// those separately.\n\tswitch at := a.(type) {\n\tcase EnumType:\n\t\taEnumType := at\n\t\tbEnumType := b.(EnumType)\n\t\tif len(aEnumType.indexToVal) != len(bEnumType.indexToVal) {\n\t\t\treturn false\n\t\t}\n\t\tfor i := 0; i < len(aEnumType.indexToVal); i++ {\n\t\t\tif aEnumType.indexToVal[i] != bEnumType.indexToVal[i] {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn aEnumType.collation == bEnumType.collation\n\tcase SetType:\n\t\taSetType := at\n\t\tbSetType := b.(SetType)\n\t\tif len(aSetType.bitToVal) != len(bSetType.bitToVal) {\n\t\t\treturn false\n\t\t}\n\t\tfor bit, aVal := range aSetType.bitToVal {\n\t\t\tif bVal, ok := bSetType.bitToVal[bit]; ok && aVal != bVal {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn aSetType.collation == bSetType.collation\n\tcase TupleType:\n\t\tif tupA, ok := a.(TupleType); ok {\n\t\t\tif tupB, ok := b.(TupleType); ok && len(tupA) == len(tupB) {\n\t\t\t\tfor i := range tupA {\n\t\t\t\t\tif !TypesEqual(tupA[i], tupB[i]) {\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\tdefault:\n\t\treturn a == b\n\t}\n}", "func eq(args ...interface{}) bool {\n\tif len(args) == 0 {\n\t\treturn false\n\t}\n\tx := args[0]\n\tswitch x := x.(type) {\n\tcase int:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase int:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase int64:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase int64:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase int:\n\t\t\t\tif int64(x) == int64(y) 
{\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase int64:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase float32:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase float32:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase float64:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase float64:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase float32:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase float64:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase string, byte:\n\t\tfor _, y := range args[1:] {\n\t\t\tif x == y {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\tfor _, y := range args[1:] {\n\t\tif reflect.DeepEqual(x, y) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (mm MoneyMarket) Equal(mmCompareTo MoneyMarket) bool {\n\tif mm.Denom != mmCompareTo.Denom {\n\t\treturn false\n\t}\n\tif !mm.BorrowLimit.Equal(mmCompareTo.BorrowLimit) {\n\t\treturn false\n\t}\n\tif mm.SpotMarketID != mmCompareTo.SpotMarketID {\n\t\treturn false\n\t}\n\tif !mm.ConversionFactor.Equal(mmCompareTo.ConversionFactor) {\n\t\treturn false\n\t}\n\tif !mm.InterestRateModel.Equal(mmCompareTo.InterestRateModel) {\n\t\treturn false\n\t}\n\tif !mm.ReserveFactor.Equal(mmCompareTo.ReserveFactor) {\n\t\treturn false\n\t}\n\tif !mm.AuctionSize.Equal(mmCompareTo.AuctionSize) {\n\t\treturn false\n\t}\n\tif !mm.KeeperRewardPercentage.Equal(mmCompareTo.KeeperRewardPercentage) {\n\t\treturn false\n\t}\n\treturn true\n}", "func (v BoolValue) Equal(other ValueNode) bool {\n\totherV, ok := other.(BoolValue)\n\treturn ok && v == otherV\n}", "func Eq(one, other interface{}) bool {\n\treturn reflect.DeepEqual(one, other)\n}", "func (mr *MockWasmDeploymentSetMockRecorder) Equal(wasmDeploymentSet interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Equal\", reflect.TypeOf((*MockWasmDeploymentSet)(nil).Equal), wasmDeploymentSet)\n}", "func (move Move) Equal(m Move) bool {\n\treturn (move & 0xffff0000) == (m & 0xffff0000)\n}", "func (mr *MockVirtualMeshSetMockRecorder) Equal(virtualMeshSet interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Equal\", reflect.TypeOf((*MockVirtualMeshSet)(nil).Equal), virtualMeshSet)\n}", "func (f FlowRef) Equal(g FlowRef) bool {\n\treturn f.ID == g.ID && f.Ver == g.Ver\n}", "func TestEquals(t *testing.T) {\n\tt.Parallel()\n\tfor ti, tt := range []struct {\n\t\tm1, m2 MatrixExp\n\t\teq bool\n\t}{\n\t\t{\n\t\t\tm1: GeneralZeros(1, 1),\n\t\t\tm2: GeneralZeros(1, 1),\n\t\t\teq: true,\n\t\t},\n\t\t{\n\t\t\tm1: GeneralZeros(1, 1),\n\t\t\tm2: GeneralZeros(1, 10),\n\t\t\teq: false,\n\t\t},\n\t\t{\n\t\t\tm1: GeneralZeros(10, 1),\n\t\t\tm2: GeneralZeros(1, 1),\n\t\t\teq: false,\n\t\t},\n\t\t{\n\t\t\tm1: GeneralZeros(1, 1),\n\t\t\tm2: GeneralOnes(1, 1),\n\t\t\teq: false,\n\t\t},\n\t} {\n\t\tif v := Equals(tt.m1, tt.m2); v != tt.eq {\n\t\t\tt.Errorf(\"%d: Equals(%v,%v) equals %v, want %v\", ti, tt.m1, tt.m2, v, tt.eq)\n\t\t}\n\t}\n}", "func Equal(t Testing, expected, actual interface{}, formatAndArgs ...interface{}) bool {\n\tif !AreEqualObjects(expected, actual) {\n\t\treturn Fail(t,\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"Expected values are NOT 
equal.%s\",\n\t\t\t\tdiffValues(expected, actual),\n\t\t\t),\n\t\t\tformatAndArgs...)\n\t}\n\n\treturn true\n}", "func (dt DateTime) Equal(u DateTime) bool {\n\treturn dt.src.Equal(u.src)\n}", "func (uview *UtreexoViewpoint) Equal(compRoots []*chainhash.Hash) bool {\n\tuViewRoots := uview.accumulator.GetRoots()\n\tif len(uViewRoots) != len(compRoots) {\n\t\tlog.Criticalf(\"Length of the given roots differs from the one\" +\n\t\t\t\"fetched from the utreexoViewpoint.\")\n\t\treturn false\n\t}\n\n\tpassedInRoots := make([]accumulator.Hash, len(compRoots))\n\n\tfor i, compRoot := range compRoots {\n\t\tpassedInRoots[i] = accumulator.Hash(*compRoot)\n\t}\n\n\tfor i, root := range passedInRoots {\n\t\tif !bytes.Equal(root[:], uViewRoots[i][:]) {\n\t\t\tlog.Criticalf(\"The compared Utreexo roots differ.\"+\n\t\t\t\t\"Passed in root:%x\\nRoot from utreexoViewpoint:%x\\n\", uViewRoots[i], root)\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (t Token) Equal(v Token) bool {\n\treturn t.ID == v.ID &&\n\t\tt.Class == v.Class &&\n\t\tt.Surface == v.Surface\n}", "func (c1 city) Equal(c2 city) bool {\n\tif c1.name != c2.name {\n\t\treturn false\n\t}\n\tif c1.population != c2.population {\n\t\treturn false\n\t}\n\tif c1.cost != c2.cost {\n\t\treturn false\n\t}\n\tif c1.climate != c2.climate {\n\t\treturn false\n\t}\n\treturn true\n}" ]
[ "0.7338705", "0.64570975", "0.6389764", "0.628415", "0.61077344", "0.5877148", "0.5758056", "0.57365495", "0.57364357", "0.5642112", "0.56111157", "0.5589437", "0.5572632", "0.55673397", "0.55565006", "0.5533532", "0.5520422", "0.5506255", "0.55059606", "0.5499088", "0.5490454", "0.54868865", "0.54783636", "0.5464265", "0.5461085", "0.5441538", "0.5434998", "0.5412859", "0.53975844", "0.5392168", "0.53888685", "0.5345672", "0.5338377", "0.532881", "0.5321863", "0.531257", "0.53043485", "0.52996445", "0.52967", "0.52912265", "0.5283723", "0.5282718", "0.52809185", "0.52797335", "0.52711403", "0.5246908", "0.5244107", "0.52338874", "0.52309173", "0.52277476", "0.5225885", "0.5224273", "0.52223754", "0.5212368", "0.5210503", "0.5209698", "0.5207134", "0.52051455", "0.5184352", "0.5184352", "0.5183986", "0.5183448", "0.5181807", "0.5180005", "0.51741856", "0.51724344", "0.51704615", "0.51640946", "0.51631904", "0.5157002", "0.5155828", "0.51519394", "0.5150548", "0.51432854", "0.51404965", "0.5134273", "0.51325125", "0.512501", "0.51199055", "0.511799", "0.5116434", "0.51149833", "0.5113119", "0.5104098", "0.5101653", "0.5099237", "0.5098269", "0.50971407", "0.5095155", "0.5094265", "0.5093665", "0.5091635", "0.5083872", "0.5083712", "0.50829846", "0.50806683", "0.5077454", "0.50643283", "0.50641215", "0.5059333" ]
0.63866067
3
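The negative snippets listed above cluster around one idea: structural equality in Go, where two values are equal when their kinds match and their contents match element by element (often via `reflect.DeepEqual` or a hand-written recursive walk). A minimal, self-contained sketch of that recursive pattern is shown below; the `Val` type and `Equal` helper are assumptions made for the example, not identifiers taken from any snippet in this dataset.

```go
// Illustrative sketch of recursive structural equality over a tiny value type.
package main

import "fmt"

// Val is a minimal tagged value: either a leaf string or a list of Vals.
type Val struct {
	Leaf string
	List []Val
}

// Equal reports whether two Vals are structurally identical:
// same leaf, same length, and pairwise-equal children.
func Equal(a, b Val) bool {
	if a.Leaf != b.Leaf || len(a.List) != len(b.List) {
		return false
	}
	for i := range a.List {
		if !Equal(a.List[i], b.List[i]) {
			return false
		}
	}
	return true
}

func main() {
	x := Val{List: []Val{{Leaf: "a"}, {Leaf: "b"}}}
	y := Val{List: []Val{{Leaf: "a"}, {Leaf: "b"}}}
	z := Val{List: []Val{{Leaf: "a"}}}
	fmt.Println(Equal(x, y)) // true
	fmt.Println(Equal(x, z)) // false
}
```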
Less tells whether value v is (structurally) less than w.
func Less(v, w T) bool { if v == Unit { return false } switch v := v.(type) { case *big.Int: return v.Cmp(w.(*big.Int)) < 0 case *big.Float: return v.Cmp(w.(*big.Float)) < 0 case string: return v < w.(string) case bool: return !v && w.(bool) case reflow.File: w := w.(reflow.File) if v.IsRef() != w.IsRef() { return v.IsRef() } if !v.IsRef() { return v.ID.Less(w.ID) } else if v.Source != w.Source { return v.Source < w.Source } else { return v.ETag < w.ETag } case Dir: w := w.(Dir) if v.Len() != w.Len() { return v.Len() < w.Len() } var ( vkeys = make([]string, 0, v.Len()) wkeys = make([]string, 0, w.Len()) ) for k := range v.contents { vkeys = append(vkeys, k) } for k := range w.contents { wkeys = append(wkeys, k) } sort.Strings(vkeys) sort.Strings(wkeys) for i := range vkeys { if vkeys[i] != wkeys[i] { return vkeys[i] < wkeys[i] } else if Less(v.contents[vkeys[i]], w.contents[wkeys[i]]) { return true } } return false case List: w := w.(List) if len(v) != len(w) { return len(v) < len(w) } for i := range v { if Less(v[i], w[i]) { return true } } return false case *Map: w := w.(*Map) if n, m := v.Len(), w.Len(); n != m { return n < m } var ( ventries = make([]*mapEntry, 0, v.Len()) wentries = make([]*mapEntry, 0, w.Len()) ) for _, entryp := range v.tab { for entry := *entryp; entry != nil; entry = entry.Next { ventries = append(ventries, entry) } } for _, entryp := range w.tab { for entry := *entryp; entry != nil; entry = entry.Next { wentries = append(wentries, entry) } } sort.Slice(ventries, func(i, j int) bool { return Less(ventries[i].Key, ventries[j].Key) }) sort.Slice(wentries, func(i, j int) bool { return Less(wentries[i].Key, wentries[j].Key) }) for i := range ventries { ventry, wentry := ventries[i], wentries[i] if !Equal(ventry.Key, wentry.Key) { return Less(ventry.Key, wentry.Key) } if !Equal(ventry.Value, wentry.Value) { return Less(ventry.Value, wentry.Value) } } return false case Tuple: w := w.(Tuple) for i := range v { if !Equal(v[i], w[i]) { return Less(v[i], w[i]) } } return false case Struct: w := w.(Struct) keys := make([]string, 0, len(v)) for k := range v { keys = append(keys, k) } sort.Strings(keys) for _, k := range keys { if !Equal(v[k], w[k]) { return Less(v[k], w[k]) } } return false case Module: w := w.(Module) keys := make([]string, 0, len(v)) for k := range v { keys = append(keys, k) } sort.Strings(keys) for _, k := range keys { if !Equal(v[k], w[k]) { return Less(v[k], w[k]) } } return false default: panic("attempted to compare incomparable values") } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (v Version) Less(w Version) bool {\n\treturn v.Cmp(w) < 0\n}", "func (v Version) Less(w Version) bool {\n\treturn v.Compare(w) < 0\n}", "func (v Value) Less(b Value) bool {\n\treturn v.LessCase(b, false)\n}", "func (w Words) Less(i int, j int) bool {\n\treturn w[i] < w[j]\n}", "func (i *info) less(b *info) bool {\n\tswitch t := i.Val.(type) {\n\tcase int64:\n\t\treturn t < b.Val.(int64)\n\tcase float64:\n\t\treturn t < b.Val.(float64)\n\tcase string:\n\t\treturn t < b.Val.(string)\n\tdefault:\n\t\tif ord, ok := i.Val.(util.Ordered); ok {\n\t\t\treturn ord.Less(b.Val.(util.Ordered))\n\t\t}\n\t\tlog.Fatalf(\"unhandled info value type: %s\", t)\n\t}\n\treturn false\n}", "func (a byWeight) Less(i, j int) bool {\n\treturn (a[i].Weight > a[j].Weight) || (a[i].Weight == a[j].Weight && a[i].clusterName < a[j].clusterName)\n}", "func less(kA, kB, vA, vB reflect.Value) bool {\n\tswitch kA.Kind() {\n\tcase reflect.Bool:\n\t\treturn !kA.Bool() && kB.Bool()\n\tcase reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:\n\t\treturn kA.Int() < kB.Int()\n\tcase reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:\n\t\treturn kA.Uint() < kB.Uint()\n\tcase reflect.Float32, reflect.Float64:\n\t\tif vA.IsValid() && vB.IsValid() && math.IsNaN(kA.Float()) && math.IsNaN(kB.Float()) {\n\t\t\treturn less(vA, vB, reflect.Value{}, reflect.Value{})\n\t\t}\n\t\treturn math.IsNaN(kA.Float()) || kA.Float() < kB.Float()\n\tcase reflect.String:\n\t\treturn kA.String() < kB.String()\n\tcase reflect.Uintptr:\n\t\treturn kA.Uint() < kB.Uint()\n\tcase reflect.Array:\n\t\t// Compare the contents of both arrays.\n\t\tl := kA.Len()\n\t\tfor i := 0; i < l; i++ {\n\t\t\tav := kA.Index(i)\n\t\t\tbv := kB.Index(i)\n\t\t\tif av.Interface() == bv.Interface() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn less(av, bv, vA, vB)\n\t\t}\n\t\treturn less(vA, vB, reflect.Value{}, reflect.Value{})\n\t}\n\treturn fmt.Sprint(kA) < fmt.Sprint(kB)\n}", "func (v Version) Less(cv Version) bool {\n\tprecedence, _ := v.Compare(cv)\n\treturn precedence == Older\n}", "func (v Set) Less(i, j int) bool { return v[i].Cmp(v[j]) < 0 }", "func less(c1, c2 cost) bool {\n\tswitch {\n\tcase c1.opCode != c2.opCode:\n\t\treturn c1.opCode < c2.opCode\n\tcase c1.isUnique == c2.isUnique:\n\t\treturn c1.vindexCost <= c2.vindexCost\n\tdefault:\n\t\treturn c1.isUnique\n\t}\n}", "func (v ResourceNodes) Less(i, j int) bool {\n\treturn v[i].Tokens.LT(v[j].Tokens)\n}", "func (v ValidatorUpdates) Less(i, j int) bool {\n\treturn v[i].PubKey.Compare(v[j].PubKey) <= 0\n}", "func (t byDiffDesc) Less(i, j int) bool {\n\treturn t[i].diff() > t[j].diff() ||\n\t\t(t[i].diff() == t[j].diff() && t[i].weight > t[j].weight)\n}", "func (gdt *Vector3) OperatorLess(b Vector3) Bool {\n\targ0 := gdt.getBase()\n\targ1 := b.getBase()\n\n\tret := C.go_godot_vector3_operator_less(GDNative.api, arg0, arg1)\n\n\treturn Bool(ret)\n}", "func (b Buildings) Less(i, j int) bool {\n\treturn b[i].Left < b[j].Left\n}", "func Less(lhs, rhs Expression) Expression {\n\treturn NewCall(\"less\", []Expression{lhs, rhs}, nil)\n}", "func (hil HTTPServiceInfoList) Less(i, j int) bool {\n\treturn hil[i].Name < hil[j].Name\n}", "func (a Value) Less(b Value) bool {\n\treturn a.Compare(b) < 0\n}", "func (q *Query) Less(field_name string, val interface{}) *Query {\n\treturn q.addCondition(field_name, query.OpLess, val)\n}", "func (h minPath) Less(i, j int) bool {\n\treturn h[i].value < h[j].value\n}", "func (this byDmnInfo) Less(i, j int) bool {\n\n\tif this[i].Name != 
this[j].Name {\n\t\treturn this[i].Name < this[j].Name\n\t} else if this[i].Key != this[j].Key {\n\t\treturn this[i].Key < this[j].Key\n\t} else if this[i].Version != this[j].Version {\n\t\treturn this[i].Version < this[j].Version\n\t} else {\n\t\treturn this[i].Id < this[j].Id\n\t}\n}", "func less(data []Result) func(i, j int) bool {\n\treturn func(i, j int) bool {\n\t\tif data[i].Status == data[j].Status {\n\t\t\treturn data[i].Name < data[j].Name\n\t\t}\n\t\treturn data[i].Status > data[j].Status\n\t}\n}", "func (es Envelopes) Less(i, j int) bool {\n\treturn (es[i].w < es[j].w) || (es[i].w == es[j].w && es[i].h >= es[j].h)\n}", "func LessVector(v1 map[string]SignedElement, v2 map[string]SignedElement, lattice *Lattice) bool {\n\tfor _, process := range processes {\n\t\tif LessEqual(v1[process].Element, v2[process].Element, lattice) == false {\n\t\t\treturn false\n\t\t}\n\t}\n\tfor _, process := range processes {\n\t\tif Less(v1[process].Element, v2[process].Element, lattice) == false {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (c CatalogVersionCollection) Less(i, j int) bool {\n\treturn c[i].RuntimeVersion.LessThan(c[j].RuntimeVersion)\n}", "func (servers byPriorityWeight) Less(i, j int) bool {\n\treturn servers[i].Priority < servers[j].Priority ||\n\t\t(servers[i].Priority == servers[j].Priority && servers[i].Weight < servers[j].Weight)\n}", "func lessWordPair(x, y interface{}) bool\t{ return x.(*wordPair).canon < y.(*wordPair).canon }", "func (p PolicyEntriesDump) Less(i, j int) bool {\n\tiDeny := p[i].PolicyEntry.IsDeny()\n\tjDeny := p[j].PolicyEntry.IsDeny()\n\tswitch {\n\tcase iDeny && !jDeny:\n\t\treturn true\n\tcase !iDeny && jDeny:\n\t\treturn false\n\t}\n\tif p[i].Key.TrafficDirection < p[j].Key.TrafficDirection {\n\t\treturn true\n\t}\n\treturn p[i].Key.TrafficDirection <= p[j].Key.TrafficDirection &&\n\t\tp[i].Key.Identity < p[j].Key.Identity\n}", "func (b *byWidth) Less(i, j int) bool {\n\treturn b.Values[i].Width < b.Values[j].Width\n}", "func (s *userFactorValue) Less(i, j int) bool {\n\treturn s.Values[i] < s.Values[j]\n}", "func (list VulnerabilityList) Less(i, j int) bool {\n\treturn compareSeverity(Serverity(list[i].Severity), Serverity(list[j].Severity))\n}", "func WeightLT(v float64) predicate.Opt {\n\treturn predicate.Opt(func(s *sql.Selector) {\n\t\ts.Where(sql.LT(s.C(FieldWeight), v))\n\t})\n}", "func (p *printableAWSPolicyDocument) Less(i, j int) bool {\n\treturn p.Statements[i].Effect < p.Statements[j].Effect\n}", "func (s *String) Less(v Value) bool {\n\tif s.Kind() != v.Kind() {\n\t\treturn s.Kind() < v.Kind()\n\t}\n\n\treturn string(s.s) < string(v.(*String).s)\n}", "func (a Tags) Less(i, j int) bool { return a[i].Version.GT(a[j].Version) }", "func (a *Pair) Less (b *Pair) bool {\n return a.x < b.x\n}", "func (s Migrations) Less(i, j int) bool {\n\tiTag, err := semver.Make(s[i].Tag)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tjTag, err := semver.Make(s[j].Tag)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn iTag.LT(jTag)\n}", "func (e *Edge) Less(other Edge) bool {\n\treturn e.weight < other.weight\n}", "func (t toc) Less(i, j int) bool {\n\treturn t[i].value < t[j].value\n}", "func (a Vertex) Less(b Vertex) bool {\n\tfor i := range a.pos {\n\t\tswitch {\n\t\tcase a.pos[i] < b.pos[i]:\n\t\t\treturn true\n\t\tcase a.pos[i] > b.pos[i]:\n\t\t\treturn false\n\t\t}\n\t}\n\tfor i := range a.color {\n\t\tswitch {\n\t\tcase a.color[i] < b.color[i]:\n\t\t\treturn true\n\t\tcase a.color[i] > b.color[i]:\n\t\t\treturn false\n\t\t}\n\t}\n\treturn 
false\n}", "func (s MsgFormats) Less(i, j int) bool {\n\treturn s[i].SendTime < s[j].SendTime\n}", "func (c Channel) Less(comp Channel) bool {\n\n\tswitch {\n\tcase strings.ToLower(c.Make) < strings.ToLower(comp.Make):\n\t\treturn true\n\tcase strings.ToLower(c.Make) > strings.ToLower(comp.Make):\n\t\treturn false\n\tcase strings.ToLower(c.Model) < strings.ToLower(comp.Model):\n\t\treturn true\n\tcase strings.ToLower(c.Model) > strings.ToLower(comp.Model):\n\t\treturn false\n\tcase c.Number < comp.Number:\n\t\treturn true\n\tcase c.Number > comp.Number:\n\t\treturn false\n\tcase c.SamplingRate < comp.SamplingRate:\n\t\treturn true\n\tcase c.SamplingRate > comp.SamplingRate:\n\t\treturn false\n\tdefault:\n\t\treturn true\n\t}\n}", "func (s StructFields) Less(i, j int) bool {\n\treturn s[i].Type.Name < s[j].Type.Name\n}", "func Less(a, b string) bool {\n\treturn VerCmp(a, b) == -1\n}", "func (b *limitedBroadcast) Less(than btree.Item) bool {\n\to := than.(*limitedBroadcast)\n\tif b.transmits < o.transmits {\n\t\treturn true\n\t} else if b.transmits > o.transmits {\n\t\treturn false\n\t}\n\tif b.msgLen > o.msgLen {\n\t\treturn true\n\t} else if b.msgLen < o.msgLen {\n\t\treturn false\n\t}\n\treturn b.id > o.id\n}", "func Less(fieldPtr interface{}, value interface{}) Filter {\n\treturn &ComparisonFilter{\n\t\tLeft: fieldPtr,\n\t\tComparison: \"<\",\n\t\tRight: value,\n\t}\n}", "func LessThan(a cty.Value, b cty.Value) (cty.Value, error) {\n\treturn LessThanFunc.Call([]cty.Value{a, b})\n}", "func (s *SortableStruct) Less(other interface{}) bool {\n\totherss, ok := other.(*SortableStruct)\n\tif !ok {\n\t\tlog.Printf(\"Type assertion failed in SortableStruct; got other of type %#v\", other)\n\t\treturn true\n\t}\n\tres := s.Val < otherss.Val\n\treturn res\n}", "func (sorter *modelSorter) Less(i, j int) bool {\n\treturn sorter.lessFunc(sorter.models[i], sorter.models[j])\n}", "func (s APIParams) Less(i, j int) bool {\n\treturn s[i].Name < s[j].Name\n}", "func (k *joinKey) less(other joinKey) bool {\n\ta, b := k, other\n\tfor i := 0; i < len(k.values); i++ {\n\t\tif b.values[i].IsNull() {\n\t\t\treturn true\n\t\t} else if a.values[i].IsNull() {\n\t\t\treturn false\n\t\t}\n\n\t\tswitch a.columns[i].Type {\n\t\tcase flux.TBool:\n\t\t\tif av, bv := a.values[i].Bool(), b.values[i].Bool(); av != bv {\n\t\t\t\treturn bv\n\t\t\t}\n\t\tcase flux.TInt:\n\t\t\tif av, bv := a.values[i].Int(), b.values[i].Int(); av != bv {\n\t\t\t\treturn av < bv\n\t\t\t}\n\t\tcase flux.TUInt:\n\t\t\tif av, bv := a.values[i].UInt(), b.values[i].UInt(); av != bv {\n\t\t\t\treturn av < bv\n\t\t\t}\n\t\tcase flux.TFloat:\n\t\t\tif av, bv := a.values[i].Float(), b.values[i].Float(); av != bv {\n\t\t\t\treturn av < bv\n\t\t\t}\n\t\tcase flux.TString:\n\t\t\tif av, bv := a.values[i].Str(), b.values[i].Str(); av != bv {\n\t\t\t\treturn av < bv\n\t\t\t}\n\t\tcase flux.TTime:\n\t\t\tif av, bv := a.values[i].Time(), b.values[i].Time(); av != bv {\n\t\t\t\treturn av < bv\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func (tsl LegacyTabletStatsList) Less(i, j int) bool {\n\tname1 := tsl[i].Name\n\tif name1 == \"\" {\n\t\tname1 = tsl[i].Key\n\t}\n\tname2 := tsl[j].Name\n\tif name2 == \"\" {\n\t\tname2 = tsl[j].Key\n\t}\n\treturn name1 < name2\n}", "func LessThan[\n\tValueT typecons.Ordered,\n](refValue ValueT) OrderedConstraint[ValueT] {\n\treturn &relOpConstraint[ValueT]{ref: refValue, op: relOpLess}\n}", "func (sv *sorterValues) Less(i, j int) bool {\n\tri := sv.rows[i]\n\trj := sv.rows[j]\n\n\treturn sv.invertSorting != sv.RowLess(ri, 
rj)\n}", "func (moves Moves) Less(i, j int) bool {\n\tif len(moves[i].From) < len(moves[j].From) {\n\t\treturn true\n\t}\n\n\tif len(moves[i].From) > len(moves[j].From) {\n\t\treturn false\n\t}\n\n\treturn moves[i].String() < moves[j].String()\n}", "func (l DriverInfoList) Less(i, j int) bool { return l[i].Order < l[j].Order }", "func (v *intChain) IsLess(comp int) IntChainer {\n f := func() bool { \n return v.Num < comp\n }\n v.chains = append(v.chains, f)\n\n return v\n}", "func (c *comparables) Less(i, j int) bool {\n\treturn c.IsLess(c.values[i], c.values[j])\n}", "func (p *IntVector) Less(i, j int) bool\t{ return p.At(i) < p.At(j) }", "func (s *NodeSorter) Less(i, j int) bool {\n\treturn s.by(s.nodes[i], s.nodes[j])\n}", "func Less(v1, v2 string) bool {\n\treturn semver.Compare(canonicalizeSemverPrefix(v1), canonicalizeSemverPrefix(v2)) < 0\n}", "func (el Elements) Less(i, j int) bool {\n\treturn el[i].Name < el[j].Name\n}", "func (ms *multiSorter) Less(i, j int) bool {\n\tp, q := &ms.vsrecs[i], &ms.vsrecs[j]\n\t// Try all but the last comparison.\n\tvar k int\n\tfor k = 0; k < len(ms.less)-1; k++ {\n\t\tless := ms.less[k]\n\t\tswitch {\n\t\tcase less(p, q):\n\t\t\t// p < q, so we have a decision.\n\t\t\treturn true\n\t\tcase less(q, p):\n\t\t\t// p > q, so we have a decision.\n\t\t\treturn false\n\t\t}\n\t\t// p == q; try the next comparison.\n\t}\n\t// All comparisons to here said \"equal\", so just return whatever\n\t// the final comparison reports.\n\treturn ms.less[k](p, q)\n}", "func (a Vec2) Less(b Vec2) bool {\n\treturn a.X < b.X && a.Y < b.Y\n}", "func IsLess(fl validator.FieldLevel) bool {\n\n\tvalue := fl.Field().Int()\n\tparam:= fl.Param()\n\treturn value < cast.ToInt64(param)\n}", "func (b ByDistance) Less(i, j int) bool {\n\treturn b.Servers[i].Distance < b.Servers[j].Distance\n}", "func (b byNumberOfVotes) Less(i, j int) bool {\n\treturn b[i].NumVotes < b[j].NumVotes\n}", "func (b *Bucket) less(r *Bucket) bool {\n\tif b.First || r.First {\n\t\treturn b.First\n\t}\n\treturn b.Signature.less(&r.Signature)\n}", "func (s *mapSorter) Less(i, j int) bool {\n\treturn less(s.keys[i], s.keys[j], s.vals[i], s.vals[j])\n}", "func (s *nodeSorter) Less(i, j int) bool {\n\treturn s.by(s.nodes[i], s.nodes[j])\n}", "func Less(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"Less\",\n\t\tInput: []tf.Input{\n\t\t\tx, y,\n\t\t},\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}", "func (c card) less(d card) bool {\n\tif c.number != d.number {\n\t\treturn c.number < d.number\n\t} else {\n\t\treturn c.suit < d.suit\n\t}\n}", "func (v *TypeVisitor) VisitNumLess(e NumLess) {\n\tcheckTypeNumOp(v, e.Left, e.Right)\n}", "func (a Int) Less(b Item) bool {\n return a < b.(Int)\n}", "func (s byFCFS) Less(i, j int) (ret bool) {\n\tp_i := s.WorkList[i].Info.Priority\n\tp_j := s.WorkList[j].Info.Priority\n\tswitch {\n\tcase p_i > p_j:\n\t\treturn true\n\tcase p_i < p_j:\n\t\treturn false\n\tcase p_i == p_j:\n\t\treturn s.WorkList[i].Info.SubmitTime.Before(s.WorkList[j].Info.SubmitTime)\n\t}\n\treturn\n}", "func (c UintCompare) IsLess(lhs, rhs uint) bool { return c(lhs, rhs) }", "func (s btreeString) Less(i btree.Item) bool {\r\n\treturn s.l(s.s, i.(btreeString).s)\r\n}", "func (p priorities) Less(i, j int) bool { return p[i] < p[j] }", "func (d Drones) Less(i, j int) bool {\n\treturn d[i].AvailableIn() < d[j].AvailableIn()\n}", "func lessByDelta(i, j BenchCmp, calcDelta func(BenchCmp) Delta) bool 
{\n\tiDelta, jDelta := calcDelta(i).mag(), calcDelta(j).mag()\n\tif iDelta != jDelta {\n\t\treturn iDelta < jDelta\n\t}\n\treturn i.Name() < j.Name()\n}", "func (c Int32Compare) IsLess(lhs, rhs int32) bool { return c(lhs, rhs) }", "func (v *RelaxedVersion) LessThan(u *RelaxedVersion) bool {\n\treturn v.CompareTo(u) < 0\n}", "func (a blocksByWork) Less(i, j int) bool { return a[i].work > a[j].work }", "func (i1 Int) Less(i2 freetree.Comparable) bool { return i1 < i2.(Int) }", "func (p byPoints) Less(i, j int) bool {\n\t// 4 is actually less than anything else\n\tif p[i] == 4 {\n\t\treturn true\n\t}\n\treturn p[i] < p[j]\n}", "func (n *NodeSorter) Less(i, j int) bool {\n\tnode1, node2 := n.nodes[i], n.nodes[j]\n\tfor _, processor := range n.processors {\n\t\tif val := processor.ScaleDownEarlierThan(node1, node2); val || processor.ScaleDownEarlierThan(node2, node1) {\n\t\t\treturn val\n\t\t}\n\t}\n\treturn false\n}", "func (p Page) Less(i, j int) bool {\n\treturn p[i].Order < p[j].Order\n}", "func (c Conditions) Less(i, j int) bool {\n\treturn c[i].CanIterate() && c[i].Len() < c[j].Len()\n}", "func Less(h LessCallback) func(types.Station, []string) {\n\treturn AcceptN(1, func(s types.Station, msg string, from types.StationDesc) {\n\t\trst := h(msg, from)\n\n\t\ts.Block(rst).From(from)\n\t\ts.Close()\n\t})\n}", "func (c CounterList) Less(i, j int) bool { return c[i].Count < c[j].Count }", "func (m *Metric) Less(m2 *Metric) bool {\n\tif m.Name != m2.Name {\n\t\treturn m.Name < m2.Name\n\t}\n\tif m.Help != m2.Help {\n\t\treturn m.Help < m2.Help\n\t}\n\tif m.Type != m2.Type {\n\t\treturn m.Type < m2.Type\n\t}\n\n\tmLabels := make(model.LabelSet, len(m.Labels))\n\tfor k, v := range m.Labels {\n\t\tmLabels[model.LabelName(k)] = model.LabelValue(v)\n\t}\n\tm2Labels := make(model.LabelSet, len(m2.Labels))\n\tfor k, v := range m2.Labels {\n\t\tm2Labels[model.LabelName(k)] = model.LabelValue(v)\n\t}\n\treturn mLabels.Before(m2Labels)\n}", "func (ms *menuSorter) Less(i, j int) bool { return ms.by(ms.menu[i], ms.menu[j]) }", "func (d *Definitions) Less(i, j int) bool {\n\treturn (*d)[i].Ordinal < (*d)[j].Ordinal\n}", "func (s *RecommendDataSorter) Less(i, j int) bool {\n\treturn s.By(s.Datas[i], s.Datas[j])\n}", "func (s Sorter) Less(x, y interface{}) bool {\n\tswitch x.(type) {\n\tcase int:\n\t\ta, b := x.(int), y.(int)\n\t\tif a > b {\n\t\t\treturn false\n\t\t}\n\tcase string:\n\t\ta, b := x.(string), y.(string)\n\t\tif a > b {\n\t\t\treturn false\n\t\t}\n\tcase float64:\n\t\ta, b := x.(float64), y.(float64)\n\t\tif a > b {\n\t\t\treturn false\n\t\t}\n\tcase float32:\n\t\ta, b := x.(float32), y.(float32)\n\t\tif a > b {\n\t\t\treturn false\n\t\t}\n\tcase nil:\n\t\tpanic(\"nil types cant compare\")\n\tdefault:\n\t\tpanic(\"unhandled types\")\n\t}\n\treturn true\n}", "func (v Value) LessCase(b Value, caseSensitive bool) bool {\n\tif v.kind < b.kind {\n\t\treturn true\n\t}\n\tif v.kind > b.kind {\n\t\treturn false\n\t}\n\tif v.kind == Number {\n\t\treturn v.num < b.num\n\t}\n\tif v.kind == String {\n\t\tif caseSensitive {\n\t\t\treturn v.data < b.data\n\t\t}\n\t\treturn stringLessInsensitive(v.data, b.data)\n\t}\n\treturn v.data < b.data\n}", "func (i *Item) Less(o *Item) bool {\n\treturn i.GetKey() < o.GetKey()\n}", "func (s *recordSorter) Less(i, j int) bool {\n\treturn s.by(&s.records[i], &s.records[j])\n}", "func (kv TrieBenchKV) Less(than btree.Item) bool {\n\tanotherKV := than.(*TrieBenchKV)\n\n\treturn kv.Key < anotherKV.Key\n}", "func (f CFields) Less(i, j int) bool {\n\tif f[i].Tag < f[j].Tag 
{\n\t\treturn true\n\t}\n\tif f[i].Tag == f[j].Tag && f[i].Value < f[j].Value {\n\t\treturn true\n\t}\n\treturn false\n}" ]
[ "0.7416697", "0.7210338", "0.67413294", "0.6604478", "0.6261659", "0.6218948", "0.6203722", "0.61974305", "0.61092466", "0.607995", "0.6053616", "0.60527766", "0.59950066", "0.5985393", "0.5887996", "0.5887226", "0.5881947", "0.5881279", "0.5880172", "0.5875157", "0.58737886", "0.58699435", "0.5863003", "0.583139", "0.58168936", "0.580253", "0.5797799", "0.5789829", "0.57816935", "0.577078", "0.5736819", "0.57279986", "0.57241", "0.5700448", "0.5700143", "0.5694511", "0.5688114", "0.568604", "0.5684524", "0.56780595", "0.5677355", "0.56726253", "0.56674045", "0.56630546", "0.56215936", "0.56165785", "0.5607492", "0.5607311", "0.5603318", "0.5601569", "0.5600094", "0.5599312", "0.5584734", "0.5576944", "0.55745167", "0.5571678", "0.5565236", "0.55385625", "0.5537984", "0.552793", "0.5522366", "0.55154395", "0.55057144", "0.5484806", "0.5483502", "0.54759383", "0.547449", "0.54729366", "0.5467147", "0.54668814", "0.54656774", "0.54633963", "0.54624665", "0.54570967", "0.54551756", "0.54549193", "0.5454577", "0.54520994", "0.54518706", "0.5451268", "0.54501694", "0.54483825", "0.5441745", "0.54273164", "0.5427179", "0.5427043", "0.54235613", "0.5421954", "0.54209375", "0.5416695", "0.54162705", "0.54137915", "0.5405703", "0.5404011", "0.5398873", "0.53951055", "0.53825027", "0.53812486", "0.5380361", "0.53785396" ]
0.6616858
3
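The `Less` document in the record above orders values structurally: kind-specific comparisons for scalars and files, and length-then-contents comparisons for lists, directories, and maps, with map and struct keys sorted before they are walked. A simplified sketch of that length-then-element-wise idea, wired into `sort.Slice`, is shown below; the `Tuple` type and the standalone `Less` function are assumptions made for the example, not part of the Reflow API, and the element step here is plain lexicographic comparison rather than the recursive comparison the original uses.

```go
// Illustrative sketch: structural less-than (shorter first, then element-wise)
// used as the ordering for sort.Slice.
package main

import (
	"fmt"
	"sort"
)

// Tuple is a minimal ordered value: a slice of strings.
type Tuple []string

// Less orders Tuples by length first, then by the first differing element.
func Less(v, w Tuple) bool {
	if len(v) != len(w) {
		return len(v) < len(w)
	}
	for i := range v {
		if v[i] != w[i] {
			return v[i] < w[i]
		}
	}
	return false // equal tuples are not less than each other
}

func main() {
	ts := []Tuple{{"b", "a"}, {"a"}, {"a", "a"}}
	sort.Slice(ts, func(i, j int) bool { return Less(ts[i], ts[j]) })
	fmt.Println(ts) // [[a] [a a] [b a]]
}
```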
Sprint returns a prettyprinted version of value v with type t.
func Sprint(v T, t *types.T) string { switch arg := v.(type) { case shorter: return arg.Short() case stringer: return arg.String() case digester: return fmt.Sprintf("delayed(%v)", arg.Digest()) } switch t.Kind { case types.ErrorKind, types.BottomKind: panic("illegal type") case types.IntKind: return v.(*big.Int).String() case types.FloatKind: return v.(*big.Float).String() case types.StringKind: return fmt.Sprintf("%q", v.(string)) case types.BoolKind: if v.(bool) { return "true" } return "false" case types.FileKind: file := v.(reflow.File) if file.IsRef() { return fmt.Sprintf("file(source=%s, etag=%s)", file.Source, file.ETag) } return fmt.Sprintf("file(sha256=%s, size=%d)", file.ID, file.Size) case types.DirKind: dir := v.(Dir) entries := make([]string, 0, dir.Len()) for scan := dir.Scan(); scan.Scan(); { entries = append(entries, fmt.Sprintf("%q: %s", scan.Path(), Sprint(scan.File(), types.File))) } return fmt.Sprintf("dir(%s)", strings.Join(entries, ", ")) case types.FilesetKind: // We can't access the FileSet struct here because it would introduce // a circular dependency between reflow/ and reflow/values. We could // move the fileset definition elsewhere, but since this is anyway just a // backwards compatibility issue, we'll keep it opaque for now. d := v.(digester) return fmt.Sprintf("fileset(%s)", d.Digest().Short()) case types.UnitKind: return "()" case types.ListKind: list := v.(List) elems := make([]string, len(list)) for i, e := range list { elems[i] = Sprint(e, t.Elem) } return fmt.Sprintf("[%v]", strings.Join(elems, ", ")) case types.MapKind: var keys, values []string for _, entryp := range v.(*Map).tab { for entry := *entryp; entry != nil; entry = entry.Next { keys = append(keys, Sprint(entry.Key, t.Index)) values = append(values, Sprint(entry.Value, t.Elem)) } } elems := make([]string, len(keys)) for i := range keys { elems[i] = fmt.Sprintf("%s: %s", keys[i], values[i]) } return fmt.Sprintf("[%s]", strings.Join(elems, ", ")) case types.TupleKind: tuple := v.(Tuple) elems := make([]string, len(t.Fields)) for i, f := range t.Fields { elems[i] = Sprint(tuple[i], f.T) } return fmt.Sprintf("(%s)", strings.Join(elems, ", ")) case types.StructKind: s := v.(Struct) elems := make([]string, len(t.Fields)) for i, f := range t.Fields { elems[i] = fmt.Sprintf("%s: %s", f.Name, Sprint(s[f.Name], f.T)) } return fmt.Sprintf("{%s}", strings.Join(elems, ", ")) case types.ModuleKind: s := v.(Module) elems := make([]string, len(t.Fields)) for i, f := range t.Fields { elems[i] = fmt.Sprintf("val %s = %s", f.Name, Sprint(s[f.Name], f.T)) } return fmt.Sprintf("module{%s}", strings.Join(elems, "; ")) case types.SumKind: variant := v.(*Variant) variantTyp := t.VariantMap()[variant.Tag] if variantTyp == nil { return fmt.Sprintf("#%s", variant.Tag) } return fmt.Sprintf("#%s(%s)", variant.Tag, Sprint(variant.Elem, variantTyp)) case types.FuncKind: return fmt.Sprintf("func(?)") default: panic("unknown type " + t.String()) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func PrettyPrint(v interface{}) {\n\tfmt.Printf(\"%# v\\n\", pretty.Formatter(v))\n}", "func (p *Printer) PrintT(template string, v ...interface{}) FullPrinter {\n state, fc := p.initState()\n defer p.reset(state)\n p.formatter.PrintTemplate(fc, template, v...)\n state.Buffer.WriteNewLine()\n p.fc.Writer.Write(state.Buffer)\n return p\n}", "func Sprint(a ...interface{}) string {\n\treturn p.Sprint(a...)\n}", "func PrettyPrint(v interface{}) {\n\tb, _ := json.MarshalIndent(v, \"\", \" \")\n\tprintln(string(b))\n}", "func Print(v interface{}) {\n\tPrintT(\"{{printf \\\"%+v\\\" .}}\", v)\n}", "func Print(v interface{}) {\n\tPrintT(\"{{printf \\\"%+v\\\" .}}\", v)\n}", "func PrettyPrint(v interface{}, prefix string, indent string) (string, error) {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to marshal\")\n\t}\n\n\tvar out bytes.Buffer\n\tif err := json.Indent(&out, b, prefix, indent); err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to indent\")\n\t}\n\n\tif _, err := out.WriteString(\"\\n\"); err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to write string\")\n\t}\n\n\treturn out.String(), nil\n}", "func PrintT(templateString string, v interface{}) {\n\tif printer.Quiet {\n\t\treturn\n\t}\n\tswitch printer.Format {\n\tcase FormatPlain:\n\t\ttpl := template.Must(template.New(\"\").Funcs(printer.templateFuncs).Parse(templateString))\n\t\tsb := &strings.Builder{}\n\t\tif err := tpl.Execute(sb, v); err != nil {\n\t\t\tPrintError(\"Can't print the message using the provided template: \" + templateString)\n\t\t\treturn\n\t\t}\n\t\tprinter.Lines = append(printer.Lines, sb.String())\n\tcase FormatJSON:\n\t\tprinter.Lines = append(printer.Lines, v)\n\t}\n}", "func Sprint(a ...interface{}) string { return fmt.Sprint(a...) 
}", "func FormattedValue(v interface{}) string {\n\tswitch x := v.(type) {\n\tcase int:\n\t\treturn fmt.Sprintf(\"%d\", x)\n\tcase float32, float64:\n\t\treturn fmt.Sprintf(\"%f\", x)\n\tcase string:\n\t\treturn fmt.Sprintf(\"'%s'\", x)\n\tcase bool:\n\t\treturn fmt.Sprintf(\"%v\", x)\n\tcase []interface{}:\n\t\tif len(x) == 0 {\n\t\t\treturn \"\"\n\t\t}\n\t\tswitch x[0].(type) {\n\t\tcase string:\n\t\t\tstr := \"[\"\n\t\t\tfor idx, sVal := range x {\n\t\t\t\tstr += fmt.Sprintf(\"'%s'\", sVal)\n\t\t\t\tif idx != len(x)-1 {\n\t\t\t\t\tstr += \",\"\n\t\t\t\t}\n\t\t\t}\n\t\t\tstr += \"]\"\n\t\t\treturn str\n\t\tcase int, float32, float64, bool:\n\t\t\treturn strings.Join(strings.Fields(fmt.Sprint(x)), \",\")\n\t\tdefault:\n\t\t\tzap.L().Error(\"invalid type for formatted value\", zap.Any(\"type\", reflect.TypeOf(x[0])))\n\t\t\treturn \"\"\n\t\t}\n\tdefault:\n\t\tzap.L().Error(\"invalid type for formatted value\", zap.Any(\"type\", reflect.TypeOf(x)))\n\t\treturn \"\"\n\t}\n}", "func Pretty(v interface{}) (string, error) {\n\tout, err := json.MarshalIndent(v, \"\", \" \")\n\treturn string(out), err\n}", "func PrettyPrint(i interface{}) string {\n\tswitch t := i.(type) {\n\tcase nil:\n\t\treturn \"None\"\n\tcase string:\n\t\treturn capitalizeFirst(t)\n\tdefault:\n\t\treturn capitalizeFirst(fmt.Sprintf(\"%s\", t))\n\t}\n}", "func (p Property) Sprint(a ...interface{}) string {\n\treturn fmt.Sprintf(\"%s%s%s\", p, fmt.Sprint(a...), Reset)\n}", "func pv(v interface{}) {\n\tfmt.Printf(\"%v, %T\\n\", v, v)\n}", "func (v PBETValue) PrettyString(depth uint, withHeader bool, opts ...pretty.Option) string {\n\tvar lines []string\n\tif withHeader {\n\t\tlines = append(lines, pretty.Header(depth, \"PBET Value\", v))\n\t}\n\tlines = append(lines, pretty.SubValue(depth+1, \"PBET Value\", \"\", v.PBETValue(), opts...)...)\n\treturn strings.Join(lines, \"\\n\")\n}", "func valueString(v reflect.Value, opts tagOptions, sf reflect.StructField) string {\n\tfor v.Kind() == reflect.Ptr {\n\t\tif v.IsNil() {\n\t\t\treturn \"\"\n\t\t}\n\t\tv = v.Elem()\n\t}\n\n\tif v.Kind() == reflect.Bool && opts.Contains(\"int\") {\n\t\tif v.Bool() {\n\t\t\treturn \"1\"\n\t\t}\n\t\treturn \"0\"\n\t}\n\n\tif v.Type() == timeType {\n\t\tt := v.Interface().(time.Time)\n\t\tif opts.Contains(\"unix\") {\n\t\t\treturn strconv.FormatInt(t.Unix(), 10)\n\t\t}\n\t\tif opts.Contains(\"unixmilli\") {\n\t\t\treturn strconv.FormatInt((t.UnixNano() / 1e6), 10)\n\t\t}\n\t\tif opts.Contains(\"unixnano\") {\n\t\t\treturn strconv.FormatInt(t.UnixNano(), 10)\n\t\t}\n\t\tif layout := sf.Tag.Get(\"layout\"); layout != \"\" {\n\t\t\treturn t.Format(layout)\n\t\t}\n\t\treturn t.Format(time.RFC3339)\n\t}\n\n\treturn fmt.Sprint(v.Interface())\n}", "func Sprint(val value.Value) string {\n\treturn val.Sprint(ivyCfg)\n}", "func (s *state) printValue(n parse.Node, v reflect.Value) {\n\ts.at(n)\n\tiface, ok := printableValue(v)\n\tif !ok {\n\t\ts.errorf(\"can't print %s of type %s\", n, v.Type())\n\t}\n\tfmt.Fprint(s.wr, iface)\n}", "func PrintT(templateString string, v interface{}) {\n\tswitch printer.Format {\n\tcase FormatPlain:\n\t\tt := template.Must(template.New(\"\").Parse(templateString))\n\t\tvar tpl bytes.Buffer\n\t\tif err := t.Execute(&tpl, v); err != nil {\n\t\t\tPrintError(\"Can't print the message using the provided template: \" + templateString)\n\t\t}\n\t\ttplString := tpl.String()\n\t\tprinter.Lines = append(printer.Lines, tplString)\n\t\tfmt.Fprintln(printer.writer, tplString)\n\tcase FormatJSON:\n\t\tprinter.Lines = append(printer.Lines, 
v)\n\t}\n}", "func PrettyPint(v interface{}) (err error) {\n\tb, err := json.MarshalIndent(v, \"\", \" \")\n\tif err == nil {\n\t\tfmt.Println(string(b))\n\t}\n\treturn\n}", "func (w *QWriter) V(v interface{}) {\n\tfmt.Fprintf(w, \"%v\", v)\n}", "func Format(v interface{}) string {\n\treturn DefaultPrinter.Format(v)\n}", "func (v *V3) Dump() string { return fmt.Sprintf(\"%2.9f\", *v) }", "func PrettyPrint(val interface{}) {\n\to, e := json.MarshalIndent(val, \"\", \" \")\n\tif e != nil {\n\t\tlog.Panic(e.Error())\n\t}\n\tfmt.Printf(string(o))\n\tfmt.Println()\n}", "func (v T) String() string {\n\treturn fmt.Sprint(uint32(v))\n}", "func (p *Printer) PrintTKV(template string, keyValues ...interface{}) FullPrinter {\n state, fc := p.initState()\n defer p.reset(state)\n p.formatter.PrintTemplateKeyValue(fc, template, keyValues...)\n state.Buffer.WriteNewLine()\n p.fc.Writer.Write(state.Buffer)\n return p\n}", "func PrettyReport(v interface{}) string {\n\tb, err := json.MarshalIndent(v, \"\", \" \")\n\tif err == nil {\n\t\treturn string(b)\n\t}\n\treturn \"\"\n}", "func prettyPrint(v interface{}) {\n\tencoder := json.NewEncoder(os.Stdout)\n\tencoder.SetIndent(\"\", \" \")\n\n\tif err := encoder.Encode(v); err != nil {\n\t\tlog.Warning(\"Unable to pretty-print tunnel information, will dump raw data instead...\")\n\t\tfmt.Printf(\"%+v\\n\", v)\n\t}\n}", "func (t *SentryTaggedStruct) valueToString(field reflect.Value) string {\n\tk := field.Kind()\n\tswitch {\n\tcase k == reflect.Bool:\n\t\treturn strconv.FormatBool(field.Bool())\n\tcase k >= reflect.Int && k <= reflect.Int64:\n\t\treturn strconv.FormatInt(field.Int(), 10)\n\tcase k >= reflect.Uint && k <= reflect.Uintptr:\n\t\treturn strconv.FormatUint(field.Uint(), 10)\n\tcase k == reflect.Float32 || k == reflect.Float64:\n\t\tbitSize := 32\n\t\tif k == reflect.Float64 {\n\t\t\tbitSize = 64\n\t\t}\n\t\treturn strconv.FormatFloat(field.Float(), 'f', 12, bitSize)\n\tdefault:\n\t\treturn field.String()\n\t}\n}", "func (p *printer) print(x reflect.Value) {\n\t// Note: This test is only needed because AST nodes\n\t// embed a token.Position, and thus all of them\n\t// understand the String() method (but it only\n\t// applies to the Position field).\n\t// TODO: Should reconsider this AST design decision.\n\n\n\tif !NotNilFilter(\"\", x) {\n\t\tp.printf(\"nil\")\n\t\treturn\n\t}\n\n\tswitch v := x.(type) {\n\tcase *reflect.InterfaceValue:\n\t\tp.print(v.Elem())\n\n\tcase *reflect.MapValue:\n\t\tp.printf(\"%v {\\n\", x.Type())\n\t\tp.indent++\n\t\tfor _, key := range v.Keys() {\n\t\t\tp.print(key)\n\t\t\tp.printf(\": \")\n\t\t\tp.print(v.Elem(key))\n\t\t}\n\t\tp.indent--\n\t\tp.printf(\"}\")\n\n\tcase *reflect.PtrValue:\n\t\tp.printf(\"&\")\n\t\t// type-checked ASTs may contain cycles - use ptrmap\n\t\t// to keep track of objects that have been printed\n\t\t// already and print the respective line number instead\n\t\tptr := v.Interface()\n\t\tif line, exists := p.ptrmap[ptr]; exists {\n\t\t\tp.printf(\"(obj @ %d)\", line)\n\t\t} else {\n\t\t\tp.ptrmap[ptr] = p.line\n\t\t\tp.print(v.Elem())\n\t\t}\n\n\tcase *reflect.SliceValue:\n\t\tif s, ok := v.Interface().([]byte); ok {\n\t\t\tp.printf(\"%#v\", s)\n\t\t\treturn\n\t\t}\n\t\tp.indent++\n\t\tp.printf(\"%s{\\n\", v.Type())\n\t\tfor i, n := 0, v.Len(); i < n; i++ {\n\t\t\tp.print(v.Elem(i))\n\t\t\tp.printf(\",\\n\")\n\t\t}\n\t\tp.indent--\n\t\tp.printf(\"}\")\n\n\tcase *reflect.StructValue:\n\t\tif x.Type() == reflect.Typeof(token.Position{}) 
{\n\t\t\tp.printf(\"token.Position{}\")\n\t\t\treturn\n\t\t}\n\n\t\tp.printf(\"%v {\\n\", x.Type())\n\t\tp.indent++\n\t\tt := v.Type().(*reflect.StructType)\n\t\tfor i, n := 0, t.NumField(); i < n; i++ {\n\t\t\tname := t.Field(i).Name\n\t\t\tvalue := v.Field(i)\n\t\t\tif p.filter == nil || p.filter(name, value) {\n\t\t\t\tp.printf(\"%s: \", name)\n\t\t\t\tp.print(value)\n\t\t\t\tp.printf(\",\\n\")\n\t\t\t}\n\t\t}\n\t\tp.indent--\n\t\tp.printf(\"}\")\n\n\tdefault:\n\t\tp.printf(\"%#v\", x.Interface())\n\t}\n}", "func RunSprint(x interface{}) {\n\tvar msg string\n\n\ttype stringer interface {\n\t\tString() string\n\t}\n\n\tswitch x := x.(type) {\n\tcase stringer:\n\t\tmsg = x.String()\n\tcase string:\n\t\tmsg = x\n\tcase int:\n\t\tmsg = strconv.Itoa(x)\n\tcase bool:\n\t\tif x {\n\t\t\tmsg = \"true\"\n\t\t}\n\t\tmsg = \"false\"\n\tdefault:\n\t\tmsg = \"???\" // array, chan, func, map, pointer, slice, struct\n\t}\n\n\tfmt.Print(msg)\n}", "func printValue(path string, v reflect.Value) {\n\tswitch v.Kind() {\n\tcase reflect.Invalid:\n\t\tfmt.Printf(\"%s = invalid\\n\", path)\n\tcase reflect.Slice, reflect.Array:\n\t\tfor i := 0; i < v.Len(); i++ {\n\t\t\tprintValue(fmt.Sprintf(\"%s[%d]\", path, i), v.Index(i))\n\t\t}\n\tcase reflect.Struct:\n\t\tfor i := 0; i < v.NumField(); i++ {\n\t\t\tfieldPath := fmt.Sprintf(\"%s.%s\", path, v.Type().Field(i).Name)\n\t\t\tprintValue(fieldPath, v.Field(i))\n\t\t}\n\tcase reflect.Map:\n\t\tfor _, key := range v.MapKeys() {\n\t\t\tprintValue(fmt.Sprintf(\"%s[%s]\", path,\n\t\t\t\tformatAtom(key)), v.MapIndex(key))\n\t\t}\n\tcase reflect.Ptr:\n\t\tif v.IsNil() {\n\t\t\tfmt.Printf(\"%s = nil\\n\", path)\n\t\t} else {\n\t\t\tprintValue(fmt.Sprintf(\"(*%s)\", path), v.Elem())\n\t\t}\n\tcase reflect.Interface:\n\t\tif v.IsNil() {\n\t\t\tfmt.Printf(\"%s = nil\\n\", path)\n\t\t} else {\n\t\t\tfmt.Printf(\"%s.type = %s\\n\", path, v.Elem().Type())\n\t\t\tprintValue(path+\".value\", v.Elem())\n\t\t}\n\tdefault: // basic types, channels, funcs\n\t\tfmt.Printf(\"%s = %s\\n\", path, formatAtom(v))\n\t}\n}", "func (f SprinterFunc) Sprint(any Any) string { return f(any) }", "func prettyWithFlags(value interface{}, flags uint32) string {\n\t// Handling the most common types without reflect is a small perf win.\n\tswitch v := value.(type) {\n\tcase bool:\n\t\treturn strconv.FormatBool(v)\n\tcase string:\n\t\tif flags&flagRawString > 0 {\n\t\t\treturn v\n\t\t}\n\t\t// This is empirically faster than strings.Builder.\n\t\treturn `\"` + v + `\"`\n\tcase int:\n\t\treturn strconv.FormatInt(int64(v), 10)\n\tcase int8:\n\t\treturn strconv.FormatInt(int64(v), 10)\n\tcase int16:\n\t\treturn strconv.FormatInt(int64(v), 10)\n\tcase int32:\n\t\treturn strconv.FormatInt(int64(v), 10)\n\tcase int64:\n\t\treturn strconv.FormatInt(int64(v), 10)\n\tcase uint:\n\t\treturn strconv.FormatUint(uint64(v), 10)\n\tcase uint8:\n\t\treturn strconv.FormatUint(uint64(v), 10)\n\tcase uint16:\n\t\treturn strconv.FormatUint(uint64(v), 10)\n\tcase uint32:\n\t\treturn strconv.FormatUint(uint64(v), 10)\n\tcase uint64:\n\t\treturn strconv.FormatUint(v, 10)\n\tcase uintptr:\n\t\treturn strconv.FormatUint(uint64(v), 10)\n\tcase float32:\n\t\treturn strconv.FormatFloat(float64(v), 'f', -1, 32)\n\tcase float64:\n\t\treturn strconv.FormatFloat(v, 'f', -1, 64)\n\t}\n\n\tbuf := bytes.NewBuffer(make([]byte, 0, 256))\n\tt := reflect.TypeOf(value)\n\tif t == nil {\n\t\treturn \"null\"\n\t}\n\tv := reflect.ValueOf(value)\n\tswitch t.Kind() {\n\tcase reflect.Bool:\n\t\treturn strconv.FormatBool(v.Bool())\n\tcase 
reflect.String:\n\t\tif flags&flagRawString > 0 {\n\t\t\treturn v.String()\n\t\t}\n\t\t// This is empirically faster than strings.Builder.\n\t\treturn `\"` + v.String() + `\"`\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn strconv.FormatInt(int64(v.Int()), 10)\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:\n\t\treturn strconv.FormatUint(uint64(v.Uint()), 10)\n\tcase reflect.Float32:\n\t\treturn strconv.FormatFloat(float64(v.Float()), 'f', -1, 32)\n\tcase reflect.Float64:\n\t\treturn strconv.FormatFloat(v.Float(), 'f', -1, 64)\n\tcase reflect.Struct:\n\t\tbuf.WriteRune('{')\n\t\tfor i := 0; i < t.NumField(); i++ {\n\t\t\tf := t.Field(i)\n\t\t\tif f.PkgPath != \"\" {\n\t\t\t\t// reflect says this field is only defined for non-exported fields.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif i > 0 {\n\t\t\t\tbuf.WriteRune(',')\n\t\t\t}\n\t\t\tbuf.WriteRune('\"')\n\t\t\tname := f.Name\n\t\t\tif tag, found := f.Tag.Lookup(\"json\"); found {\n\t\t\t\tif comma := strings.Index(tag, \",\"); comma != -1 {\n\t\t\t\t\tname = tag[:comma]\n\t\t\t\t} else {\n\t\t\t\t\tname = tag\n\t\t\t\t}\n\t\t\t}\n\t\t\tbuf.WriteString(name)\n\t\t\tbuf.WriteRune('\"')\n\t\t\tbuf.WriteRune(':')\n\t\t\tbuf.WriteString(pretty(v.Field(i).Interface()))\n\t\t}\n\t\tbuf.WriteRune('}')\n\t\treturn buf.String()\n\tcase reflect.Slice, reflect.Array:\n\t\tbuf.WriteRune('[')\n\t\tfor i := 0; i < v.Len(); i++ {\n\t\t\tif i > 0 {\n\t\t\t\tbuf.WriteRune(',')\n\t\t\t}\n\t\t\te := v.Index(i)\n\t\t\tbuf.WriteString(pretty(e.Interface()))\n\t\t}\n\t\tbuf.WriteRune(']')\n\t\treturn buf.String()\n\tcase reflect.Map:\n\t\tbuf.WriteRune('{')\n\t\t// This does not sort the map keys, for best perf.\n\t\tit := v.MapRange()\n\t\ti := 0\n\t\tfor it.Next() {\n\t\t\tif i > 0 {\n\t\t\t\tbuf.WriteRune(',')\n\t\t\t}\n\t\t\t// JSON only does string keys.\n\t\t\tbuf.WriteRune('\"')\n\t\t\tbuf.WriteString(prettyWithFlags(it.Key().Interface(), flagRawString))\n\t\t\tbuf.WriteRune('\"')\n\t\t\tbuf.WriteRune(':')\n\t\t\tbuf.WriteString(pretty(it.Value().Interface()))\n\t\t\ti++\n\t\t}\n\t\tbuf.WriteRune('}')\n\t\treturn buf.String()\n\tcase reflect.Ptr, reflect.Interface:\n\t\treturn pretty(v.Elem().Interface())\n\t}\n\treturn fmt.Sprintf(`\"<unhandled-%s>\"`, t.Kind().String())\n}", "func (c *Color) Sprint(a ...interface{}) string {\n\treturn c.wrap(fmt.Sprint(a...))\n}", "func (c *Color) Sprint(a ...interface{}) string {\n\treturn c.wrap(fmt.Sprint(a...))\n}", "func (p BasicTextPrinter) Sprint(a ...interface{}) string {\n\tif p.Style == nil {\n\t\tp.Style = NewStyle()\n\t}\n\treturn p.Style.Sprint(a...)\n}", "func (r *renderer) toString(v reflect.Value, ldelim, rdelim string) (string, error) {\n\tswitch v.Kind() {\n\tcase reflect.String:\n\t\treturn v.String(), nil\n\tcase reflect.Bool:\n\t\treturn strconv.FormatBool(v.Bool()), nil\n\tcase reflect.Complex64, reflect.Complex128:\n\t\treturn fmt.Sprintf(\"%v\", v.Complex()), nil\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn strconv.FormatInt(v.Int(), 10), nil\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn strconv.FormatFloat(v.Float(), 'f', -1, 64), nil\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:\n\t\treturn strconv.FormatUint(v.Uint(), 10), nil\n\tcase reflect.Func:\n\t\tif v.IsNil() {\n\t\t\treturn \"\", nil\n\t\t}\n\n\t\tt := v.Type()\n\t\tisArity0 := t.NumIn() == 0 && t.NumOut() == 1\n\t\tif !isArity0 
{\n\t\t\treturn \"\", nil\n\t\t}\n\n\t\tv = v.Call(nil)[0]\n\t\tif v.Kind() != reflect.String {\n\t\t\treturn r.toString(v, ldelim, rdelim)\n\t\t}\n\t\ttree, err := parse.Parse(\"lambda\", v.String(), ldelim, rdelim)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\ts, err := r.renderToString(tree)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn s, nil\n\n\tcase reflect.Ptr, reflect.Interface:\n\t\treturn r.toString(indirect(v), ldelim, rdelim)\n\tcase reflect.Chan:\n\t\treturn \"\", nil\n\tcase reflect.Invalid:\n\t\treturn \"\", nil\n\tdefault:\n\t\treturn fmt.Sprintf(\"%v\", v.Interface()), nil\n\t}\n}", "func (s Style) Sprint(a ...interface{}) string {\n\treturn fmt.Sprintf(\"%s%s%s\", s, fmt.Sprint(a...), Reset)\n}", "func ToString(v interface{}) string {\n\tswitch v := v.(type) {\n\tcase float64:\n\t\treturn formatFloat64(v)\n\tcase string:\n\t\treturn v\n\tcase Stringer:\n\t\treturn v.String()\n\tdefault:\n\t\treturn Repr(v, NoPretty)\n\t}\n}", "func (p *Printer) Print(v ...interface{}) FullPrinter {\n state, fc := p.initState()\n defer p.reset(state)\n p.formatter.Print(fc, v...)\n p.fc.Writer.Write(state.Buffer)\n return p\n}", "func PrettyPrint(x interface{}) {\n\tb, err := json.MarshalIndent(x, \"\", \" \")\n\tif err != nil {\n\t\tfmt.Println(\"error:\", err)\n\t}\n\tfmt.Println(string(b))\n}", "func (v Value) String() string {\n\tswitch v.Type() {\n\tcase TypeString:\n\t\treturn v.v.Interface().(stringObject).v\n\tcase TypeUndefined:\n\t\treturn \"<undefined>\"\n\tcase TypeNull:\n\t\treturn \"<null>\"\n\tcase TypeBoolean:\n\t\treturn fmt.Sprintf(\"<boolean: %v>\", v.Bool())\n\tcase TypeNumber:\n\t\treturn fmt.Sprintf(\"<number: %v>\", v.Float())\n\tcase TypeSymbol:\n\t\treturn \"<symbol>\"\n\tcase TypeObject:\n\t\treturn \"<object>\"\n\tcase TypeFunction:\n\t\treturn \"<function>\"\n\tdefault:\n\t\tpanic(\"bad type\")\n\t}\n}", "func (v Value) String() string {\n\tswitch {\n\tcase v.FloatValue != nil:\n\t\treturn fmt.Sprintf(\"%v\", *v.FloatValue)\n\tcase v.IntValue != nil:\n\t\treturn fmt.Sprintf(\"%v\", *v.IntValue)\n\tcase v.StringValue != nil:\n\t\treturn fmt.Sprintf(\"%q\", *v.StringValue)\n\tcase v.BooleanValue != nil:\n\t\treturn fmt.Sprintf(\"%v\", *v.BooleanValue)\n\tcase v.ListValue != nil:\n\t\tstrs := []string{}\n\t\tfor _, item := range v.ListValue.Items {\n\t\t\tstrs = append(strs, item.String())\n\t\t}\n\t\treturn \"[\" + strings.Join(strs, \",\") + \"]\"\n\tcase v.MapValue != nil:\n\t\tstrs := []string{}\n\t\tfor _, i := range v.MapValue.Items {\n\t\t\tstrs = append(strs, fmt.Sprintf(\"%v=%v\", i.Name, i.Value))\n\t\t}\n\t\treturn \"{\" + strings.Join(strs, \";\") + \"}\"\n\tdefault:\n\t\tfallthrough\n\tcase v.Null == true:\n\t\treturn \"null\"\n\t}\n}", "func toString(v value) string {\n\tvar b bytes.Buffer\n\twriteValue(&b, v)\n\treturn b.String()\n}", "func ToString(ctx context.Context,\n\tscope Scope, x interface{}) string {\n\tswitch t := x.(type) {\n\tcase fmt.Stringer:\n\t\treturn t.String()\n\n\t\t// Reduce any LazyExpr to materialized types\n\tcase LazyExpr:\n\t\treturn ToString(ctx, scope, t.Reduce(ctx))\n\n\tcase Materializer:\n\t\treturn ToString(ctx, scope, t.Materialize(ctx, scope))\n\n\t\t// Materialize stored queries into an array.\n\tcase StoredQuery:\n\t\treturn ToString(ctx, scope, Materialize(ctx, scope, t))\n\n\t\t// A dict may expose a callable as a member - we just\n\t\t// call it lazily if it is here.\n\tcase func() Any:\n\t\treturn ToString(ctx, scope, t())\n\n\tcase StringProtocol:\n\t\treturn 
t.ToString(scope)\n\n\tcase string:\n\t\treturn t\n\n\tcase *string:\n\t\treturn *t\n\n\tcase []byte:\n\t\treturn string(t)\n\n\tdefault:\n\t\treturn fmt.Sprintf(\"%v\", t)\n\t}\n}", "func (p Property) Sprintln(a ...interface{}) string {\n\treturn fmt.Sprintf(\"%s%s%s\\n\", p, fmt.Sprint(a...), Reset)\n}", "func (v Value) String() string {\n\tif v.Kind() == reflect.String {\n\t\treturn v.Value.String()\n\t}\n\treturn fmt.Sprint(v.Interface())\n}", "func toPrettyJSON(v interface{}) string {\n\toutput, _ := json.MarshalIndent(v, \"\", \" \")\n\treturn string(output)\n}", "func (self *T) String() string {\n\treturn fmt.Sprintf(\"%f %f %f %f\", self[0], self[1], self[2], self[3])\n}", "func Printt(template string, data interface{}) (n int, err error) {\n\treturn Fprintt(os.Stdout, template, data)\n}", "func (t *TimeToLive) String() string {\n\treturn fmt.Sprintf(\"value=%v\", *t)\n}", "func (v Value) String() string {\n\tswitch v.Type {\n\tcase Undefined:\n\t\treturn \"undefined\"\n\tcase Null:\n\t\treturn \"null\"\n\tcase String:\n\t\treturn v.object.(string)\n\tcase Int:\n\t\treturn strconv.FormatInt(v.ToInt(), 10)\n\tcase Float:\n\t\treturn strconv.FormatFloat(v.ToFloat(), 'f', 6, 64)\n\tcase Bool:\n\t\tif v.ToBool() {\n\t\t\treturn \"true\"\n\t\t}\n\t\treturn \"false\"\n\tcase Rune:\n\t\treturn string(v.ToRune())\n\tcase Enum:\n\t\treturn \"[enum]\"\n\tcase Func:\n\t\treturn \"[function]\"\n\tcase NativeFunc:\n\t\treturn \"[native function]\"\n\tcase Bytes:\n\t\treturn \"[bytes]\"\n\tcase Map:\n\t\treturn \"[map]\"\n\tcase Object:\n\t\tif stg, ok := v.object.(fmt.Stringer); ok {\n\t\t\treturn stg.String()\n\t\t}\n\t\tif stg, ok := v.Export(0).(fmt.Stringer); ok {\n\t\t\treturn stg.String()\n\t\t}\n\t\tif n, ok := v.object.(NamedType); ok {\n\t\t\treturn n.Type()\n\t\t}\n\t\treturn fmt.Sprintf(\"[%T]\", v.object)\n\tdefault:\n\t\treturn fmt.Sprintf(\"[%v]\", v.Type)\n\t}\n}", "func Sprint(args ...interface{}) string {\n\treturn wrapString(func() string {\n\t\treturn fmt.Sprint(styleArgs(args)...)\n\t})\n}", "func Dumpv(name string, value interface{}) {\n\tDebugf(\"%s: %v\", keyf(name), value)\n}", "func (t Tower) String() string {\n\treturn fmt.Sprint([]int(t))\n}", "func formatValueWithType(val interface{}) string {\n\tif val == nil {\n\t\treturn fmt.Sprintf(\"%d:nil\", nilType)\n\t}\n\n\tswitch t := val.(type) {\n\t// Custom pq types.\n\tcase *pq.Error:\n\t\treturn fmt.Sprintf(\"%d:%s\", pqErrorType, formatPqError(t))\n\n\t// Custom pgx types.\n\tcase *pgconn.PgError:\n\t\treturn fmt.Sprintf(\"%d:%s\", pgConnErrorType, formatPgConnError(t))\n\n\t// Built-in Go types.\n\tcase string:\n\t\treturn fmt.Sprintf(\"%d:%s\", stringType, strconv.Quote(t))\n\tcase int:\n\t\treturn fmt.Sprintf(\"%d:%d\", intType, val)\n\tcase int64:\n\t\treturn fmt.Sprintf(\"%d:%d\", int64Type, val)\n\tcase float64:\n\t\treturn fmt.Sprintf(\"%d:%g\", float64Type, t)\n\tcase bool:\n\t\treturn fmt.Sprintf(\"%d:%v\", boolType, t)\n\tcase error:\n\t\treturn fmt.Sprintf(\"%d:%s\", errorType, strconv.Quote(t.Error()))\n\tcase time.Time:\n\t\t// time.Format normalizes the +00:00 UTC timezone into \"Z\". This causes\n\t\t// the recorded output to differ from the \"real\" driver output. 
Use a\n\t\t// format that's round-trippable by parseValueWithType.\n\t\ts := t.Format(time.RFC3339Nano)\n\t\tif strings.HasSuffix(s, \"Z\") && t.Location() != time.UTC {\n\t\t\ts = s[:len(s)-1] + \"+00:00\"\n\t\t}\n\t\treturn fmt.Sprintf(\"%d:%s\", timeType, s)\n\tcase []string:\n\t\tvar buf bytes.Buffer\n\t\tbuf.WriteByte('[')\n\t\tfor i, s := range t {\n\t\t\tif i != 0 {\n\t\t\t\tbuf.WriteByte(',')\n\t\t\t}\n\t\t\tbuf.WriteString(strconv.Quote(s))\n\t\t}\n\t\tbuf.WriteByte(']')\n\t\treturn fmt.Sprintf(\"%d:%s\", stringSliceType, buf.String())\n\tcase []byte:\n\t\ts := base64.RawStdEncoding.EncodeToString(t)\n\t\treturn fmt.Sprintf(\"%d:%s\", byteSliceType, s)\n\tcase []driver.Value:\n\t\tvar buf bytes.Buffer\n\t\tbuf.WriteByte('[')\n\t\tfor i, v := range t {\n\t\t\tif i != 0 {\n\t\t\t\tbuf.WriteByte(',')\n\t\t\t}\n\t\t\tbuf.WriteString(formatValueWithType(v))\n\t\t}\n\t\tbuf.WriteByte(']')\n\t\treturn fmt.Sprintf(\"%d:%s\", valueSliceType, buf.String())\n\tdefault:\n\t\tpanic(fmt.Errorf(\"unsupported type: %T\", t))\n\t}\n}", "func ToString(v Value) string {\n\tif s, ok := v.(Stringer); ok {\n\t\treturn s.String()\n\t}\n\treturn v.Repr()\n}", "func ToString(v Value) string {\n\tif s, ok := v.(Stringer); ok {\n\t\treturn s.String()\n\t}\n\treturn v.Repr()\n}", "func (p *Printer) Sprint(a ...interface{}) string {\n\tpp := newPrinter(p)\n\tpp.doPrint(a)\n\ts := pp.String()\n\tpp.free()\n\treturn s\n}", "func (qt QueryTimes) PrettyPrint() {\n\tfmt.Println(\"No of processed queries:\", len(qt))\n\tfmt.Println(\"Minimum query time: \", qt.min())\n\tfmt.Println(\"Maximum query time: \", qt.max())\n\tfmt.Println(\"Median query time: \", qt.median())\n\tfmt.Println(\"Average query time: \", qt.average())\n\tfmt.Println(\"Total processing time:\", qt.sum())\n}", "func (me TOpacityValueType) String() string { return xsdt.String(me).String() }", "func (p BasicTextPrinter) Sprintln(a ...interface{}) string {\n\tstr := fmt.Sprintln(a...)\n\treturn Sprintln(p.Sprint(str))\n}", "func (d duration) pretty() string {\n\treturn fmt.Sprintf(\"Duration: %d\", &d) // modify *duration and *d => &d\n}", "func (p *Printer) Format(v any) string {\n\tvar b strings.Builder\n\n\tif _, err := p.Write(&b, v); err != nil {\n\t\t// CODE COVERAGE: At the time of writing, strings.Builder.Write() never\n\t\t// returns an error.\n\t\tpanic(err)\n\t}\n\n\treturn b.String()\n}", "func (v Value10) String() string { return unparse(int64(v), 1000, mult10) }", "func (v Value) String() string {\n\tif v.Typ == '$' {\n\t\treturn string(v.Str)\n\t}\n\tswitch v.Typ {\n\tcase '+', '-':\n\t\treturn string(v.Str)\n\tcase ':':\n\t\treturn strconv.FormatInt(int64(v.IntegerV), 10)\n\tcase '*':\n\t\tbuf := bytes.NewBuffer(nil)\n\t\tconcatArray(buf, v.ArrayV...)\n\t\treturn strings.TrimSuffix(buf.String(), \" \")\n\tcase '\\r':\n\t\treturn \"\\r\\n\"\n\t}\n\treturn \"\"\n}", "func Sprint(a ...any) string {\n\tapplyColors(&a, -1, DisableColors)\n\treturn fmt.Sprint(a...)\n}", "func (flag *sliceValue[T]) String() string {\n\tif flag.values == nil {\n\t\treturn \"\"\n\t}\n\n\tvar vals []string\n\n\tfor _, val := range *flag.values {\n\t\tvals = append(vals, flag.valueType.Clone(&val).String())\n\t}\n\n\treturn strings.Join(vals, flag.valSep)\n}", "func (p *Printer) Sprint(a ...interface{}) string {\n\treturn RenderCode(p.String(), a...)\n}", "func PrettyPrint(s string, indentLevel int, color Color) {\n\tvar tabStr string\n\tfor i := 0; i < indentLevel; i++ {\n\t\ttabStr += \"\\t\"\n\t}\n\tstr := fmt.Sprintf(\"%s%s\\n\\n\", tabStr, 
regexp.MustCompile(\"\\n\").ReplaceAllString(s, \"\\n\"+tabStr))\n\tif color != NoColor {\n\t\tif cfunc, ok := colorFunc[color]; !ok {\n\t\t\tfmt.Print(\"COLOR NOT SUPPORTED\")\n\t\t} else {\n\t\t\tcfunc(str)\n\t\t}\n\t} else {\n\t\tfmt.Print(str)\n\t}\n}", "func (scp *SCP) PrettyPrint(indentation int) string {\n\treturn strings.Repeat(\" \", indentation) + scp.String()\n}", "func stringValue(v reflect.Value) string {\n\tif v.CanInterface() {\n\t\treturn fmt.Sprintf(\"%#v\", v.Interface())\n\t} else {\n\t\treturn v.String()\n\t}\n}", "func toPrettyJson(v interface{}) string {\n\toutput, _ := json.MarshalIndent(v, \"\", \" \")\n\treturn string(output)\n}", "func FormatValue(val reflect.Value, config *FormatConfig) string {\n\tif !val.IsValid() {\n\t\treturn config.Nil\n\t}\n\tderefVal, derefType := reflection.DerefValueAndType(val)\n\tif f, ok := config.TypeFormatters[derefType]; ok && derefVal.IsValid() {\n\t\treturn f.FormatValue(derefVal, config)\n\t}\n\n\tif nullable.ReflectIsNull(val) {\n\t\treturn config.Nil\n\t}\n\n\ttextMarshaller, _ := val.Interface().(encoding.TextMarshaler)\n\tif textMarshaller == nil && val.CanAddr() {\n\t\ttextMarshaller, _ = val.Addr().Interface().(encoding.TextMarshaler)\n\t}\n\tif textMarshaller == nil {\n\t\ttextMarshaller, _ = derefVal.Interface().(encoding.TextMarshaler)\n\t}\n\tif textMarshaller != nil {\n\t\ttext, err := textMarshaller.MarshalText()\n\t\tif err != nil {\n\t\t\treturn string(text)\n\t\t}\n\t}\n\n\tswitch derefType.Kind() {\n\tcase reflect.Bool:\n\t\tif derefVal.Bool() {\n\t\t\treturn config.True\n\t\t}\n\t\treturn config.False\n\n\tcase reflect.String:\n\t\treturn derefVal.String()\n\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn float.Format(\n\t\t\tderefVal.Float(),\n\t\t\tconfig.Float.ThousandsSep,\n\t\t\tconfig.Float.DecimalSep,\n\t\t\tconfig.Float.Precision,\n\t\t\tconfig.Float.PadPrecision,\n\t\t)\n\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn strconv.FormatInt(derefVal.Int(), 10)\n\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\treturn strconv.FormatUint(derefVal.Uint(), 10)\n\t}\n\n\tif s, ok := val.Interface().(fmt.Stringer); ok {\n\t\treturn s.String()\n\t}\n\tif val.CanAddr() {\n\t\tif s, ok := val.Addr().Interface().(fmt.Stringer); ok {\n\t\t\treturn s.String()\n\t\t}\n\t}\n\tif s, ok := derefVal.Interface().(fmt.Stringer); ok {\n\t\treturn s.String()\n\t}\n\n\tswitch x := derefVal.Interface().(type) {\n\tcase []byte:\n\t\treturn string(x)\n\tdefault:\n\t\treturn fmt.Sprint(val.Interface())\n\t}\n}", "func (self *T) String() string {\n\treturn fmt.Sprintf(\"%f %f\", self[0], self[1])\n}", "func (this *structHelper) valueToString(v reflect.Value) (string, error) {\n\tif !v.IsValid() {\n\t\treturn \"null\", nil\n\t}\n\n\tswitch v.Kind() {\n\tcase reflect.Ptr:\n\t\treturn this.valueToString(reflect.Indirect(v))\n\tcase reflect.Interface:\n\t\treturn this.valueToString(v.Elem())\n\tcase reflect.Bool:\n\t\tx := v.Bool()\n\t\tif x {\n\t\t\treturn \"true\", nil\n\t\t} else {\n\t\t\treturn \"false\", nil\n\t\t}\n\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn strconv.FormatInt(v.Int(), 10), nil\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:\n\t\treturn strconv.FormatUint(v.Uint(), 10), nil\n\tcase reflect.UnsafePointer:\n\t\treturn strconv.FormatUint(uint64(v.Pointer()), 10), nil\n\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn 
strconv.FormatFloat(v.Float(), 'g', -1, 64), nil\n\n\tcase reflect.String:\n\t\treturn v.String(), nil\n\n\t\t//This is kind of a rough hack to replace the old []byte\n\t\t//detection with reflect.Uint8Type, it doesn't catch\n\t\t//zero-length byte slices\n\tcase reflect.Slice:\n\t\ttyp := v.Type()\n\t\tif typ.Elem().Kind() == reflect.Uint || typ.Elem().Kind() == reflect.Uint8 || typ.Elem().Kind() == reflect.Uint16 || typ.Elem().Kind() == reflect.Uint32 || typ.Elem().Kind() == reflect.Uint64 || typ.Elem().Kind() == reflect.Uintptr {\n\t\t\tif v.Len() > 0 {\n\t\t\t\tif v.Index(0).OverflowUint(257) {\n\t\t\t\t\treturn string(v.Interface().([]byte)), nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t//fmt.Println(\"v.Kind()\", v.Kind().String())\n\n\treturn \"\", errors.New(\"Unsupported type -- \" + v.Kind().String())\n}", "func stringify(valueOf reflect.Value, nonStringPostFix string) string {\n\tswitch valueOf.Kind() {\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn strconv.FormatInt(valueOf.Int(), 10) + nonStringPostFix\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\treturn strconv.FormatUint(valueOf.Uint(), 10) + nonStringPostFix\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn strconv.FormatFloat(valueOf.Float(), 'f', -1, 64) + nonStringPostFix\n\tcase reflect.String:\n\t\treturn valueOf.String()\n\tcase reflect.Ptr:\n\t\ti := reflect.Indirect(valueOf)\n\t\treturn stringify(i, nonStringPostFix)\n\n\tdefault:\n\t\treturn \"\"\n\t}\n}", "func Stringify(v interface{}, opts ...interface{}) string {\n\tvar buf []byte\n\tvar err error\n\tif len(opts) > 0 {\n\t\tbuf, err = json.MarshalIndent(v, \"\", \"\\t\")\n\t} else {\n\t\tbuf, err = json.Marshal(v)\n\t}\n\tif err != nil {\n\t\treturn fmt.Sprintf(\"<error:%v>\", err)\n\t}\n\treturn string(buf)\n}", "func (me TClipValueType) String() string { return xsdt.String(me).String() }", "func Debugv(v ...interface{}) {\n\t// idiomatic debug\n\tif !isVeryVerbose {\n\t\treturn\n\t}\n\tdebugPrinter.Print(v...)\n}", "func (me TSpacingValueType) String() string { return xsdt.String(me).String() }", "func ClickHouseFormattedValue(v interface{}) string {\n\tswitch x := v.(type) {\n\tcase int:\n\t\treturn fmt.Sprintf(\"%d\", x)\n\tcase float32, float64:\n\t\treturn fmt.Sprintf(\"%f\", x)\n\tcase string:\n\t\treturn fmt.Sprintf(\"'%s'\", x)\n\tcase bool:\n\t\treturn fmt.Sprintf(\"%v\", x)\n\tcase []interface{}:\n\t\tif len(x) == 0 {\n\t\t\treturn \"\"\n\t\t}\n\t\tswitch x[0].(type) {\n\t\tcase string:\n\t\t\tstr := \"[\"\n\t\t\tfor idx, sVal := range x {\n\t\t\t\tstr += fmt.Sprintf(\"'%s'\", sVal)\n\t\t\t\tif idx != len(x)-1 {\n\t\t\t\t\tstr += \",\"\n\t\t\t\t}\n\t\t\t}\n\t\t\tstr += \"]\"\n\t\t\treturn str\n\t\tcase int, float32, float64, bool:\n\t\t\treturn strings.Join(strings.Fields(fmt.Sprint(x)), \",\")\n\t\tdefault:\n\t\t\tzap.S().Error(\"invalid type for formatted value\", zap.Any(\"type\", reflect.TypeOf(x[0])))\n\t\t\treturn \"\"\n\t\t}\n\tdefault:\n\t\tzap.S().Error(\"invalid type for formatted value\", zap.Any(\"type\", reflect.TypeOf(x)))\n\t\treturn \"\"\n\t}\n}", "func (t *testResult) PrettyPrint() string {\n\tout := t.PrettyPrintLines()\n\treturn strings.Join(out, \"\\n\")\n}", "func printResult(w io.Writer, v interface{}) {\n\t// TODO the result should be formatted in Go syntax, without\n\t// package qualifiers for types defined within the interpreter.\n\tfmt.Fprintf(w, \"%+v\", v)\n}", "func printableValue(v reflect.Value) (interface{}, bool) {\n\tif v.Kind() 
== reflect.Ptr {\n\t\tv, _ = indirect(v) // fmt.Fprint handles nil.\n\t}\n\tif !v.IsValid() {\n\t\treturn \"<no value>\", true\n\t}\n\n\tif !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) {\n\t\tif v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) {\n\t\t\tv = v.Addr()\n\t\t} else {\n\t\t\tswitch v.Kind() {\n\t\t\tcase reflect.Chan, reflect.Func:\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t}\n\t}\n\treturn v.Interface(), true\n}", "func ToString(v Value) string {\n\treturn quad.ToString(v)\n}", "func (p *Printer) PrintSKV(message string, keyValues ...interface{}) FullPrinter {\n state, fc := p.initState()\n defer p.reset(state)\n p.formatter.PrintStructKeyValues(fc, message, keyValues...)\n p.fc.Writer.Write(state.Buffer)\n return p\n}", "func PrettyString(inObj interface{}, depthLimit int) string {\n\tvar buf bytes.Buffer\n\tformObj(&buf, reflect.ValueOf(inObj), 0, depthLimit)\n\tbuf.WriteString(\"\\n\")\n\treturn buf.String()\n}", "func ToString(v Value) string {\n\tswitch v := v.(type) {\n\tcase nil:\n\t\treturn \"\"\n\tcase String:\n\t\treturn string(v)\n\tcase Int:\n\t\treturn strconv.FormatInt(int64(v), 10)\n\tcase Uint:\n\t\treturn strconv.FormatUint(uint64(v), 10)\n\tcase Bool:\n\t\tif v {\n\t\t\treturn \"true\"\n\t\t}\n\t\treturn \"false\"\n\tcase Float:\n\t\treturn strconv.FormatFloat(float64(v), 'g', -1, 64)\n\tdefault:\n\t\treturn fmt.Sprint(v)\n\t}\n}", "func (me TReviewPolicyLevel) String() string { return xsdt.String(me).String() }", "func PrettyPrintStruct(data interface{}) string {\n\tv := reflect.ValueOf(data)\n\tmethod := v.MethodByName(\"String\")\n\tif method.IsValid() && method.Type().NumIn() == 0 && method.Type().NumOut() == 1 &&\n\t\tmethod.Type().Out(0).Kind() == reflect.String {\n\t\tresult := method.Call([]reflect.Value{})\n\t\treturn result[0].String()\n\t}\n\treturn fmt.Sprintf(\"%#v\", data)\n}", "func (ps *Parser) PrettyPrint() string {\n\tindent, output := 0, \"\"\n\tfor _, section := range ps.Tokens.Sections {\n\t\toutput += \"<\" + section.Type + \">\" + \"\\n\"\n\t\tfor _, item := range section.Items {\n\t\t\tindent++\n\t\t\tfor i := 0; i < indent; i++ {\n\t\t\t\toutput += \"\\t\"\n\t\t\t}\n\t\t\tif len(item.Parts) == 0 {\n\t\t\t\toutput += item.TValue + \" <\" + item.TType + \">\" + \"\\n\"\n\t\t\t} else {\n\t\t\t\toutput += \"<\" + item.TType + \">\" + \"\\n\"\n\t\t\t}\n\t\t\tfor _, part := range item.Parts {\n\t\t\t\tindent++\n\t\t\t\tfor i := 0; i < indent; i++ {\n\t\t\t\t\toutput += \"\\t\"\n\t\t\t\t}\n\t\t\t\toutput += part.Token.TValue + \" <\" + part.Token.TType + \">\" + \"\\n\"\n\t\t\t\tindent--\n\t\t\t}\n\t\t\tindent--\n\t\t}\n\t}\n\treturn output\n}", "func (pt MDTurbo) String() string {\n\tvar buf bytes.Buffer\n\tw := tabwriter.NewWriter(&buf, 6, 0, 4, ' ', tabwriter.AlignRight|tabwriter.Debug)\n\tfmt.Fprintf(w, PrettyPrint(pt))\n\tw.Flush()\n\treturn buf.String()\n}", "func (me TBaselineShiftValueType) String() string { return xsdt.String(me).String() }", "func String(v *Value, def string) string {\n\ts, err := v.String()\n\tif err != nil {\n\t\treturn def\n\t}\n\treturn s\n}", "func ToString(val any) string {\n\tif val == nil {\n\t\treturn \"nil\"\n\t}\n\n\tswitch tval := val.(type) {\n\tcase reflect.Value:\n\t\tnewVal, ok := dark.GetInterface(tval, true)\n\t\tif ok {\n\t\t\treturn ToString(newVal)\n\t\t}\n\n\tcase []reflect.Value:\n\t\tvar buf bytes.Buffer\n\t\tSliceToBuffer(&buf, tval)\n\t\treturn buf.String()\n\n\t\t// no \"(string) \" prefix for 
printable strings\n\tcase string:\n\t\treturn tdutil.FormatString(tval)\n\n\t\t// no \"(int) \" prefix for ints\n\tcase int:\n\t\treturn strconv.Itoa(tval)\n\n\t\t// no \"(float64) \" prefix for float64s\n\tcase float64:\n\t\ts := strconv.FormatFloat(tval, 'g', -1, 64)\n\t\tif strings.ContainsAny(s, \"e.IN\") { // I for Inf, N for NaN\n\t\t\treturn s\n\t\t}\n\t\treturn s + \".0\" // to distinguish from ints\n\n\t\t// no \"(bool) \" prefix for booleans\n\tcase bool:\n\t\treturn TernStr(tval, \"true\", \"false\")\n\n\tcase types.TestDeepStringer:\n\t\treturn tval.String()\n\t}\n\n\treturn tdutil.SpewString(val)\n}", "func (scope *Scope) Pretty(level int) string {\n\tpadding := \"\"\n\tfor i := 0; i < level; i++ {\n\t\tpadding += \" \"\n\t}\n\tvar buffer bytes.Buffer\n\tfor k, v := range scope.variables {\n\t\tbuffer.WriteString(fmt.Sprintf(\"%s%s %v\\n\", padding, k, v))\n\t}\n\tif scope.previous != nil {\n\t\tbuffer.WriteString(scope.previous.Pretty(level + 1))\n\t}\n\treturn buffer.String()\n}", "func formatValue(format string, value rel.Value) string {\n\tvar v interface{}\n\tif set, ok := value.(rel.Set); ok {\n\t\tif s, is := rel.AsString(set); is {\n\t\t\tv = s\n\t\t} else {\n\t\t\tv = rel.Repr(set)\n\t\t}\n\t} else {\n\t\tv = value.Export()\n\t}\n\tswitch format[len(format)-1] {\n\tcase 't':\n\t\tv = value.IsTrue()\n\tcase 'c', 'd', 'o', 'O', 'x', 'X', 'U':\n\t\tv = int(value.Export().(float64))\n\tcase 'q':\n\t\tif f, ok := v.(float64); ok {\n\t\t\tv = int(f)\n\t\t}\n\t}\n\treturn fmt.Sprintf(format, v)\n}", "func TestFormatterPrintln(t *testing.T) {\n\tf := F(1)\n\texpect := \"<v=F(1)>\\n\"\n\ts := Sprint(f, \"\\n\")\n\tif s != expect {\n\t\tt.Errorf(\"Sprint wrong with Formatter: expected %q got %q\", expect, s)\n\t}\n\ts = Sprintln(f)\n\tif s != expect {\n\t\tt.Errorf(\"Sprintln wrong with Formatter: expected %q got %q\", expect, s)\n\t}\n\ts = Sprintf(\"%v\\n\", f)\n\tif s != expect {\n\t\tt.Errorf(\"Sprintf wrong with Formatter: expected %q got %q\", expect, s)\n\t}\n}" ]
[ "0.66413844", "0.60485935", "0.6026737", "0.6004122", "0.5987677", "0.5987677", "0.5957976", "0.5933606", "0.58966583", "0.5868913", "0.58346736", "0.5832137", "0.5808272", "0.5807705", "0.5788891", "0.578023", "0.57239884", "0.5717909", "0.5705717", "0.5700035", "0.56876427", "0.5656148", "0.56376964", "0.5618413", "0.56116456", "0.5576198", "0.55665565", "0.5566052", "0.55367273", "0.55343986", "0.5528661", "0.5504941", "0.5498015", "0.5491545", "0.54772145", "0.54772145", "0.5465324", "0.54109687", "0.53456646", "0.5344696", "0.53434455", "0.53233695", "0.53214645", "0.53145087", "0.53125185", "0.5307339", "0.53068405", "0.53019685", "0.52973956", "0.5297053", "0.52957046", "0.5291445", "0.5288276", "0.52729475", "0.52677196", "0.52666706", "0.52577126", "0.525753", "0.525753", "0.52450895", "0.52394426", "0.5231355", "0.52264875", "0.52254295", "0.52247626", "0.52201545", "0.5215066", "0.52119833", "0.5210268", "0.51921713", "0.5179073", "0.5176005", "0.5175926", "0.5171018", "0.5146041", "0.51424026", "0.51416075", "0.51376975", "0.51361644", "0.51330274", "0.5123131", "0.5117238", "0.5109437", "0.51080096", "0.51030415", "0.5090821", "0.5089796", "0.5072846", "0.5072384", "0.5068007", "0.5067909", "0.50664496", "0.50630105", "0.5058672", "0.5046121", "0.5043807", "0.504351", "0.50431424", "0.5039758", "0.50373644" ]
0.6356819
1
Digest computes the digest for value v, given type t.
func Digest(v T, t *types.T) digest.Digest { w := Digester.NewWriter() WriteDigest(w, v, t) return w.Digest() }
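For orientation, the document above is a digest-by-type routine: it opens a digest writer, delegates to a WriteDigest helper that encodes the value according to its type (that helper appears as the first entry in the negatives list below, with a one-byte kind tag followed by a canonical encoding per kind), and returns the accumulated digest. The sketch below reproduces only that tag-then-encode pattern using the Go standard library; the package-specific Digester, types.T, and digest.Digest types are not reproduced, and digestValue/writeValue are illustrative names chosen here, not the library's API.

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
	"io"
	"sort"
)

// digestValue streams a canonical encoding of v into a SHA-256 hash and
// returns the sum, mirroring the NewWriter/WriteDigest/Digest shape above.
func digestValue(v interface{}) []byte {
	h := sha256.New()
	writeValue(h, v)
	return h.Sum(nil)
}

// writeValue writes a one-byte kind tag, then a kind-specific encoding.
// Only a few kinds are handled in this sketch.
func writeValue(w io.Writer, v interface{}) {
	switch x := v.(type) {
	case bool:
		w.Write([]byte{0})
		if x {
			w.Write([]byte{1})
		} else {
			w.Write([]byte{0})
		}
	case int64:
		w.Write([]byte{1})
		var buf [8]byte
		binary.BigEndian.PutUint64(buf[:], uint64(x))
		w.Write(buf[:])
	case string:
		w.Write([]byte{2})
		io.WriteString(w, x)
	case map[string]string:
		// Sort keys so the digest is deterministic, analogous to the map
		// case in the retrieved WriteDigest, which sorts entries before
		// hashing them.
		w.Write([]byte{3})
		keys := make([]string, 0, len(x))
		for k := range x {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		for _, k := range keys {
			io.WriteString(w, k)
			io.WriteString(w, x[k])
		}
	}
}

func main() {
	fmt.Printf("%x\n", digestValue(map[string]string{"a": "1", "b": "2"}))
}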
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func WriteDigest(w io.Writer, v T, t *types.T) {\n\tif d, ok := v.(digester); ok {\n\t\tdigest.WriteDigest(w, d.Digest())\n\t\treturn\n\t}\n\n\tw.Write([]byte{t.Kind.ID()})\n\tswitch t.Kind {\n\tcase types.ErrorKind, types.BottomKind, types.RefKind:\n\t\tpanic(\"illegal type\")\n\tcase types.IntKind:\n\t\tvi := v.(*big.Int)\n\t\t// Bytes returns the normalized big-endian (i.e., free of a zero\n\t\t// prefix) representation of the absolute value of the integer.\n\t\tp := vi.Bytes()\n\t\tif len(p) == 0 {\n\t\t\t// This is the representation of \"0\"\n\t\t\treturn\n\t\t}\n\t\tif p[0] == 0 {\n\t\t\tpanic(\"big.Int byte representation is not normalized\")\n\t\t}\n\t\tif vi.Sign() < 0 {\n\t\t\tw.Write([]byte{0})\n\t\t}\n\t\tw.Write(p)\n\tcase types.FloatKind:\n\t\tw.Write([]byte(v.(*big.Float).Text('e', 10)))\n\tcase types.StringKind:\n\t\tio.WriteString(w, v.(string))\n\tcase types.BoolKind:\n\t\tif v.(bool) {\n\t\t\tw.Write(trueByte)\n\t\t} else {\n\t\t\tw.Write(falseByte)\n\t\t}\n\tcase types.FileKind:\n\t\tdigest.WriteDigest(w, v.(reflow.File).Digest())\n\tcase types.DirKind:\n\t\tdir := v.(Dir)\n\t\tfor scan := dir.Scan(); scan.Scan(); {\n\t\t\tio.WriteString(w, scan.Path())\n\t\t\tdigest.WriteDigest(w, scan.File().Digest())\n\t\t}\n\t// Filesets are digesters, so they don't need to be handled here.\n\tcase types.UnitKind:\n\tcase types.ListKind:\n\t\twriteLength(w, len(v.(List)))\n\t\tfor _, e := range v.(List) {\n\t\t\tWriteDigest(w, e, t.Elem)\n\t\t}\n\tcase types.MapKind:\n\t\tm := v.(*Map)\n\t\twriteLength(w, m.Len())\n\t\ttype kd struct {\n\t\t\tk T\n\t\t\td digest.Digest\n\t\t}\n\t\tkeys := make([]kd, 0, m.Len())\n\t\tfor _, entryp := range m.tab {\n\t\t\tfor entry := *entryp; entry != nil; entry = entry.Next {\n\t\t\t\tkeys = append(keys, kd{entry.Key, Digest(entry.Key, t.Index)})\n\t\t\t}\n\t\t}\n\t\t// Sort the map so that it produces a consistent digest. 
We sort\n\t\t// its keys by their digest because the values may not yet be\n\t\t// evaluated.\n\t\tsort.Slice(keys, func(i, j int) bool {\n\t\t\treturn keys[i].d.Less(keys[j].d)\n\t\t})\n\t\tfor _, k := range keys {\n\t\t\tWriteDigest(w, k.k, t.Index)\n\t\t\tWriteDigest(w, m.Lookup(k.d, k.k), t.Elem)\n\t\t}\n\tcase types.TupleKind:\n\t\twriteLength(w, len(t.Fields))\n\t\ttuple := v.(Tuple)\n\t\tfor i, f := range t.Fields {\n\t\t\tWriteDigest(w, tuple[i], f.T)\n\t\t}\n\tcase types.StructKind:\n\t\twriteLength(w, len(t.Fields))\n\t\ts := v.(Struct)\n\t\tkeys := make([]string, len(t.Fields))\n\t\tfor i, f := range t.Fields {\n\t\t\tkeys[i] = f.Name\n\t\t}\n\t\tsort.Strings(keys)\n\t\tfm := t.FieldMap()\n\t\tfor _, k := range keys {\n\t\t\tWriteDigest(w, s[k], fm[k])\n\t\t}\n\tcase types.ModuleKind:\n\t\twriteLength(w, len(t.Fields))\n\t\ts := v.(Module)\n\t\tkeys := make([]string, len(t.Fields))\n\t\tfor i, f := range t.Fields {\n\t\t\tkeys[i] = f.Name\n\t\t}\n\t\tsort.Strings(keys)\n\t\tfm := t.FieldMap()\n\t\tfor _, k := range keys {\n\t\t\tWriteDigest(w, s[k], fm[k])\n\t\t}\n\tcase types.SumKind:\n\t\tvariant := v.(*Variant)\n\t\tio.WriteString(w, variant.Tag)\n\t\tWriteDigest(w, variant.Elem, t.VariantMap()[variant.Tag])\n\tcase types.FuncKind:\n\t\tdigest.WriteDigest(w, v.(Func).Digest())\n\t}\n}", "func (q *Qsign) Digest(v interface{}) ([]byte, error) {\n\tbuf := new(bytes.Buffer)\n\n\tif q.prefixGenerator != nil {\n\t\tif _, err := buf.WriteString(q.prefixGenerator()); err != nil {\n\t\t\treturn buf.Bytes(), err\n\t\t}\n\t}\n\n\tvs := getStructValues(v)\n\n\tpairs := []string{}\n\tfor _, f := range vs {\n\t\tif !q.filter(f.name, f.value) {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar buf strings.Builder\n\t\tbuf.WriteString(f.name)\n\t\tbuf.WriteString(q.connector)\n\t\tbuf.WriteString(f.value)\n\n\t\tpairs = append(pairs, buf.String())\n\t}\n\tconnected := strings.Join(pairs, q.delimiter)\n\tbuf.WriteString(connected)\n\n\tif q.suffixGenerator != nil {\n\t\tif _, err := buf.WriteString(q.suffixGenerator()); err != nil {\n\t\t\treturn buf.Bytes(), err\n\t\t}\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func HashFromDigest(algo Type, digest Digest) Hash {\n\treturn HashFromSum(algo, digest[:])\n}", "func (o VirtualDatabaseStatusOutput) Digest() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v VirtualDatabaseStatus) *string { return v.Digest }).(pulumi.StringPtrOutput)\n}", "func (b SignDetail) Digest() (common.Hash, error) {\n\tvar hash common.Hash\n\tvar signFormatData apitypes.TypedData\n\tif err := json.Unmarshal([]byte(b.SignSchema.Schema), &signFormatData); err != nil {\n\t\treturn hash, err\n\t}\n\tparams, err := b.GetContractParams()\n\tif err != nil {\n\t\treturn hash, err\n\t}\n\tdata, err := buildTypedData(signFormatData, params)\n\n\tif err != nil {\n\t\treturn hash, err\n\t}\n\thash, err = crypto2.Keccak256HashEIP712(data)\n\treturn hash, err\n}", "func Hash(value int64) uint64 {\n\treturn FNVHash64(uint64(value))\n}", "func (t *Tree) Digest() *crypto.Digest { return t.dig }", "func (tc ScannerTestcase) Digest() claircore.Digest {\n\td, err := claircore.ParseDigest(tc.Hash)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn d\n}", "func (n *Node) Digest() ([]byte, error) {\n\t// HMAC(Nonce,Inputs[*]|(Cryptex||Secret||Marker))\n\thash := hmac.New(sha256.New, n.Nonce)\n\n\tfor _, input := range n.Inputs {\n\t\tif _, err := hash.Write(input); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tvar (\n\t\tdata []byte\n\t\terr error\n\t)\n\n\tswitch {\n\tcase n.cryptex != nil:\n\t\tdata, err = 
n.cryptex.Marshal()\n\tcase n.secret != nil:\n\t\tdata, err = n.secret.Marshal()\n\tcase n.Marker != nil:\n\t\tdata, err = n.Marker.Marshal()\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif _, err := hash.Write(data); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn hash.Sum(nil), nil\n}", "func HashOf(v Value) []byte {\n\treturn quad.HashOf(v)\n}", "func (o GetReposRepoTagOutput) Digest() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetReposRepoTag) string { return v.Digest }).(pulumi.StringOutput)\n}", "func (commit *Commit) Digest() []byte {\n\treturn commit.signingID.Hash(commit.signedRequest.GetRequest())\n}", "func (o VirtualDatabaseStatusPtrOutput) Digest() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *VirtualDatabaseStatus) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Digest\n\t}).(pulumi.StringPtrOutput)\n}", "func Hash(t *Token) (hash []byte) {\n var sum []byte\n\n // Compute the SHA1 sum of the Token\n {\n shasum := sha1.Sum([]byte(salt+string(*t)))\n copy(sum[:], shasum[:20])\n }\n\n // Encode the sum to hexadecimal\n hex.Encode(sum, sum)\n\n return\n}", "func DigestSize() int {\n\treturn sha256DigestSize\n}", "func (oc *OAuthConsumer) digest(key string, m string) string {\n\th := hmac.NewSHA1([]byte(key))\n\th.Write([]byte(m))\n\treturn base64encode(h.Sum())\n\n/*\ts := bytes.TrimSpace(h.Sum())\n\td := make([]byte, base64.StdEncoding.EncodedLen(len(s)))\n\tbase64.StdEncoding.Encode(d, s)\n\tds := strings.TrimSpace(bytes.NewBuffer(d).String())\n*/\n//\treturn ds\n\n}", "func (s *ShardMap) hash(v interface{}) int {\n\tswitch s.Type {\n\tcase \"string\":\n\t\tval, ok := v.(string)\n\t\tif !ok {\n\t\t\treturn -1\n\t\t}\n\n\t\thash := fnv.New32()\n\t\thash.Write([]byte(val))\n\t\treturn int(hash.Sum32() % NumShards)\n\tcase \"int32\":\n\t\t// Values that come as numbers in JSON are of type float64.\n\t\tval, ok := v.(float64)\n\t\tif !ok {\n\t\t\treturn -1\n\t\t}\n\n\t\treturn int(int32(val) % NumShards)\n\tdefault:\n\t\treturn -1\n\t}\n}", "func (b *Base) Digest(req *DigestReq) (*DigestResp, error) {\n\treturn nil, ErrFunctionNotSupported\n}", "func CalcDigest(obj Object, downloadRangeSize int64, algorithm string) ([] byte, error) {\n\th, err := newHash(algorithm)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_, err = Download(obj, downloadRangeSize, h)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdigest := h.Sum(nil)\n\treturn digest, nil\n}", "func hash(t types.Type, x value) int {\n\tswitch x := x.(type) {\n\tcase bool:\n\t\tif x {\n\t\t\treturn 1\n\t\t}\n\t\treturn 0\n\tcase int:\n\t\treturn x\n\tcase int8:\n\t\treturn int(x)\n\tcase int16:\n\t\treturn int(x)\n\tcase int32:\n\t\treturn int(x)\n\tcase int64:\n\t\treturn int(x)\n\tcase uint:\n\t\treturn int(x)\n\tcase uint8:\n\t\treturn int(x)\n\tcase uint16:\n\t\treturn int(x)\n\tcase uint32:\n\t\treturn int(x)\n\tcase uint64:\n\t\treturn int(x)\n\tcase uintptr:\n\t\treturn int(x)\n\tcase float32:\n\t\treturn int(x)\n\tcase float64:\n\t\treturn int(x)\n\tcase complex64:\n\t\treturn int(real(x))\n\tcase complex128:\n\t\treturn int(real(x))\n\tcase string:\n\t\treturn hashString(x)\n\tcase *value:\n\t\treturn int(uintptr(unsafe.Pointer(x)))\n\tcase chan value:\n\t\treturn int(uintptr(reflect.ValueOf(x).Pointer()))\n\tcase structure:\n\t\treturn x.hash(t)\n\tcase array:\n\t\treturn x.hash(t)\n\tcase iface:\n\t\treturn x.hash(t)\n\tcase rtype:\n\t\treturn x.hash(t)\n\t}\n\tpanic(fmt.Sprintf(\"%T is unhashable\", x))\n}", "func (d Digest) Value() (driver.Value, error) {\n\tb, err := 
json.Marshal(d)\n\tif err != nil {\n\t\treturn driver.Value([]byte{}), err\n\t}\n\treturn driver.Value(b), nil\n}", "func Hash(i interface{}) string {\n\tv := reflect.ValueOf(i)\n\tif v.Kind() != reflect.Ptr {\n\t\tif !v.CanAddr(){\n\t\t\treturn \"\"\n\t\t}\n\t\tv = v.Addr()\n\t}\n\n\tsize := unsafe.Sizeof(v.Interface())\n\tb := (*[1 << 10]uint8)(unsafe.Pointer(v.Pointer()))[:size:size]\n\n\th := md5.New()\n\treturn base64.StdEncoding.EncodeToString(h.Sum(b))\n}", "func (in *Instance) hash(x, y, mu *big.Int, T uint64) *big.Int {\n\tb := sha512.New()\n\tb.Write(x.Bytes())\n\tb.Write(y.Bytes())\n\tb.Write(mu.Bytes())\n\tbits := make([]byte, 8)\n\tbinary.LittleEndian.PutUint64(bits, T)\n\tb.Write(bits)\n\tres := new(big.Int).SetBytes(b.Sum(nil))\n\tres.Mod(res, in.rsaModulus)\n\treturn res\n}", "func (b *ItemBundle) Digest() (isolated.HexDigest, int64, error) {\n\th := sha1.New()\n\tcw := &iotools.CountingWriter{Writer: h}\n\tif err := b.writeTar(cw); err != nil {\n\t\treturn \"\", 0, err\n\t}\n\treturn isolated.Sum(h), cw.Count, nil\n}", "func (_BondedECDSAKeep *BondedECDSAKeepCaller) Digest(opts *bind.CallOpts) ([32]byte, error) {\n\tvar (\n\t\tret0 = new([32]byte)\n\t)\n\tout := ret0\n\terr := _BondedECDSAKeep.contract.Call(opts, out, \"digest\")\n\treturn *ret0, err\n}", "func (s *GetRunOutput) SetDigest(v string) *GetRunOutput {\n\ts.Digest = &v\n\treturn s\n}", "func (c closure) Digest() digest.Digest {\n\treturn c.expr.Digest(c.env)\n}", "func (me TDigestValueType) String() string { return xsdt.Base64Binary(me).String() }", "func typehash(t *types.Type) uint32 {\n\tp := t.LongString()\n\n\t// Using MD5 is overkill, but reduces accidental collisions.\n\th := md5.Sum([]byte(p))\n\treturn binary.LittleEndian.Uint32(h[:4])\n}", "func (s *WorkflowListItem) SetDigest(v string) *WorkflowListItem {\n\ts.Digest = &v\n\treturn s\n}", "func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {\n\tif t == hash.MD5 && o.fs.hasOCMD5 {\n\t\treturn o.md5, nil\n\t}\n\tif t == hash.SHA1 && (o.fs.hasOCSHA1 || o.fs.hasMESHA1) {\n\t\treturn o.sha1, nil\n\t}\n\treturn \"\", hash.ErrUnsupported\n}", "func (m *GGCRImage) Digest() (v1.Hash, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Digest\")\n\tret0, _ := ret[0].(v1.Hash)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func hash(stav Stav) uint64{\n\tstr := \"\"\n\n\tfor i := 0; i < len(stav.Auta); i++ {\n\t\tstr += stav.Auta[i].Farba\n\t\tstr += strconv.Itoa(int(stav.Auta[i].X))\n\t\tstr += strconv.Itoa(int(stav.Auta[i].Y))\n\t\tstr += strconv.FormatBool(stav.Auta[i].Smer)\n\t\tstr += strconv.Itoa(int(stav.Auta[i].Dlzka))\n\t}\n\n\th := fnv.New64a()\n\th.Write([]byte(str))\n\treturn h.Sum64()\n\n}", "func Hash(params PublicParameters, password, salt []byte, workFactor int, preHash bool, postHashLen int) (*Digest, error) {\n\tif _, _, err := wfMant(uint32(workFactor)); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif salt == nil {\n\t\tsalt = make([]byte, 16)\n\t\tif _, err := rand.Read(salt); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif preHash {\n\t\tpassword = kdf(params.Hash, password, 64)\n\t}\n\n\tk := params.N.BitLen() / 8\n\tif k < 160 {\n\t\treturn nil, errors.New(\"modulus too short\")\n\t}\n\n\tu := len(password)\n\tif u > 255 || u > (k-32) {\n\t\treturn nil, errors.New(\"password too long\")\n\t}\n\n\t// sb = KDF(salt || password || BYTE(u), k - 2 - u)\n\tsb := kdf(params.Hash, append(append(salt, password...), byte(u)), k-2-u)\n\n\t//xb = BYTE(0x00) || sb || password || BYTE(u)\n\txb := 
append(append(append([]byte{0x00}, sb...), password...), byte(u))\n\n\tx := new(big.Int).SetBytes(xb)\n\tfor i := 0; i <= workFactor; i++ {\n\t\tx = new(big.Int).Exp(x, two, params.N)\n\t}\n\n\tout := pad(params.N, x)\n\tif postHashLen > 0 {\n\t\tout = kdf(params.Hash, out, postHashLen)\n\t}\n\n\treturn &Digest{\n\t\tModulusID: params.ModulusID(),\n\t\tHash: out,\n\t\tSalt: salt,\n\t\tWorkFactor: workFactor,\n\t\tPreHash: preHash,\n\t\tPostHashLen: postHashLen,\n\t}, nil\n}", "func HashFromSum(algo Type, digest []byte) Hash {\n\tsize := len(digest)\n\tvar result Encoded\n\t// multihash: hash codec\n\tn := binary.PutUvarint(result[:], algo.Code())\n\t// multihash: digest size\n\tn += binary.PutUvarint(result[n:], uint64(size))\n\t// multihash: copy digest\n\tcopy(result[n:], digest)\n\treturn customHash{code: algo, size: size, start: n, body: result}\n}", "func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {\n\treturn \"\", hash.ErrUnsupported\n}", "func (q *findMissingQueue) deriveDigest(blobDigest *remoteexecution.Digest) (digest.Digest, error) {\n\tderivedDigest, err := q.digestFunction.NewDigestFromProto(blobDigest)\n\tif err != nil {\n\t\treturn digest.BadDigest, util.StatusWrapWithCode(err, codes.NotFound, \"Action result contained malformed digest\")\n\t}\n\treturn derivedDigest, err\n}", "func (d *Digester) Digest(ctx context.Context, sub broker.Subscriber, opts ...digester.Option) error {\n\tdopts := digester.Options{}\n\tfor _, apply := range opts {\n\t\tapply(&dopts)\n\t}\n\n\th := dopts.Handler\n\tif h == nil {\n\t\th = handlers.DumpData\n\t}\n\n\treturn d.handle(ctx, sub, h)\n}", "func (s *GetWorkflowOutput) SetDigest(v string) *GetWorkflowOutput {\n\ts.Digest = &v\n\treturn s\n}", "func DigestUrl(baseURL string, digest types.Digest) string {\n\tbaseURL = strings.TrimRight(baseURL, \"/\")\n\treturn fmt.Sprintf(urlTemplate, baseURL, digest)\n}", "func d(digest types.Digest) []byte {\n\tif len(digest) != 2*md5.Size {\n\t\tpanic(\"digest wrong length \" + string(digest))\n\t}\n\tb, err := hex.DecodeString(string(digest))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn b\n}", "func d(digest types.Digest) []byte {\n\tif len(digest) != 2*md5.Size {\n\t\tpanic(\"digest wrong length \" + string(digest))\n\t}\n\tb, err := hex.DecodeString(string(digest))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn b\n}", "func DigestInfo(h crypto.Hash, data []byte) []byte {\n\t// Create a copy of the prefix, so that Sum can append to it directly.\n\t// Allocate space for both the prefix and the hash.\n\tp := hp[h]\n\tprefix := make([]byte, len(p), len(p)+h.Size())\n\tcopy(prefix, p)\n\n\thash := h.New()\n\thash.Write(data) // nolint: errcheck, gas, hash.Write never returns an error.\n\treturn hash.Sum(prefix)\n}", "func (t TDigest) AsBytes() ([]byte, error) {\n\tbuffer := new(bytes.Buffer)\n\n\terr := binary.Write(buffer, endianess, smallEncoding)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = binary.Write(buffer, endianess, t.compression)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = binary.Write(buffer, endianess, int32(t.summary.Len()))\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar x float64\n\tt.summary.ForEach(func(mean float64, count uint32) bool {\n\t\tdelta := mean - x\n\t\tx = mean\n\t\terr = binary.Write(buffer, endianess, float32(delta))\n\n\t\treturn err == nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tt.summary.ForEach(func(mean float64, count uint32) bool {\n\t\terr = encodeUint(buffer, count)\n\t\treturn err == 
nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buffer.Bytes(), nil\n}", "func (v Var) Hash() int {\n\th := xxhash.ChecksumString64S(string(v), hashSeed0)\n\treturn int(h)\n}", "func Hash(k0, k1 uint64, p []byte) uint64 {\n\tvar d digest\n\td.size = Size\n\td.k0 = k0\n\td.k1 = k1\n\td.Reset()\n\td.Write(p)\n\treturn d.Sum64()\n}", "func (w *DiskImage) Digest() digest.Digest {\n\treturn w.digester.Digest()\n}", "func hashType(t types.Type) int {\n\tmu.Lock()\n\th := int(hasher.Hash(t))\n\tmu.Unlock()\n\treturn h\n}", "func (md *Metadata) Digest() metadata.Digest {\n\tsshKeys := make(map[string][]ssh.Key)\n\n\tfor usr, rawKeys := range md.PublicKeys {\n\t\tkeys := strings.Split(rawKeys, \"\\n\")\n\n\t\tfor _, key := range keys {\n\t\t\tif key != \"\" {\n\t\t\t\tsshKeys[usr] = append(sshKeys[usr], ssh.Key(key))\n\t\t\t}\n\t\t}\n\t}\n\n\treturn metadata.Digest{\n\t\tHostname: md.Hostname,\n\t\tSSHKeys: sshKeys,\n\t}\n}", "func hash(i interface{}) string {\n\n var s []byte\n\n switch i.(type) {\n case []byte:\n s = reflect.ValueOf(i).Bytes()\n\n case string:\n s = []byte(reflect.ValueOf(i).String())\n\n default:\n panic(\"Cannot use interface type given in hash\")\n }\n\n h := md5.New()\n\n h.Write(s)\n\n b := h.Sum(nil)\n\n return hex.EncodeToString(b)\n}", "func (o *KubernetesAddonDefinitionAllOf) SetDigest(v string) {\n\to.Digest = &v\n}", "func (es *externalSigner) NewDigest(sig *model.PdfSignature) (model.Hasher, error) {\n\treturn bytes.NewBuffer(nil), nil\n}", "func computeHash(nstObj megav1.NamespaceTemplate) uint64 {\n\thash, err := hashstructure.Hash(nstObj, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Printf(\"computeHash: %d\\n\", hash)\n\treturn hash\n}", "func (f *Filter) Hash(extraBytes []byte) (string, error) {\n\th := sha1.New()\n\n\t// copy by value to ignore ETag without affecting f\n\tf2 := *f\n\tf2.ETag = \"\"\n\n\tfilterBytes, err := bson.Marshal(f2)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif _, err := h.Write(append(filterBytes, extraBytes...)); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn fmt.Sprintf(\"%x\", h.Sum(nil)), nil\n}", "func Hasher(value string) string {\n\th := fnv.New32a()\n\t_, _ = h.Write([]byte(value))\n\treturn fmt.Sprintf(\"%v\", h.Sum32())\n}", "func F(v *big.Int, gamma []byte) []byte {\n\tvBytes := v.Bytes()\n\thmac_ins := hmac.New(sha256.New, gamma)\n\thmac_ins.Write(vBytes[:])\n\thashed := hmac_ins.Sum(nil)\n\treturn hashed\n}", "func DigestFile(filename string) (string, error) {\n\tb, err := DigestFileBytes(filename)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn hex.EncodeToString(b), nil\n}", "func (me *XsdGoPkgHasElem_DigestValue) Walk() (err error) {\n\tif fn := WalkHandlers.XsdGoPkgHasElem_DigestValue; me != nil {\n\t\tif fn != nil {\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif fn != nil {\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func hash(value string) uint32 {\n\th := fnv.New32a()\n\th.Write([]byte(value))\n\n\treturn h.Sum32()\n}", "func (t *Target) hash() uint64 {\n\th := fnv.New64a()\n\n\t//nolint: errcheck\n\th.Write([]byte(fmt.Sprintf(\"%016d\", t.labels.Hash())))\n\t//nolint: errcheck\n\th.Write([]byte(t.URL().String()))\n\n\treturn h.Sum64()\n}", "func HashTo(v Value, p []byte) {\n\tquad.HashTo(v, p)\n}", "func (o ConnectedRegistryNotificationOutput) Digest() 
pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ConnectedRegistryNotification) *string { return v.Digest }).(pulumi.StringPtrOutput)\n}", "func (s *GenericStorage) Checksum(gvk schema.GroupVersionKind, uid runtime.UID) (string, error) {\n\treturn s.raw.Checksum(KeyForUID(gvk, uid))\n}", "func Hashit(tox string) string {\n h:= sha256.New()\n h.Write([]byte(tox))\n bs := h.Sum([]byte{})\n str := base64.StdEncoding.EncodeToString(bs)\n return str\n}", "func MustNewDigest(instanceName string, digestFunctionEnum remoteexecution.DigestFunction_Value, hash string, sizeBytes int64) Digest {\n\tdigestFunction := MustNewFunction(instanceName, digestFunctionEnum)\n\td, err := digestFunction.NewDigest(hash, sizeBytes)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn d\n}", "func (_BondedECDSAKeep *BondedECDSAKeepSession) Digest() ([32]byte, error) {\n\treturn _BondedECDSAKeep.Contract.Digest(&_BondedECDSAKeep.CallOpts)\n}", "func JavaDigest(h hash.Hash) string {\n\thash := h.Sum(nil)\n \n\t// Check for negative hashes\n\tnegative := (hash[0] & 0x80) == 0x80\n\tif negative {\n\t\thash = twosComplement(hash)\n\t}\n \n\t// Trim away zeroes\n\tres := strings.TrimLeft(fmt.Sprintf(\"%x\", hash), \"0\")\n\tif negative {\n\t\tres = \"-\" + res\n\t}\n \n\treturn res\n}", "func (o *ImageImportManifest) SetDigest(v string) {\n\to.Digest = &v\n}", "func Hash(seed maphash.Seed, k Key) uint64 {\n\tvar buf [8]byte\n\tswitch v := k.(type) {\n\tcase mapKey:\n\t\treturn hashMapKey(seed, v)\n\tcase interfaceKey:\n\t\ts := v.Hash()\n\t\t// Mix up the hash to ensure it covers 64-bits\n\t\tbinary.LittleEndian.PutUint64(buf[:8], uint64(s))\n\t\treturn hashBytes(seed, buf[:8])\n\tcase strKey:\n\t\treturn hashString(seed, string(v))\n\tcase bytesKey:\n\t\treturn hashBytes(seed, []byte(v))\n\tcase int8Key:\n\t\tbuf[0] = byte(v)\n\t\treturn hashBytes(seed, buf[:1])\n\tcase int16Key:\n\t\tbinary.LittleEndian.PutUint16(buf[:2], uint16(v))\n\t\treturn hashBytes(seed, buf[:2])\n\tcase int32Key:\n\t\tbinary.LittleEndian.PutUint32(buf[:4], uint32(v))\n\t\treturn hashBytes(seed, buf[:4])\n\tcase int64Key:\n\t\tbinary.LittleEndian.PutUint64(buf[:8], uint64(v))\n\t\treturn hashBytes(seed, buf[:8])\n\tcase uint8Key:\n\t\tbuf[0] = byte(v)\n\t\treturn hashBytes(seed, buf[:1])\n\tcase uint16Key:\n\t\tbinary.LittleEndian.PutUint16(buf[:2], uint16(v))\n\t\treturn hashBytes(seed, buf[:2])\n\tcase uint32Key:\n\t\tbinary.LittleEndian.PutUint32(buf[:4], uint32(v))\n\t\treturn hashBytes(seed, buf[:4])\n\tcase uint64Key:\n\t\tbinary.LittleEndian.PutUint64(buf[:8], uint64(v))\n\t\treturn hashBytes(seed, buf[:8])\n\tcase float32Key:\n\t\tbinary.LittleEndian.PutUint32(buf[:4], math.Float32bits(float32(v)))\n\t\treturn hashBytes(seed, buf[:4])\n\tcase float64Key:\n\t\tbinary.LittleEndian.PutUint64(buf[:8], math.Float64bits(float64(v)))\n\t\treturn hashBytes(seed, buf[:8])\n\tcase boolKey:\n\t\tif v {\n\t\t\tbuf[0] = 1\n\t\t}\n\t\treturn hashBytes(seed, buf[:1])\n\tcase sliceKey:\n\t\treturn hashSliceKey(seed, v)\n\tcase pointerKey:\n\t\treturn hashSliceKey(seed, v.sliceKey)\n\tcase pathKey:\n\t\treturn hashSliceKey(seed, v.sliceKey)\n\tcase nilKey:\n\t\treturn hashBytes(seed, nil)\n\tcase Hashable:\n\t\t// Mix up the hash to ensure it covers 64-bits\n\t\tbinary.LittleEndian.PutUint64(buf[:8], v.Hash())\n\t\treturn hashBytes(seed, buf[:8])\n\tdefault:\n\t\ts := _nilinterhash(v.Key())\n\t\tbinary.LittleEndian.PutUint64(buf[:8], uint64(s))\n\t\treturn hashBytes(seed, buf[:8])\n\t}\n}", "func (d *DigestEntry) Type() EntityType {\n\treturn 
EntityTypes.DIGEST\n}", "func Reflect_Value(v interface{}) *vector_tile.Tile_Value {\n\tvar tv *vector_tile.Tile_Value\n\t//fmt.Print(v)\n\tvv := reflect.ValueOf(v)\n\tkd := vv.Kind()\n\tif (reflect.Float64 == kd) || (reflect.Float32 == kd) {\n\t\t//fmt.Print(v, \"float\", k)\n\t\ttv = Make_Tv_Float(float64(vv.Float()))\n\t\t//hash = Hash_Tv(tv)\n\t} else if (reflect.Int == kd) || (reflect.Int8 == kd) || (reflect.Int16 == kd) || (reflect.Int32 == kd) || (reflect.Int64 == kd) || (reflect.Uint8 == kd) || (reflect.Uint16 == kd) || (reflect.Uint32 == kd) || (reflect.Uint64 == kd) {\n\t\t//fmt.Print(v, \"int\", k)\n\t\ttv = Make_Tv_Int(int(vv.Int()))\n\t\t//hash = Hash_Tv(tv)\n\t} else if reflect.String == kd {\n\t\t//fmt.Print(v, \"str\", k)\n\t\ttv = Make_Tv_String(string(vv.String()))\n\t\t//hash = Hash_Tv(tv)\n\n\t} else {\n\t\ttv := new(vector_tile.Tile_Value)\n\t\tt := \"\"\n\t\ttv.StringValue = &t\n\t}\n\treturn tv\n}", "func (d Digest) GetDigestFunction() Function {\n\tdigestFunction, _, _, sizeBytesEnd := d.unpack()\n\treturn Function{\n\t\tinstanceName: InstanceName{\n\t\t\tvalue: d.value[sizeBytesEnd+1:],\n\t\t},\n\t\tbareFunction: getBareFunction(digestFunction, 0),\n\t}\n}", "func (o *Object) Hash(t fs.HashType) (string, error) {\n\treturn \"\", fs.ErrHashUnsupported\n}", "func Hash(v []byte) string {\n\th := sha256.Sum256(v)\n\treturn hex.EncodeToString(h[:])\n}", "func (flavor *Flavor) getFlavorDigest() ([]byte, error) {\n\t// account for a differences in properties set at runtime\n\ttempFlavor := *flavor\n\ttempFlavor.Meta.ID = uuid.Nil\n\n\tflavorJSON, err := json.Marshal(tempFlavor)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"An error occurred attempting to convert the flavor to json\")\n\t}\n\n\tif flavorJSON == nil || len(flavorJSON) == 0 {\n\t\treturn nil, errors.New(\"The flavor json was not provided\")\n\t}\n\n\thashEntity := sha512.New384()\n\t_, err = hashEntity.Write(flavorJSON)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Error writing flavor hash\")\n\t}\n\treturn hashEntity.Sum(nil), nil\n}", "func (spec Spec) DeepHash() string {\n\thash := sha512.New512_224()\n\tspec.DefaultService.hash(hash)\n\tfor _, rule := range spec.Rules {\n\t\trule.hash(hash)\n\t}\n\tsvcs := make([]string, len(spec.AllServices))\n\ti := 0\n\tfor k := range spec.AllServices {\n\t\tsvcs[i] = k\n\t\ti++\n\t}\n\tsort.Strings(svcs)\n\tfor _, svc := range svcs {\n\t\thash.Write([]byte(svc))\n\t\tspec.AllServices[svc].hash(hash)\n\t}\n\tspec.ShardCluster.hash(hash)\n\thash.Write([]byte(spec.VCL))\n\tfor _, auth := range spec.Auths {\n\t\tauth.hash(hash)\n\t}\n\tfor _, acl := range spec.ACLs {\n\t\tacl.hash(hash)\n\t}\n\tfor _, rw := range spec.Rewrites {\n\t\trw.hash(hash)\n\t}\n\tfor _, reqDisp := range spec.Dispositions {\n\t\treqDisp.hash(hash)\n\t}\n\th := new(big.Int)\n\th.SetBytes(hash.Sum(nil))\n\treturn h.Text(62)\n}", "func SaltedHash(v string) (string, error) {\n\tbSalted := []byte(salted(v))\n\tbSum := md5.Sum(bSalted)\n\treturn fmt.Sprintf(\"%x\", bSum), nil\n}", "func NewDigests(appliedRuleset *rulesets.AppliedRulesetSummary, statuses []scanner.ScanStatus) ([]Digest, error) {\n\tds := make([]Digest, 0)\n\terrs := make([]string, 0, 0)\n\n\tfor i := range statuses {\n\t\ts := statuses[i]\n\n\t\tvar e *scans.Evaluation\n\t\tif appliedRuleset != nil && appliedRuleset.RuleEvaluationSummary != nil {\n\t\t\tfor i := range appliedRuleset.RuleEvaluationSummary.Ruleresults {\n\t\t\t\tif appliedRuleset.RuleEvaluationSummary.Ruleresults[i].ID == s.ID {\n\t\t\t\t\te = 
&appliedRuleset.RuleEvaluationSummary.Ruleresults[i]\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\td, err := _newDigests(&s, e)\n\t\tif err != nil {\n\t\t\terrs = append(errs, fmt.Sprintf(\"failed to make digest(s) from scan: %v\", err.Error()))\n\t\t\tcontinue\n\t\t}\n\n\t\tds = append(ds, d...)\n\t}\n\n\tsort.Slice(ds, func(i, j int) bool { return ds[i].Index < ds[j].Index })\n\n\tif len(errs) > 0 {\n\t\treturn ds, fmt.Errorf(\"failed to make some digests: %v\", strings.Join(errs, \"; \"))\n\t}\n\n\treturn ds, nil\n}", "func Digest(done <-chan interface{}, f func(interface{}, ...interface{}) interface{}, in <-chan interface{}, params []interface{}) (outchan <-chan interface{}) {\n\tout := make(chan interface{})\n\tvar wg sync.WaitGroup\n\tconst numDigesters = 20\n\twg.Add(numDigesters)\n\tfor i := 0; i < numDigesters; i++ {\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\t// recover from panic if one occured. Set err to nil otherwise.\n\t\t\t\tif recover() != nil {\n\t\t\t\t\tfmt.Println(\"defer launch\")\n\t\t\t\t\twg.Done()\n\n\t\t\t\t}\n\t\t\t}()\n\t\t\tdigester(done, f, params, in, out)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(out)\n\t}()\n\treturn out\n}", "func computeHash(w http.ResponseWriter, req *http.Request) {\n\tvalues := req.URL.Query()\n\tdata := values.Get(\"data\")\n\tif data == \"\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(\"400 - data param not present\"))\n\t\treturn\n\t}\n\tsalt := values.Get(\"salt\")\n\tif salt == \"\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(\"400 - salt param not present\"))\n\t\treturn\n\t}\n\th := sha256.Sum256([]byte(data+salt))\n\tencodedStr := hex.EncodeToString(h[:])\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(encodedStr))\n}", "func (n *NamedRepository) Digest() string {\n\treturn n.digest\n}", "func ComputeHash(template *v1.PodSpec) uint32 {\n\tpodTemplateSpecHasher := fnv.New32a()\n\thashutil.DeepHashObject(podTemplateSpecHasher, *template)\n\treturn podTemplateSpecHasher.Sum32()\n}", "func hash(obj interface{}) KHash {\n\tvar buffer bytes.Buffer\n\tencoder := json.NewEncoder(&buffer)\n\terr := encoder.Encode(obj)\n\tif err != nil {\n\t\tpanic(\"cannot encode object\")\n\t}\n\n\tdata := buffer.Bytes()\n\th := sha256.Sum256(data)\n\n\t// log.Printf(\"hashing %#v represented as %s with hash %X\", obj, data, h)\n\treturn h\n}", "func (_BondedECDSAKeep *BondedECDSAKeepCallerSession) Digest() ([32]byte, error) {\n\treturn _BondedECDSAKeep.Contract.Digest(&_BondedECDSAKeep.CallOpts)\n}", "func hash(values ...[]byte) ([]byte, error) {\n\th := swarm.NewHasher()\n\tfor _, v := range values {\n\t\t_, err := h.Write(v)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn h.Sum(nil), nil\n}", "func (c *Cluster) Hash(v interface{}) (int, error) {\n\th, err := hashstructure.Hash(v, nil)\n\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\t// get cluster index\n\tci := int(h % uint64(c.metadata.NumShards))\n\n\treturn ci, nil\n}", "func (bcc *Bcc) Digest(b ...byte) {\n\tfor _, i := range b {\n\t\t*bcc = (*bcc) ^ Bcc(i)\n\t}\n}", "func (k CmdHasher) Hash(r *http.Request) string {\n\n\tencodedReq, err := json.Marshal(&request{r})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tstdin := strings.NewReader(string(encodedReq))\n\n\tvar stderr bytes.Buffer\n\tcmd := k.NewCmd(k.Command, &stderr, stdin)\n\tout, err := k.Run(cmd)\n\n\tif err != nil {\n\t\tlog.Printf(\"%v:\\nSTDOUT:\\n%v\\n\\nSTDERR:\\n%v\", err, string(out), 
stderr.String())\n\t\tpanic(err)\n\t}\n\n\thasher := md5.New()\n\t// This method always succeeds\n\t_, _ = hasher.Write(out)\n\treturn hex.EncodeToString(hasher.Sum(nil))\n}", "func computeMD5Hash(toBeEncoded int64) string {\n\tvar computedString string\n\n\tbyteArray := make([]byte, 1024)\n\tn := binary.PutVarint(byteArray, toBeEncoded)\n\n\tcomputedMD5 := md5.Sum(byteArray[0:n])\n\n\tcomputedString = fmt.Sprintf(\"%x\", computedMD5)\n\n\treturn computedString\n\n}", "func Hash(obj interface{}) (string, error) {\n\tb, err := GetBytes(obj)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\thasher := sha256.New()\n\thasher.Write(b)\n\n\treturn base64.URLEncoding.EncodeToString(hasher.Sum(nil)), nil\n}", "func (d Digest64) Sum(b []byte) []byte {\n\th1 := d.Sum64()\n\treturn append(b,\n\t\tbyte(h1>>56), byte(h1>>48), byte(h1>>40), byte(h1>>32),\n\t\tbyte(h1>>24), byte(h1>>16), byte(h1>>8), byte(h1))\n}", "func DigestForBlock(number uint64) (blockdigest.Digest, error) {\n\tglobalData.Lock()\n\tdefer globalData.Unlock()\n\n\t// valid block number\n\tif number <= genesis.BlockNumber {\n\t\tif mode.IsTesting() {\n\t\t\treturn genesis.TestGenesisDigest, nil\n\t\t}\n\t\treturn genesis.LiveGenesisDigest, nil\n\t}\n\n\t// fetch block and compute digest\n\tn := make([]byte, 8)\n\tbinary.BigEndian.PutUint64(n, number)\n\tpacked := storage.Pool.Blocks.Get(n) // ***** FIX THIS: possible optimisation is to store the block hashes in a separate index\n\tif nil == packed {\n\t\treturn blockdigest.Digest{}, fault.ErrBlockNotFound\n\t}\n\n\t_, digest, _, err := blockrecord.ExtractHeader(packed, 0)\n\n\treturn digest, err\n}", "func New(hash string, size int64) (*repb.Digest, error) {\n\tdigest := &repb.Digest{Hash: hash, SizeBytes: size}\n\tif err := Validate(digest); err != nil {\n\t\treturn nil, err\n\t}\n\treturn digest, nil\n}", "func (me TDigestValueType) ToXsdtBase64Binary() xsdt.Base64Binary { return xsdt.Base64Binary(me) }", "func HASH(s string, salt string) string {\n\treturn MD5(SHA256(s, salt), salt)\n}", "func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {\n\tif o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {\n\t\treturn \"\", errNotSupportedInSharedMode\n\t}\n\tif t != DbHashType {\n\t\treturn \"\", hash.ErrUnsupported\n\t}\n\terr := o.readMetaData(ctx)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to read hash from metadata: %w\", err)\n\t}\n\treturn o.hash, nil\n}", "func (o *GetIconParams) SetDigest(digest string) {\n\to.Digest = digest\n}", "func hash(s string) string {\n\treturn fmt.Sprintf(\"%x\", md5.Sum([]byte(s)))\n}", "func (d *digest) Sum64() uint64 { return d.crc }", "func New() hash.Digest {\n\tref := &digest{}\n\tref.Reset()\n\treturn ref\n}" ]
[ "0.71098894", "0.64068437", "0.57950824", "0.573337", "0.5672699", "0.55973285", "0.55431145", "0.5513988", "0.5474869", "0.5373887", "0.5371913", "0.53038967", "0.52510256", "0.5207102", "0.52021974", "0.5186848", "0.5170909", "0.513816", "0.50961375", "0.50706947", "0.5042459", "0.50414616", "0.50321144", "0.5014351", "0.4976219", "0.49752614", "0.49664766", "0.496537", "0.4962065", "0.49589628", "0.4952214", "0.49461645", "0.49292457", "0.4923784", "0.49149436", "0.49121195", "0.48923528", "0.48863524", "0.48505706", "0.48500046", "0.48488572", "0.48488572", "0.48415133", "0.48320433", "0.48264122", "0.4825901", "0.4794343", "0.4788074", "0.47612795", "0.47610405", "0.47588974", "0.47492805", "0.47462016", "0.4742307", "0.47416636", "0.4738043", "0.47223198", "0.47171733", "0.47072133", "0.46990147", "0.46965304", "0.4695413", "0.4683856", "0.46753213", "0.46730348", "0.46626124", "0.46582544", "0.46338922", "0.46335796", "0.46278828", "0.4607164", "0.46045032", "0.46024668", "0.4588667", "0.4578069", "0.4573446", "0.45726448", "0.45671678", "0.45524028", "0.4514985", "0.450321", "0.45010847", "0.4498602", "0.4495025", "0.44934738", "0.4493426", "0.44893667", "0.44882768", "0.4485552", "0.4480936", "0.44773096", "0.44769394", "0.44733056", "0.44717368", "0.44652137", "0.44605002", "0.4460424", "0.44489494", "0.4443588", "0.44350636" ]
0.7762607
0
WriteDigest writes digest material for value v (given type t) into the writer w.
func WriteDigest(w io.Writer, v T, t *types.T) {
	if d, ok := v.(digester); ok {
		digest.WriteDigest(w, d.Digest())
		return
	}
	w.Write([]byte{t.Kind.ID()})
	switch t.Kind {
	case types.ErrorKind, types.BottomKind, types.RefKind:
		panic("illegal type")
	case types.IntKind:
		vi := v.(*big.Int)
		// Bytes returns the normalized big-endian (i.e., free of a zero
		// prefix) representation of the absolute value of the integer.
		p := vi.Bytes()
		if len(p) == 0 {
			// This is the representation of "0"
			return
		}
		if p[0] == 0 {
			panic("big.Int byte representation is not normalized")
		}
		if vi.Sign() < 0 {
			w.Write([]byte{0})
		}
		w.Write(p)
	case types.FloatKind:
		w.Write([]byte(v.(*big.Float).Text('e', 10)))
	case types.StringKind:
		io.WriteString(w, v.(string))
	case types.BoolKind:
		if v.(bool) {
			w.Write(trueByte)
		} else {
			w.Write(falseByte)
		}
	case types.FileKind:
		digest.WriteDigest(w, v.(reflow.File).Digest())
	case types.DirKind:
		dir := v.(Dir)
		for scan := dir.Scan(); scan.Scan(); {
			io.WriteString(w, scan.Path())
			digest.WriteDigest(w, scan.File().Digest())
		}
	// Filesets are digesters, so they don't need to be handled here.
	case types.UnitKind:
	case types.ListKind:
		writeLength(w, len(v.(List)))
		for _, e := range v.(List) {
			WriteDigest(w, e, t.Elem)
		}
	case types.MapKind:
		m := v.(*Map)
		writeLength(w, m.Len())
		type kd struct {
			k T
			d digest.Digest
		}
		keys := make([]kd, 0, m.Len())
		for _, entryp := range m.tab {
			for entry := *entryp; entry != nil; entry = entry.Next {
				keys = append(keys, kd{entry.Key, Digest(entry.Key, t.Index)})
			}
		}
		// Sort the map so that it produces a consistent digest. We sort
		// its keys by their digest because the values may not yet be
		// evaluated.
		sort.Slice(keys, func(i, j int) bool { return keys[i].d.Less(keys[j].d) })
		for _, k := range keys {
			WriteDigest(w, k.k, t.Index)
			WriteDigest(w, m.Lookup(k.d, k.k), t.Elem)
		}
	case types.TupleKind:
		writeLength(w, len(t.Fields))
		tuple := v.(Tuple)
		for i, f := range t.Fields {
			WriteDigest(w, tuple[i], f.T)
		}
	case types.StructKind:
		writeLength(w, len(t.Fields))
		s := v.(Struct)
		keys := make([]string, len(t.Fields))
		for i, f := range t.Fields {
			keys[i] = f.Name
		}
		sort.Strings(keys)
		fm := t.FieldMap()
		for _, k := range keys {
			WriteDigest(w, s[k], fm[k])
		}
	case types.ModuleKind:
		writeLength(w, len(t.Fields))
		s := v.(Module)
		keys := make([]string, len(t.Fields))
		for i, f := range t.Fields {
			keys[i] = f.Name
		}
		sort.Strings(keys)
		fm := t.FieldMap()
		for _, k := range keys {
			WriteDigest(w, s[k], fm[k])
		}
	case types.SumKind:
		variant := v.(*Variant)
		io.WriteString(w, variant.Tag)
		WriteDigest(w, variant.Elem, t.VariantMap()[variant.Tag])
	case types.FuncKind:
		digest.WriteDigest(w, v.(Func).Digest())
	}
}
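A minimal usage sketch of the function above, assuming it sits in the same values package (the helper name digestOfString is an illustration only; Digester.NewWriter and dw.Digest follow the companion Digest wrapper that appears among the negatives below):

func digestOfString(s string, t *types.T) digest.Digest {
	dw := Digester.NewWriter() // package-level digester, as used by the Digest wrapper
	WriteDigest(dw, s, t)      // for a string type this writes the kind byte followed by the raw bytes
	return dw.Digest()
}

Because map entries are written in digest-sorted key order and struct/module fields in name-sorted order, equal values produce identical digests regardless of construction order.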
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Digest(v T, t *types.T) digest.Digest {\n\tw := Digester.NewWriter()\n\tWriteDigest(w, v, t)\n\treturn w.Digest()\n}", "func (q *Qsign) Digest(v interface{}) ([]byte, error) {\n\tbuf := new(bytes.Buffer)\n\n\tif q.prefixGenerator != nil {\n\t\tif _, err := buf.WriteString(q.prefixGenerator()); err != nil {\n\t\t\treturn buf.Bytes(), err\n\t\t}\n\t}\n\n\tvs := getStructValues(v)\n\n\tpairs := []string{}\n\tfor _, f := range vs {\n\t\tif !q.filter(f.name, f.value) {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar buf strings.Builder\n\t\tbuf.WriteString(f.name)\n\t\tbuf.WriteString(q.connector)\n\t\tbuf.WriteString(f.value)\n\n\t\tpairs = append(pairs, buf.String())\n\t}\n\tconnected := strings.Join(pairs, q.delimiter)\n\tbuf.WriteString(connected)\n\n\tif q.suffixGenerator != nil {\n\t\tif _, err := buf.WriteString(q.suffixGenerator()); err != nil {\n\t\t\treturn buf.Bytes(), err\n\t\t}\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func (o VirtualDatabaseStatusOutput) Digest() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v VirtualDatabaseStatus) *string { return v.Digest }).(pulumi.StringPtrOutput)\n}", "func (d *digest) Write(p []byte) (n int, err error) {\r\n\td.crc = crc64.Update(d.crc, d.tab, p)\r\n\treturn len(p), nil\r\n}", "func (o VirtualDatabaseStatusPtrOutput) Digest() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *VirtualDatabaseStatus) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Digest\n\t}).(pulumi.StringPtrOutput)\n}", "func (oc *OAuthConsumer) digest(key string, m string) string {\n\th := hmac.NewSHA1([]byte(key))\n\th.Write([]byte(m))\n\treturn base64encode(h.Sum())\n\n/*\ts := bytes.TrimSpace(h.Sum())\n\td := make([]byte, base64.StdEncoding.EncodedLen(len(s)))\n\tbase64.StdEncoding.Encode(d, s)\n\tds := strings.TrimSpace(bytes.NewBuffer(d).String())\n*/\n//\treturn ds\n\n}", "func (t *taprootSigHashOptions) writeDigestExtensions(w io.Writer) error {\n\tswitch t.extFlag {\n\t// The base extension, used for tapscript keypath spends doesn't modify\n\t// the digest at all.\n\tcase baseSigHashExtFlag:\n\t\treturn nil\n\n\t// The tapscript base leaf version extension adds the leaf hash, key\n\t// version, and code separator position to the final digest.\n\tcase tapscriptSighashExtFlag:\n\t\tif _, err := w.Write(t.tapLeafHash); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err := w.Write([]byte{t.keyVersion}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr := binary.Write(w, binary.LittleEndian, t.codeSepPos)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func NVWriteValueAuth(rw io.ReadWriter, index, offset uint32, data []byte, auth []byte) error {\n\tif auth == nil {\n\t\treturn fmt.Errorf(\"no auth value given but mandatory\")\n\t}\n\tsharedSecret, osapr, err := newOSAPSession(rw, etOwner, khOwner, auth[:])\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to start new auth session: %v\", err)\n\t}\n\tdefer osapr.Close(rw)\n\tdefer zeroBytes(sharedSecret[:])\n\tauthIn := []interface{}{ordNVWriteValueAuth, index, offset, len(data), data}\n\tca, err := newCommandAuth(osapr.AuthHandle, osapr.NonceEven, nil, sharedSecret[:], authIn)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to construct auth fields: %v\", err)\n\t}\n\tdata, ra, ret, err := nvWriteValue(rw, index, offset, uint32(len(data)), data, ca)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to write to NVRAM: %v\", err)\n\t}\n\traIn := []interface{}{ret, ordNVWriteValueAuth, tpmutil.U32Bytes(data)}\n\tif err := ra.verify(ca.NonceOdd, sharedSecret[:], raIn); err != 
nil {\n\t\treturn fmt.Errorf(\"failed to verify authenticity of response: %v\", err)\n\t}\n\treturn nil\n}", "func (n *Node) Digest() ([]byte, error) {\n\t// HMAC(Nonce,Inputs[*]|(Cryptex||Secret||Marker))\n\thash := hmac.New(sha256.New, n.Nonce)\n\n\tfor _, input := range n.Inputs {\n\t\tif _, err := hash.Write(input); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tvar (\n\t\tdata []byte\n\t\terr error\n\t)\n\n\tswitch {\n\tcase n.cryptex != nil:\n\t\tdata, err = n.cryptex.Marshal()\n\tcase n.secret != nil:\n\t\tdata, err = n.secret.Marshal()\n\tcase n.Marker != nil:\n\t\tdata, err = n.Marker.Marshal()\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif _, err := hash.Write(data); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn hash.Sum(nil), nil\n}", "func (o *KubernetesAddonDefinitionAllOf) SetDigest(v string) {\n\to.Digest = &v\n}", "func (me TDigestValueType) String() string { return xsdt.Base64Binary(me).String() }", "func EncodePayloadDigest(data []byte) []byte {\n\tsum := md5.Sum(data)\n\treturn sum[:]\n}", "func WriteUint64(w io.Writer, v uint64) error {\n\tvar data [9]byte\n\tvar size int\n\tswitch {\n\tcase v <= 0x80:\n\t\tdata[0] = byte(v)\n\t\tsize = 1\n\tcase v < (1 << 8):\n\t\tdata[0] = 0x80 + 1\n\t\tdata[1] = byte(v)\n\t\tsize = 2\n\tcase v < (1 << 16):\n\t\tdata[0] = 0x80 + 2\n\t\tdata[1] = byte(v >> 8)\n\t\tdata[2] = byte(v)\n\t\tsize = 3\n\tcase v < (1 << 24):\n\t\tdata[0] = 0x80 + 3\n\t\tdata[1] = byte(v >> 16)\n\t\tdata[2] = byte(v >> 8)\n\t\tdata[3] = byte(v)\n\t\tsize = 4\n\tcase v < (1 << 32):\n\t\tdata[0] = 0x80 + 4\n\t\tdata[1] = byte(v >> 24)\n\t\tdata[2] = byte(v >> 16)\n\t\tdata[3] = byte(v >> 8)\n\t\tdata[4] = byte(v)\n\t\tsize = 5\n\tcase v < (1 << 40):\n\t\tdata[0] = 0x80 + 5\n\t\tdata[1] = byte(v >> 32)\n\t\tdata[2] = byte(v >> 24)\n\t\tdata[3] = byte(v >> 16)\n\t\tdata[4] = byte(v >> 8)\n\t\tdata[5] = byte(v)\n\t\tsize = 6\n\tcase v < (1 << 48):\n\t\tdata[0] = 0x80 + 6\n\t\tdata[1] = byte(v >> 40)\n\t\tdata[2] = byte(v >> 32)\n\t\tdata[3] = byte(v >> 24)\n\t\tdata[4] = byte(v >> 16)\n\t\tdata[5] = byte(v >> 8)\n\t\tdata[6] = byte(v)\n\t\tsize = 7\n\tcase v < (1 << 56):\n\t\tdata[0] = 0x80 + 7\n\t\tdata[1] = byte(v >> 48)\n\t\tdata[2] = byte(v >> 40)\n\t\tdata[3] = byte(v >> 32)\n\t\tdata[4] = byte(v >> 24)\n\t\tdata[5] = byte(v >> 16)\n\t\tdata[6] = byte(v >> 8)\n\t\tdata[7] = byte(v)\n\t\tsize = 8\n\tdefault:\n\t\tdata[0] = 0x80 + 8\n\t\tdata[1] = byte(v >> 56)\n\t\tdata[2] = byte(v >> 48)\n\t\tdata[3] = byte(v >> 40)\n\t\tdata[4] = byte(v >> 32)\n\t\tdata[5] = byte(v >> 24)\n\t\tdata[6] = byte(v >> 16)\n\t\tdata[7] = byte(v >> 8)\n\t\tdata[8] = byte(v)\n\t\tsize = 9\n\t}\n\t_, err := w.Write(data[0:size])\n\treturn err\n}", "func (me *XsdGoPkgHasElem_DigestValue) Walk() (err error) {\n\tif fn := WalkHandlers.XsdGoPkgHasElem_DigestValue; me != nil {\n\t\tif fn != nil {\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif fn != nil {\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func (o *GetIconParams) SetDigest(digest string) {\n\to.Digest = digest\n}", "func (o *ImageImportManifest) SetDigest(v string) {\n\to.Digest = &v\n}", "func (s *GetWorkflowOutput) SetDigest(v string) *GetWorkflowOutput {\n\ts.Digest = &v\n\treturn s\n}", "func (t *Tube) writeSync(key string, rev Rev, value interface{}) {\n\tvar wg sync.WaitGroup\n\tfor _, downAvatar := 
range t.folk.Opened() {\n\t\tydown := YTube{\n\t\t\ttissue.FolkAvatar(downAvatar),\n\t\t}\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tydown.Write(key, rev, value)\n\t\t}()\n\t}\n\twg.Wait()\n}", "func (vd *tValueDiffer) writeTypeValue(idx int, v reflect.Value, ht, hv bool) {\n\tv = vd.writeTypeBeforeValue(idx, v, ht)\n\tvd.writeValueAfterType(idx, v, hv)\n}", "func (s *GetRunOutput) SetDigest(v string) *GetRunOutput {\n\ts.Digest = &v\n\treturn s\n}", "func (s *WorkflowListItem) SetDigest(v string) *WorkflowListItem {\n\ts.Digest = &v\n\treturn s\n}", "func (o ConnectedRegistryNotificationOutput) Digest() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ConnectedRegistryNotification) *string { return v.Digest }).(pulumi.StringPtrOutput)\n}", "func (t TDigest) AsBytes() ([]byte, error) {\n\tbuffer := new(bytes.Buffer)\n\n\terr := binary.Write(buffer, endianess, smallEncoding)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = binary.Write(buffer, endianess, t.compression)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = binary.Write(buffer, endianess, int32(t.summary.Len()))\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar x float64\n\tt.summary.ForEach(func(mean float64, count uint32) bool {\n\t\tdelta := mean - x\n\t\tx = mean\n\t\terr = binary.Write(buffer, endianess, float32(delta))\n\n\t\treturn err == nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tt.summary.ForEach(func(mean float64, count uint32) bool {\n\t\terr = encodeUint(buffer, count)\n\t\treturn err == nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buffer.Bytes(), nil\n}", "func digestToBytes(t *testing.T, digest types.Digest) []byte {\n\tbytes, err := sql.DigestToBytes(digest)\n\trequire.NoError(t, err)\n\treturn bytes\n}", "func (o GetReposRepoTagOutput) Digest() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetReposRepoTag) string { return v.Digest }).(pulumi.StringOutput)\n}", "func Write(w io.Writer, v interface{}, name string) (err error) {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\tif er, ok := e.(error); ok {\n\t\t\t\terr = er\n\t\t\t} else {\n\t\t\t\terr = fmt.Errorf(\"format/nbt: %s\", e)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}()\n\tval := reflect.ValueOf(v)\n\tif val.Kind() == reflect.Ptr {\n\t\tval = val.Elem()\n\t}\n\tfs := fields(val.Type())\n\ten := &msgEncoder{}\n\n\t//Write name\n\tbs := en.b[:3]\n\tbs[0] = 10\n\tbinary.BigEndian.PutUint16(bs[1:], uint16(len(name)))\n\t_, err = w.Write(bs)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = w.Write([]byte(name))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn write(w, en, fs, val)\n}", "func (blob *Blob) SummingWriteContentsTo(w io.Writer, v interface{}) (sum *Sum,\n\tn int64, err error) {\n\tvar (\n\t\tb [BlobRandomSz]byte\n\t\tx N\n\t)\n\th := sha512.New()\n\tm := io.MultiWriter(w, h)\n\tdefer func() {\n\t\tif err == nil {\n\t\t\tsum = new(Sum)\n\t\t\tcopy(sum[:], h.Sum([]byte{}))\n\t\t} else {\n\t\t\tDiag.Println(err)\n\t\t}\n\t\th.Reset()\n\t\th = nil\n\t\tn = int64(x)\n\t}()\n\tif err = x.Plus(Latest.WriteTo(m)); err != nil {\n\t\treturn\n\t}\n\tif err = x.Plus(BlobId.Version(Latest).WriteTo(m)); err != nil {\n\t\treturn\n\t}\n\tif err = x.Plus(m.Write([]byte(BlobMagic))); err != nil {\n\t\treturn\n\t}\n\trand.Reader.Read(b[:BlobRandomSz])\n\tif err = x.Plus(m.Write(b[:BlobRandomSz])); err != nil {\n\t\treturn\n\t}\n\tif err = x.Plus(m.Write(blob.Owner[:])); err != nil {\n\t\treturn\n\t}\n\tif err = x.Plus(m.Write(blob.Author[:])); err != nil 
{\n\t\treturn\n\t}\n\tif err = x.Plus((NBOWriter{m}).WriteNBO(blob.Time)); err != nil {\n\t\treturn\n\t}\n\tb[0] = byte(len(blob.Name))\n\tif err = x.Plus(m.Write(b[:1])); err != nil {\n\t\treturn\n\t}\n\tif b[0] > 0 {\n\t\tif err = x.Plus(m.Write([]byte(blob.Name[:]))); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tswitch t := v.(type) {\n\tcase Mark:\n\t\terr = x.Plus(t.WriteTo(m))\n\tcase Sums:\n\t\terr = x.Plus(t.WriteTo(m))\n\tcase *bytes.Buffer:\n\t\terr = x.Plus(t.WriteTo(m))\n\tcase []byte:\n\t\terr = x.Plus(m.Write(t))\n\tcase string:\n\t\terr = x.Plus(m.Write([]byte(t)))\n\tcase io.Reader:\n\t\terr = x.Plus(io.Copy(m, t))\n\t}\n\treturn\n}", "func (vd *tValueDiffer) writeType(idx int, t reflect.Type, hl bool) {\n\tb := vd.bufi(idx)\n\tif t.PkgPath() == \"\" {\n\t\tswitch t.Kind() {\n\t\tcase reflect.Ptr:\n\t\t\tb.Write(hl, \"*\")\n\t\t\tvd.writeType(idx, t.Elem(), hl)\n\t\tcase reflect.Func:\n\t\t\tvd.writeTypeFunc(idx, t, hl)\n\t\tcase reflect.Chan:\n\t\t\tvd.writeTypeHeadChan(idx, t, hl, false)\n\t\t\tvd.writeType(idx, t.Elem(), hl)\n\t\tcase reflect.Array:\n\t\t\tb.Write(hl, \"[\", t.Len(), \"]\")\n\t\t\tvd.writeType(idx, t.Elem(), hl)\n\t\tcase reflect.Slice:\n\t\t\tb.Write(hl, \"[]\")\n\t\t\tvd.writeType(idx, t.Elem(), hl)\n\t\tcase reflect.Map:\n\t\t\tb.Write(hl, \"map[\")\n\t\t\tvd.writeType(idx, t.Key(), hl)\n\t\t\tb.Write(hl, \"]\")\n\t\t\tvd.writeType(idx, t.Elem(), hl)\n\t\tcase reflect.Struct: // must be unnamed\n\t\t\tb.Write(hl, \"struct\")\n\t\tdefault:\n\t\t\tb.Write(hl, t)\n\t\t}\n\t} else {\n\t\tb.Write(hl, t)\n\t}\n}", "func writeVDT(file *os.File, index int64, vdt *virtualDirectoryTree) {\n\tfile.Seek(index, 0)\n\t//Empezamos el proceso de guardar en binario la data en memoria del struct\n\tvar binaryDisc bytes.Buffer\n\tbinary.Write(&binaryDisc, binary.BigEndian, vdt)\n\twriteNextBytes(file, binaryDisc.Bytes())\n}", "func (s *Storage) WriteDiff(hash common.Hash, diff *big.Int) {\n\ts.set(DIFFICULTY, hash.Bytes(), diff.Bytes())\n}", "func (b SignDetail) Digest() (common.Hash, error) {\n\tvar hash common.Hash\n\tvar signFormatData apitypes.TypedData\n\tif err := json.Unmarshal([]byte(b.SignSchema.Schema), &signFormatData); err != nil {\n\t\treturn hash, err\n\t}\n\tparams, err := b.GetContractParams()\n\tif err != nil {\n\t\treturn hash, err\n\t}\n\tdata, err := buildTypedData(signFormatData, params)\n\n\tif err != nil {\n\t\treturn hash, err\n\t}\n\thash, err = crypto2.Keccak256HashEIP712(data)\n\treturn hash, err\n}", "func (i *AppxDigest) digestFile(f *zipslicer.File, doPageHash bool) error {\n\tvar peWriters []io.WriteCloser\n\tvar peResults []<-chan peDigestResult\n\tvar sink io.Writer\n\tif strings.HasSuffix(f.Name, \".exe\") || strings.HasSuffix(f.Name, \".dll\") {\n\t\t// DigestPE wants a Reader so make a pipe for each one and sink data into the pipes\n\t\tpeWriters, peResults = setupPeDigests(f.Name, i.Hash, doPageHash)\n\t\tdefer func() {\n\t\t\tfor _, w := range peWriters {\n\t\t\t\tw.Close()\n\t\t\t}\n\t\t}()\n\t\tmw := make([]io.Writer, len(peWriters))\n\t\tfor i, w := range peWriters {\n\t\t\tmw[i] = w\n\t\t}\n\t\tsink = io.MultiWriter(mw...)\n\t}\n\tif err := i.blockMap.AddFile(f, i.axpc, sink); err != nil {\n\t\treturn err\n\t}\n\tif peWriters != nil {\n\t\tfor _, w := range peWriters {\n\t\t\tw.Close()\n\t\t}\n\t\tfor _, ch := range peResults {\n\t\t\tresult := <-ch\n\t\t\tif result.err != nil {\n\t\t\t\treturn result.err\n\t\t\t}\n\t\t\ti.peDigests = append(i.peDigests, result.digest)\n\t\t}\n\t}\n\treturn nil\n}", "func (vlog *valueLog) 
write(reqs []*request) error {\n\tvlog.filesLock.RLock()\n\tcurlf := vlog.filesMap[vlog.maxFid]\n\tvlog.filesLock.RUnlock()\n\n\ttoDisk := func() error {\n\t\tif vlog.buf.Len() == 0 {\n\t\t\treturn nil\n\t\t}\n\t\tvlog.elog.Printf(\"Flushing %d blocks of total size: %d\", len(reqs), vlog.buf.Len())\n\t\tn, err := curlf.fd.Write(vlog.buf.Bytes())\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"Unable to write to value log file: %q\", curlf.path)\n\t\t}\n\t\ty.NumWrites.Add(1)\n\t\ty.NumBytesWritten.Add(int64(n))\n\t\tvlog.elog.Printf(\"Done\")\n\t\tcurlf.offset += uint32(n)\n\t\tvlog.buf.Reset()\n\n\t\tif curlf.offset > uint32(vlog.opt.ValueLogFileSize) {\n\t\t\tvar err error\n\t\t\tif err = curlf.doneWriting(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tnewid := atomic.AddUint32(&vlog.maxFid, 1)\n\t\t\ty.AssertTruef(newid < 1<<16, \"newid will overflow uint16: %v\", newid)\n\t\t\tnewlf, err := vlog.createVlogFile(newid)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tcurlf = newlf\n\t\t}\n\t\treturn nil\n\t}\n\n\tfor i := range reqs {\n\t\tb := reqs[i]\n\t\tb.Ptrs = b.Ptrs[:0]\n\t\tfor j := range b.Entries {\n\t\t\te := b.Entries[j]\n\t\t\tvar p valuePointer\n\n\t\t\tif !vlog.opt.SyncWrites && len(e.Value) < vlog.opt.ValueThreshold {\n\t\t\t\t// No need to write to value log.\n\t\t\t\tb.Ptrs = append(b.Ptrs, p)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tp.Fid = curlf.fid\n\t\t\tp.Offset = curlf.offset + uint32(vlog.buf.Len()) // Use the offset including buffer length so far.\n\t\t\tplen, err := encodeEntry(e, &vlog.buf) // Now encode the entry into buffer.\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tp.Len = uint32(plen)\n\t\t\tb.Ptrs = append(b.Ptrs, p)\n\n\t\t\tif p.Offset > uint32(vlog.opt.ValueLogFileSize) {\n\t\t\t\tif err := toDisk(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn toDisk()\n\n\t// Acquire mutex locks around this manipulation, so that the reads don't try to use\n\t// an invalid file descriptor.\n}", "func (restorer *APTRestorer) writeManifest(manifestType, algorithm string, restoreState *models.RestoreState) {\n\tif algorithm != constants.AlgMd5 && algorithm != constants.AlgSha256 {\n\t\trestorer.Context.MessageLog.Fatalf(\"writeManifest: Unsupported algorithm: %s\", algorithm)\n\t}\n\tmanifestPath := restorer.getManifestPath(manifestType, algorithm, restoreState)\n\tmanifestFile, err := os.Create(manifestPath)\n\tif err != nil {\n\t\trestoreState.PackageSummary.AddError(\"Cannot create manifest file %s: %v\",\n\t\t\tmanifestPath, err)\n\t\treturn\n\t}\n\tdefer manifestFile.Close()\n\tfor _, gf := range restoreState.IntellectualObject.GenericFiles {\n\t\tif !restorer.fileBelongsInManifest(gf, manifestType) {\n\t\t\trestorer.Context.MessageLog.Info(\"Skipping file '%s' for manifest type %s (%s)\",\n\t\t\t\tgf.Identifier, manifestType, algorithm)\n\t\t\tcontinue\n\t\t} else {\n\t\t\trestorer.Context.MessageLog.Info(\"Adding '%s' to %s\", gf.Identifier, manifestFile.Name())\n\t\t}\n\t\tchecksum := gf.GetChecksumByAlgorithm(algorithm)\n\t\tif checksum == nil {\n\t\t\trestoreState.PackageSummary.AddError(\"Cannot find %s checksum for file %s\",\n\t\t\t\talgorithm, gf.OriginalPath())\n\t\t\treturn\n\t\t}\n\t\t_, err := fmt.Fprintln(manifestFile, checksum.Digest, gf.OriginalPath())\n\t\tif err != nil {\n\t\t\trestoreState.PackageSummary.AddError(\"Error writing checksum for file %s \"+\n\t\t\t\t\"to manifest %s: %v\", gf.OriginalPath(), manifestPath, err)\n\t\t\treturn\n\t\t} else 
{\n\t\t\trestorer.Context.MessageLog.Info(\"Wrote %s digest %s for file %s\", algorithm,\n\t\t\t\tchecksum.Digest, gf.Identifier)\n\t\t}\n\t}\n}", "func WritevRaw(fd uintptr, iovec []syscall.Iovec) (nw int, err error) {\n\tnw_raw, _, errno := syscall.Syscall(syscall.SYS_WRITEV, fd, uintptr(unsafe.Pointer(&iovec[0])), uintptr(len(iovec)))\n\tnw = int(nw_raw)\n\tif errno != 0 {\n\t\terr = errors.New(fmt.Sprintf(\"writev failed with error: %d\", errno))\n\t}\n\treturn\n}", "func (s StatHat) PostEZ(name string, kind Kind, v float64, t *time.Time) error {\n\tif s.noop {\n\t\treturn nil\n\t}\n\n\tu, _ := url.Parse(s.ezPrefix())\n\tq := u.Query()\n\n\tif len(s.ezkey) == 0 {\n\t\treturn ErrMissingEZKey\n\t}\n\tq.Add(\"ezkey\", s.ezkey)\n\tq.Add(\"stat\", name)\n\n\tif t != nil && !t.IsZero() {\n\t\tq.Add(\"t\", strconv.FormatInt(t.Unix(), 10))\n\t}\n\n\tif kind == KindValue {\n\t\tq.Add(\"value\", strconv.FormatFloat(v, 'g', -1, 64))\n\t} else if kind == KindCounter {\n\t\tq.Add(\"count\", strconv.FormatFloat(v, 'g', -1, 64))\n\t} else {\n\t\treturn ErrKindMissing\n\t}\n\n\tu.RawQuery = q.Encode()\n\treq, err := http.NewRequest(http.MethodPost, u.String(), nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp, err := httpDo(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\t// StatHat may return HTTP Status Code 204 to indicate success.\n\t// See: https://blog.stathat.com/2017/05/05/bandwidth.html\n\tif resp.StatusCode == http.StatusNoContent {\n\t\treturn nil\n\t}\n\n\tvar respJSON struct {\n\t\t// {\"msg\":\"stat deleted.\"}\n\t\tMsg string\n\t}\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = json.Unmarshal(body, &respJSON)\n\tif respJSON.Msg != \"ok\" {\n\t\terr = errors.New(respJSON.Msg)\n\t}\n\treturn err\n}", "func (d *swiftDriver) WriteManifest(account keppel.Account, repoName string, manifestDigest digest.Digest, contents []byte) error {\n\tc, _, err := d.getBackendConnection(account)\n\tif err != nil {\n\t\treturn err\n\t}\n\to := manifestObject(c, repoName, manifestDigest)\n\treturn uploadToObject(o, bytes.NewReader(contents), nil, nil)\n}", "func HashFromDigest(algo Type, digest Digest) Hash {\n\treturn HashFromSum(algo, digest[:])\n}", "func Writev(f *os.File, in [][]byte) (nw int, err error) {\n\tiovec := make([]syscall.Iovec, len(in))\n\tfor i, slice := range in {\n\t\tiovec[i] = syscall.Iovec{&slice[0], uint64(len(slice))}\n\t}\n\tnw, err = WritevRaw(uintptr(f.Fd()), iovec)\n\treturn\n}", "func (r *Repository) InstallDigest(d digest.Digest, file string) error {\n\tfile, err := filepath.EvalSymlinks(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdir, path := r.Path(d)\n\tif err := os.MkdirAll(dir, 0777); err != nil {\n\t\treturn err\n\t}\n\terr = os.Link(file, path)\n\tif os.IsExist(err) {\n\t\terr = nil\n\t}\n\tif err != nil {\n\t\t// Copy if file was reported to be on a different device.\n\t\tif linkErr, ok := err.(*os.LinkError); ok && linkErr.Err == syscall.EXDEV {\n\t\t\tf, ferr := os.Open(file)\n\t\t\tif ferr != nil {\n\t\t\t\treturn ferr\n\t\t\t}\n\t\t\tdefer func() { _ = f.Close() }()\n\t\t\t_, err = r.Put(context.Background(), f)\n\t\t}\n\t}\n\treturn err\n}", "func (h *hmacsha256) Write(p []byte) {\n\th.inner.Write(p)\n}", "func (w *Writer) WriteValue(v reflect.Value) {\n\tvalueEncoders[v.Kind()](w, v)\n}", "func (b *Base) Digest(req *DigestReq) (*DigestResp, error) {\n\treturn nil, ErrFunctionNotSupported\n}", "func (me *XsdGoPkgHasElems_DigestValue) Walk() (err error) {\n\tif fn := 
WalkHandlers.XsdGoPkgHasElems_DigestValue; me != nil {\n\t\tif fn != nil {\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif fn != nil {\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func (d *Deployment) WriteHMAC(version string, hmac []byte) error {\n\n\t// Generate the filename, write to file, set ownership.\n\thmacPath, _ := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension)\n\tif err := ioutil.WriteFile(hmacPath, hmac, 0664); err != nil {\n\t\treturn fmt.Errorf(\"Error while writing %q: %s\", hmacPath, err.Error())\n\t}\n\tif err := setOwner(hmacPath, d.uid, d.gid); err != nil {\n\t\treturn fmt.Errorf(\"Unable to set owner on %q: %s\", hmacPath, err.Error())\n\t}\n\n\treturn nil\n}", "func Write(fd uintptr, p unsafe.Pointer, n int32) int32", "func (h *Hash) Write(p []byte) (n int, err error) {\n\treturn h.hmac.Write(p)\n}", "func (w *Writer) Write(entry *pb.DataEntry) error {\n\tw.sizeBuf.Reset()\n\tw.dataBuf.Reset()\n\tif err := w.dataBuf.Marshal(entry); err != nil {\n\t\treturn err\n\t}\n\tentryBytes := w.dataBuf.Bytes()\n\tif err := w.sizeBuf.EncodeVarint(uint64(len(entryBytes))); err != nil {\n\t\treturn err\n\t}\n\tif _, err := w.fd.Write(w.sizeBuf.Bytes()); err != nil {\n\t\treturn err\n\t}\n\tif _, err := w.fd.Write(entryBytes); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func FreezeImageToDigest(template *servingv1alpha1.RevisionTemplateSpec, baseRevision *servingv1alpha1.Revision) error {\n\tif baseRevision == nil {\n\t\treturn nil\n\t}\n\n\tcurrentContainer, err := ContainerOfRevisionTemplate(template)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbaseContainer, err := ContainerOfRevisionSpec(&baseRevision.Spec)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif currentContainer.Image != baseContainer.Image {\n\t\treturn fmt.Errorf(\"could not freeze image to digest since current revision contains unexpected image\")\n\t}\n\n\tif baseRevision.Status.ImageDigest != \"\" {\n\t\treturn UpdateImage(template, baseRevision.Status.ImageDigest)\n\t}\n\treturn nil\n}", "func write(w io.Writer, en *msgEncoder, fs map[string]interface{}, val reflect.Value) error {\n\tfor _, i := range fs {\n\t\tif f, ok := i.(field); ok {\n\t\t\twritePrefix(en, w, f.name, f.requiredType)\n\t\t\tv := val.Field(f.sField)\n\t\t\terr := f.write(w, en, v)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tfs := i.(fieldStruct)\n\t\t\twritePrefix(en, w, fs.name, 10)\n\t\t\tv := val.Field(fs.sField)\n\t\t\terr := write(w, en, fs.m, v)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\tbs := en.b[:1]\n\tbs[0] = 0\n\t_, err := w.Write(bs)\n\treturn err\n}", "func (s *ShardMap) hash(v interface{}) int {\n\tswitch s.Type {\n\tcase \"string\":\n\t\tval, ok := v.(string)\n\t\tif !ok {\n\t\t\treturn -1\n\t\t}\n\n\t\thash := fnv.New32()\n\t\thash.Write([]byte(val))\n\t\treturn int(hash.Sum32() % NumShards)\n\tcase \"int32\":\n\t\t// Values that come as numbers in JSON are of type float64.\n\t\tval, ok := v.(float64)\n\t\tif !ok {\n\t\t\treturn -1\n\t\t}\n\n\t\treturn int(int32(val) % NumShards)\n\tdefault:\n\t\treturn -1\n\t}\n}", "func WriteVarUint(writer io.Writer, value uint64) error {\n\tvar buf [9]byte\n\tlen := 0\n\tif value < 0xFD {\n\t\tbuf[0] = uint8(value)\n\t\tlen = 1\n\t} else if value <= 0xFFFF {\n\t\tbuf[0] = 
0xFD\n\t\tbinary.LittleEndian.PutUint16(buf[1:], uint16(value))\n\t\tlen = 3\n\t} else if value <= 0xFFFFFFFF {\n\t\tbuf[0] = 0xFE\n\t\tbinary.LittleEndian.PutUint32(buf[1:], uint32(value))\n\t\tlen = 5\n\t} else {\n\t\tbuf[0] = 0xFF\n\t\tbinary.LittleEndian.PutUint64(buf[1:], uint64(value))\n\t\tlen = 9\n\t}\n\t_, err := writer.Write(buf[:len])\n\treturn err\n}", "func (m *wasiSnapshotPreview1Impl) fdWrite(pfd wasiFd, piovs list) (rv wasiSize, err wasiErrno) {\n\tf, err := m.files.getFile(pfd, wasiRightsFdWrite)\n\tif err != wasiErrnoSuccess {\n\t\treturn 0, err\n\t}\n\n\tn, ferr := f.Writev(m.buffers(wasiIovecArray(piovs)))\n\tif ferr != nil {\n\t\treturn n, fileErrno(ferr)\n\t}\n\treturn n, wasiErrnoSuccess\n}", "func (w *Writer) WriteVal(schema Schema, val any) {\n\tencoder := w.cfg.getEncoderFromCache(schema.Fingerprint(), reflect2.RTypeOf(val))\n\tif encoder == nil {\n\t\ttyp := reflect2.TypeOf(val)\n\t\tencoder = w.cfg.EncoderOf(schema, typ)\n\t}\n\tencoder.Encode(reflect2.PtrOf(val), w)\n}", "func tfsWrite(imgFile *os.File, imgOffset uint64, fsSize uint64, label string, root map[string]interface{}) (*tfs, error) {\n\ttfs := newTfs(imgFile, imgOffset, fsSize)\n\ttfs.label = label\n\trand.Seed(time.Now().UnixNano())\n\t_, err := rand.Read(tfs.uuid[:])\n\terr = tfs.logInit()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot create filesystem log: %v\", err)\n\t}\n\ttfs.encodeTupleHeader(len(root))\n\tfor k, v := range root {\n\t\tif k == \"children\" {\n\t\t\terr = tfs.writeDirEntries(v.(map[string]interface{}))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t} else {\n\t\t\ttfs.encodeMetadata(k, v)\n\t\t}\n\t}\n\treturn tfs, tfs.flush()\n}", "func Digest(done <-chan interface{}, f func(interface{}, ...interface{}) interface{}, in <-chan interface{}, params []interface{}) (outchan <-chan interface{}) {\n\tout := make(chan interface{})\n\tvar wg sync.WaitGroup\n\tconst numDigesters = 20\n\twg.Add(numDigesters)\n\tfor i := 0; i < numDigesters; i++ {\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\t// recover from panic if one occured. 
Set err to nil otherwise.\n\t\t\t\tif recover() != nil {\n\t\t\t\t\tfmt.Println(\"defer launch\")\n\t\t\t\t\twg.Done()\n\n\t\t\t\t}\n\t\t\t}()\n\t\t\tdigester(done, f, params, in, out)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(out)\n\t}()\n\treturn out\n}", "func (vd *tValueDiffer) writeElem(idx int, v reflect.Value, hl bool) {\n\tb := vd.bufi(idx)\n\tif !v.IsValid() {\n\t\tb.Write(hl, nil)\n\t} else {\n\t\tswitch v.Kind() {\n\t\tcase reflect.Interface:\n\t\t\tif v.IsNil() {\n\t\t\t\tb.Write(hl, nil)\n\t\t\t} else {\n\t\t\t\tvd.writeElem(idx, v.Elem(), hl)\n\t\t\t}\n\t\tcase reflect.Array:\n\t\t\tvd.writeElemArray(idx, v, hl)\n\t\tcase reflect.Slice:\n\t\t\tvd.writeElemSlice(idx, v, hl)\n\t\tcase reflect.Map:\n\t\t\tvd.writeElemMap(idx, v, hl)\n\t\tcase reflect.Struct:\n\t\t\tvd.writeElemStruct(idx, v, hl)\n\t\tdefault: // bool, integer, float, complex, channel, function, pointer, string\n\t\t\tvd.writeKey(idx, v, hl)\n\t\t}\n\t}\n}", "func (b *ItemBundle) Digest() (isolated.HexDigest, int64, error) {\n\th := sha1.New()\n\tcw := &iotools.CountingWriter{Writer: h}\n\tif err := b.writeTar(cw); err != nil {\n\t\treturn \"\", 0, err\n\t}\n\treturn isolated.Sum(h), cw.Count, nil\n}", "func execWriteType(_ int, p *gop.Context) {\n\targs := p.GetArgs(3)\n\ttypes.WriteType(args[0].(*bytes.Buffer), args[1].(types.Type), args[2].(types.Qualifier))\n}", "func Put(key string, value string){\n \n h := sha256.New()\n h.Write([]byte(value))\n sha := base64.URLEncoding.EncodeToString(h.Sum(nil))\n \n //fmt.Println(sha)\n var n Data \n \n n.val = value //storing key value in keyValue hash map\n n.hash = sha // storing key hash in keyHash hash map \n \n keyValue[key] = n\n}", "func (w *Writer) Write(name string, v interface{}) error {\n\tww, err := w.wz.Create(name)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"npz: could not create npz entry %q: %w\", name, err)\n\t}\n\n\terr = npy.Write(ww, v)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"npz: could not write npz entry %q: %w\", name, err)\n\t}\n\n\treturn nil\n}", "func (me TDigestValueType) ToXsdtBase64Binary() xsdt.Base64Binary { return xsdt.Base64Binary(me) }", "func (b *BTSVShardWriter) marshalValue(ctx MarshalContext, enc *marshal.Encoder, v Value) {\n\tenc.PutByte(byte(v.typ))\n\tswitch v.typ {\n\tcase NullType:\n\t\tif v.Null() == PosNull {\n\t\t\tenc.PutByte(1)\n\t\t} else {\n\t\t\tenc.PutByte(byte(0xff))\n\t\t}\n\tcase BoolType:\n\t\tif v.Bool(nil) {\n\t\t\tenc.PutByte(1)\n\t\t} else {\n\t\t\tenc.PutByte(0)\n\t\t}\n\tcase IntType, CharType:\n\t\tenc.PutVarint(int64(v.v))\n\tcase FloatType:\n\t\tenc.PutUint64(v.v) // v.v encodes the floating point in binary.\n\tcase StringType, FileNameType, EnumType:\n\t\ts := v.Str(nil)\n\t\tenc.PutString(s)\n\tcase DateType, DateTimeType:\n\t\tt := v.DateTime(nil)\n\t\tenc.PutVarint(t.UnixNano())\n\t\tenc.PutVarint(int64(b.getLocationID(t)))\n\tcase TableType:\n\t\tv.Table(nil).Marshal(ctx, enc)\n\tcase StructType:\n\t\ts := v.Struct(nil)\n\t\tnFields := s.Len()\n\t\tenc.PutVarint(int64(nFields))\n\t\ttmp := b.tmpPool.Get()\n\t\ttmp.colNames = tmp.colNames[:0]\n\t\tfor i := 0; i < nFields; i++ {\n\t\t\tf := s.Field(i)\n\t\t\ttmp.colNames = append(tmp.colNames, f.Name)\n\t\t\tcol := b.internCol(f.Name, f.Value.Type())\n\t\t\tenc.PutVarint(int64(col.Col))\n\t\t\tb.marshalValue(ctx, enc, f.Value)\n\t\t}\n\t\tb.colSorter.AddColumns(tmp.colNames)\n\t\tb.tmpPool.Put(tmp)\n\tdefault:\n\t\tlog.Panicf(\"writebinarytsv: invalid value %v (%s)\", v, DescribeValue(v))\n\t}\n}", "func (d *Diskv) 
Write(key string, val []byte) error {\n\treturn d.write(key, val, false)\n}", "func DigestFile(filename string) (string, error) {\n\tb, err := DigestFileBytes(filename)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn hex.EncodeToString(b), nil\n}", "func (vlog *valueLog) write(reqs []*request) error {\n\tfor i := range reqs {\n\t\tb := reqs[i]\n\t\tfor j := range b.Entries {\n\t\t\te := b.Entries[j]\n\t\t\tplen, err := encodeEntry(e, &vlog.buf) // Now encode the entry into buffer.\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tvlog.curWriter.Append(vlog.buf.Bytes())\n\t\t\tvlog.buf.Reset()\n\t\t\tvlog.pendingLen += plen\n\t\t\te.logOffset.fid = vlog.currentLogFile().fid\n\t\t\t// Use the offset including buffer length so far.\n\t\t\te.logOffset.offset = vlog.writableOffset() + uint32(vlog.pendingLen)\n\t\t}\n\t\tvlog.numEntriesWritten += uint32(len(b.Entries))\n\t\t// We write to disk here so that all entries that are part of the same transaction are\n\t\t// written to the same vlog file.\n\t\twriteNow :=\n\t\t\tvlog.writableOffset()+uint32(vlog.pendingLen) > uint32(vlog.opt.ValueLogFileSize) ||\n\t\t\t\tvlog.numEntriesWritten > uint32(vlog.opt.ValueLogMaxEntries)\n\t\tif writeNow {\n\t\t\tif err := vlog.flush(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn vlog.flush()\n\n\t// Acquire mutex locks around this manipulation, so that the reads don't try to use\n\t// an invalid file descriptor.\n}", "func (p *Stream) WriteUint64(v uint64) {\n\tif v < 10 {\n\t\tp.writeFrame[p.writeIndex] = byte(v + 54)\n\t\tp.writeIndex++\n\t\tif p.writeIndex == streamBlockSize {\n\t\t\tp.gotoNextWriteFrame()\n\t\t}\n\t} else if v < 65536 {\n\t\tif p.writeIndex < streamBlockSize-3 {\n\t\t\tb := p.writeFrame[p.writeIndex:]\n\t\t\tb[0] = 9\n\t\t\tb[1] = byte(v)\n\t\t\tb[2] = byte(v >> 8)\n\t\t\tp.writeIndex += 3\n\t\t\treturn\n\t\t}\n\t\tp.PutBytes([]byte{\n\t\t\t9,\n\t\t\tbyte(v),\n\t\t\tbyte(v >> 8),\n\t\t})\n\t} else if v < 4294967296 {\n\t\tif p.writeIndex < streamBlockSize-5 {\n\t\t\tb := p.writeFrame[p.writeIndex:]\n\t\t\tb[0] = 10\n\t\t\tb[1] = byte(v)\n\t\t\tb[2] = byte(v >> 8)\n\t\t\tb[3] = byte(v >> 16)\n\t\t\tb[4] = byte(v >> 24)\n\t\t\tp.writeIndex += 5\n\t\t\treturn\n\t\t}\n\t\tp.PutBytes([]byte{\n\t\t\t10,\n\t\t\tbyte(v),\n\t\t\tbyte(v >> 8),\n\t\t\tbyte(v >> 16),\n\t\t\tbyte(v >> 24),\n\t\t})\n\t} else {\n\t\tif p.writeIndex < streamBlockSize-9 {\n\t\t\tb := p.writeFrame[p.writeIndex:]\n\t\t\tb[0] = 11\n\t\t\tb[1] = byte(v)\n\t\t\tb[2] = byte(v >> 8)\n\t\t\tb[3] = byte(v >> 16)\n\t\t\tb[4] = byte(v >> 24)\n\t\t\tb[5] = byte(v >> 32)\n\t\t\tb[6] = byte(v >> 40)\n\t\t\tb[7] = byte(v >> 48)\n\t\t\tb[8] = byte(v >> 56)\n\t\t\tp.writeIndex += 9\n\t\t\treturn\n\t\t}\n\t\tp.PutBytes([]byte{\n\t\t\t11,\n\t\t\tbyte(v),\n\t\t\tbyte(v >> 8),\n\t\t\tbyte(v >> 16),\n\t\t\tbyte(v >> 24),\n\t\t\tbyte(v >> 32),\n\t\t\tbyte(v >> 40),\n\t\t\tbyte(v >> 48),\n\t\t\tbyte(v >> 56),\n\t\t})\n\t}\n}", "func (t *Tree) Digest() *crypto.Digest { return t.dig }", "func (entry LogEntry) Write(w io.Writer, byteOrder binary.ByteOrder) (length int, err error) {\n\n\t// logID LSN\n\tlength += binary.Size(entry.logID)\n\terr = binary.Write(w, byteOrder, entry.logID)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tstdlog.Printf(\"write logId: %v\\n\", entry.logID)\n\n\t// symID\n\tlength += binary.Size(entry.symbolID)\n\terr = binary.Write(w, byteOrder, entry.symbolID)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tstdlog.Printf(\"write symId: %v\\n\", entry.symbolID)\n\n\t// timestamp\n\tlength 
+= binary.Size(entry.timeStamp)\n\terr = binary.Write(w, byteOrder, entry.timeStamp)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tstdlog.Printf(\"write timestamp: %v\\n\", entry.timeStamp)\n\n\t// length of value data\n\tlength += 4\n\tvalLen := sizeOfValues(entry.valueList)\n\terr = binary.Write(w, byteOrder, valLen)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tstdlog.Printf(\"write value len: %v\\n\", valLen)\n\n\t// value data\n\t// Note: no type info is stored in the data log\n\t// Type info is kept in the sym file\n\tlength += int(valLen)\n\terr = writeValueList(w, byteOrder, entry.valueList)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn length, nil\n}", "func (g *Graphite) Write(_ context.Context, vl *api.ValueList) error {\n\tfor i, v := range vl.Values {\n\t\tdsName := \"\"\n\t\tif g.AlwaysAppendDS || len(vl.Values) != 1 {\n\t\t\tdsName = vl.DSName(i)\n\t\t}\n\n\t\tname := g.formatName(vl.Identifier, dsName)\n\n\t\tval, err := g.formatValue(v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tt := vl.Time\n\t\tif t.IsZero() {\n\t\t\tt = time.Now()\n\t\t}\n\n\t\tfmt.Fprintf(g.W, \"%s %s %d\\r\\n\", name, val, t.Unix())\n\t}\n\n\treturn nil\n}", "func (d *Diskv) write(key string, val []byte, sync bool) error {\n\tif len(key) <= 0 {\n\t\treturn fmt.Errorf(\"empty key\")\n\t}\n\n\td.Lock()\n\tdefer d.Unlock()\n\tif err := d.ensurePath(key); err != nil {\n\t\treturn err\n\t}\n\n\tcompressedVal, err := d.compress(val)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists\n\tf, err := os.OpenFile(d.completeFilename(key), mode, d.FilePerm)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, err = f.Write(compressedVal); err != nil {\n\t\tf.Close() // error deliberately ignored\n\t\treturn err\n\t}\n\n\tif sync {\n\t\tif err := f.Sync(); err != nil {\n\t\t\tf.Close() // error deliberately ignored\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := f.Close(); err != nil {\n\t\treturn err\n\t}\n\n\tif d.Index != nil {\n\t\td.Index.Insert(key)\n\t}\n\n\tdelete(d.cache, key) // cache only on read\n\treturn nil\n}", "func (value *Value) write(w io.Writer) {\n\tswitch value.T {\n\tcase StringType:\n\t\tfmt.Fprintf(w, \"{%s}\", value.S)\n\tcase NumberType:\n\t\tfmt.Fprintf(w, \"%d\", value.I)\n\tcase SymbolType:\n\t\tfmt.Fprintf(w, \"%s\", value.S)\n\tdefault:\n\t\tpanic(\"unknown field value type\")\n\t}\n}", "func (e *encoder) writeUint(val uint64, size int) {\n\te.head = align(e.head, size)\n\tfor i := e.head; i < e.head+size; i++ {\n\t\te.buffer[i] = byte(val & 0xFF)\n\t\tval >>= 8\n\t}\n\te.head += size\n}", "func writeTTEntry(tt []TTEntryT, zobrist uint64, eval EvalCp, bestMove dragon.Move, depthToGo int, evalType TTEvalT) {\n\tvar entry TTEntryT // use a full struct overwrite to obliterate old data\n\n\t// Do we already have an entry for the hash?\n\toldTTEntry, isHit := probeTT(tt, zobrist)\n\n\tif isHit {\n\t\tentry = oldTTEntry\n\t\tupdateTTEntry(&entry, eval, bestMove, depthToGo, evalType)\n\t} else {\n\t\t// initialise a new entry\n\t\tentry.zobrist = zobrist\n\n\t\tpEntry := &entry.parityHits[depthToGoParity(depthToGo)]\n\n\t\tpEntry.eval = eval\n\t\tpEntry.bestMove = bestMove\n\t\tpEntry.depthToGo = uint8(depthToGo)\n\t\tpEntry.evalType = evalType\n\t}\n\tindex := ttIndex(tt, zobrist)\n\ttt[index] = entry\n}", "func (d *Digester) Digest(ctx context.Context, sub broker.Subscriber, opts ...digester.Option) error {\n\tdopts := digester.Options{}\n\tfor _, apply := range opts {\n\t\tapply(&dopts)\n\t}\n\n\th := 
dopts.Handler\n\tif h == nil {\n\t\th = handlers.DumpData\n\t}\n\n\treturn d.handle(ctx, sub, h)\n}", "func (s *Series) Push(t uint32, v float64) {\n\ts.Lock()\n\tdefer s.Unlock()\n\n\tif s.t == 0 {\n\t\t// first point\n\t\ts.t = t\n\t\ts.val = v\n\t\ts.tDelta = t - s.T0\n\t\ts.bw.writeBits(uint64(s.tDelta), 14)\n\t\ts.bw.writeBits(math.Float64bits(v), 64)\n\t\treturn\n\t}\n\n\ttDelta := t - s.t\n\tdod := int32(tDelta - s.tDelta)\n\n\tswitch {\n\tcase dod == 0:\n\t\ts.bw.writeBit(zero)\n\tcase -63 <= dod && dod <= 64:\n\t\ts.bw.writeBits(0x02, 2) // '10'\n\t\ts.bw.writeBits(uint64(dod), 7)\n\tcase -255 <= dod && dod <= 256:\n\t\ts.bw.writeBits(0x06, 3) // '110'\n\t\ts.bw.writeBits(uint64(dod), 9)\n\tcase -2047 <= dod && dod <= 2048:\n\t\ts.bw.writeBits(0x0e, 4) // '1110'\n\t\ts.bw.writeBits(uint64(dod), 12)\n\tdefault:\n\t\ts.bw.writeBits(0x0f, 4) // '1111'\n\t\ts.bw.writeBits(uint64(dod), 32)\n\t}\n\n\tvDelta := math.Float64bits(v) ^ math.Float64bits(s.val)\n\n\tif vDelta == 0 {\n\t\ts.bw.writeBit(zero)\n\t} else {\n\t\ts.bw.writeBit(one)\n\n\t\tleading := uint8(bits.LeadingZeros64(vDelta))\n\t\ttrailing := uint8(bits.TrailingZeros64(vDelta))\n\n\t\t// clamp number of leading zeros to avoid overflow when encoding\n\t\tif leading >= 32 {\n\t\t\tleading = 31\n\t\t}\n\n\t\t// TODO(dgryski): check if it's 'cheaper' to reset the leading/trailing bits instead\n\t\tif s.leading != ^uint8(0) && leading >= s.leading && trailing >= s.trailing {\n\t\t\ts.bw.writeBit(zero)\n\t\t\ts.bw.writeBits(vDelta>>s.trailing, 64-int(s.leading)-int(s.trailing))\n\t\t} else {\n\t\t\ts.leading, s.trailing = leading, trailing\n\n\t\t\ts.bw.writeBit(one)\n\t\t\ts.bw.writeBits(uint64(leading), 5)\n\n\t\t\t// Note that if leading == trailing == 0, then sigbits == 64. But that value doesn't actually fit into the 6 bits we have.\n\t\t\t// Luckily, we never need to encode 0 significant bits, since that would put us in the other case (vdelta == 0).\n\t\t\t// So instead we write out a 0 and adjust it back to 64 on unpacking.\n\t\t\tsigbits := 64 - leading - trailing\n\t\t\ts.bw.writeBits(uint64(sigbits), 6)\n\t\t\ts.bw.writeBits(vDelta>>trailing, int(sigbits))\n\t\t}\n\t}\n\n\ts.tDelta = tDelta\n\ts.t = t\n\ts.val = v\n\n}", "func Hash(i interface{}) string {\n\tv := reflect.ValueOf(i)\n\tif v.Kind() != reflect.Ptr {\n\t\tif !v.CanAddr(){\n\t\t\treturn \"\"\n\t\t}\n\t\tv = v.Addr()\n\t}\n\n\tsize := unsafe.Sizeof(v.Interface())\n\tb := (*[1 << 10]uint8)(unsafe.Pointer(v.Pointer()))[:size:size]\n\n\th := md5.New()\n\treturn base64.StdEncoding.EncodeToString(h.Sum(b))\n}", "func WithDigest(name Named, digest digest.Digest) (Canonical, error) {\n\tif !anchoredDigestRegexp.MatchString(digest.String()) {\n\t\treturn nil, ErrDigestInvalidFormat\n\t}\n\tvar repo repository\n\tif r, ok := name.(namedRepository); ok {\n\t\trepo.domain = r.Domain()\n\t\trepo.path = r.Path()\n\t} else {\n\t\trepo.path = name.Name()\n\t}\n\tif tagged, ok := name.(Tagged); ok {\n\t\treturn reference{\n\t\t\tnamedRepository: repo,\n\t\t\ttag: tagged.Tag(),\n\t\t\tdigest: digest,\n\t\t}, nil\n\t}\n\treturn canonicalReference{\n\t\tnamedRepository: repo,\n\t\tdigest: digest,\n\t}, nil\n}", "func expandDigestToHash(letter string) schema.MD5Hash {\n\tdigest := expandDigest(letter)\n\tdb, err := sql.DigestToBytes(digest)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn sql.AsMD5Hash(db)\n}", "func DigestSize() int {\n\treturn sha256DigestSize\n}", "func NVWriteValue(rw io.ReadWriter, index, offset uint32, data []byte, ownAuth []byte) error {\n\tif ownAuth == 
nil {\n\t\tif _, _, _, err := nvWriteValue(rw, index, offset, uint32(len(data)), data, nil); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to write to NVRAM: %v\", err)\n\t\t}\n\t\treturn nil\n\t}\n\tsharedSecretOwn, osaprOwn, err := newOSAPSession(rw, etOwner, khOwner, ownAuth[:])\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to start new auth session: %v\", err)\n\t}\n\tdefer osaprOwn.Close(rw)\n\tdefer zeroBytes(sharedSecretOwn[:])\n\tauthIn := []interface{}{ordNVWriteValue, index, offset, len(data), data}\n\tca, err := newCommandAuth(osaprOwn.AuthHandle, osaprOwn.NonceEven, nil, sharedSecretOwn[:], authIn)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to construct owner auth fields: %v\", err)\n\t}\n\tdata, ra, ret, err := nvWriteValue(rw, index, offset, uint32(len(data)), data, ca)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to write to NVRAM: %v\", err)\n\t}\n\traIn := []interface{}{ret, ordNVWriteValue, tpmutil.U32Bytes(data)}\n\tif err := ra.verify(ca.NonceOdd, sharedSecretOwn[:], raIn); err != nil {\n\t\treturn fmt.Errorf(\"failed to verify authenticity of response: %v\", err)\n\t}\n\treturn nil\n}", "func (c *cpu) writev() {\n\tsp, iovcnt := popI32(c.sp)\n\tsp, iov := popPtr(sp)\n\tfd := readI32(sp)\n\tn, _, err := syscall.Syscall(syscall.SYS_WRITEV, uintptr(fd), iov, uintptr(iovcnt))\n\tif strace {\n\t\tfmt.Fprintf(os.Stderr, \"writev(%#x, %#x, %#x) %v %v\\t; %s\\n\", fd, iov, iovcnt, n, err, c.pos())\n\t}\n\tif err != 0 {\n\t\tc.setErrno(err)\n\t\twriteLong(c.rp, -1)\n\t\treturn\n\t}\n\n\twriteLong(c.rp, int64(n))\n}", "func (p *Stream) writeRTMap(v RTMap) string {\n\tif thread := v.rt.thread; thread != nil {\n\t\treadStream := thread.rtStream\n\t\tlength := int(*v.length)\n\n\t\tif length == 0 {\n\t\t\tp.writeFrame[p.writeIndex] = 96\n\t\t\tp.writeIndex++\n\t\t\tif p.writeIndex == streamBlockSize {\n\t\t\t\tp.gotoNextWriteFrame()\n\t\t\t}\n\t\t\treturn StreamWriteOK\n\t\t}\n\n\t\tstartPos := p.GetWritePos()\n\n\t\tb := p.writeFrame[p.writeIndex:]\n\t\tif p.writeIndex < streamBlockSize-5 {\n\t\t\tp.writeIndex += 5\n\t\t} else {\n\t\t\tb = b[0:1]\n\t\t\tp.SetWritePos(startPos + 5)\n\t\t}\n\n\t\tif length < 31 {\n\t\t\tb[0] = byte(96 + length)\n\t\t} else {\n\t\t\tb[0] = 127\n\t\t}\n\n\t\tif length > 30 {\n\t\t\tif p.writeIndex < streamBlockSize-4 {\n\t\t\t\tl := p.writeFrame[p.writeIndex:]\n\t\t\t\tl[0] = byte(uint32(length))\n\t\t\t\tl[1] = byte(uint32(length) >> 8)\n\t\t\t\tl[2] = byte(uint32(length) >> 16)\n\t\t\t\tl[3] = byte(uint32(length) >> 24)\n\t\t\t\tp.writeIndex += 4\n\t\t\t} else {\n\t\t\t\tp.PutBytes([]byte{\n\t\t\t\t\tbyte(uint32(length)),\n\t\t\t\t\tbyte(uint32(length) >> 8),\n\t\t\t\t\tbyte(uint32(length) >> 16),\n\t\t\t\t\tbyte(uint32(length) >> 24),\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\n\t\tif v.items != nil {\n\t\t\titems := *v.items\n\t\t\tfor i := 0; i < length; i++ {\n\t\t\t\tp.WriteString(items[i].key)\n\t\t\t\treadStream.SetReadPos(int(items[i].pos.getPos()))\n\t\t\t\tif !p.writeStreamNext(readStream) {\n\t\t\t\t\tp.SetWritePos(startPos)\n\t\t\t\t\treturn StreamWriteIsNotAvailable\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\ttotalLength := uint32(p.GetWritePos() - startPos)\n\t\tif len(b) > 1 {\n\t\t\tb[1] = byte(totalLength)\n\t\t\tb[2] = byte(totalLength >> 8)\n\t\t\tb[3] = byte(totalLength >> 16)\n\t\t\tb[4] = byte(totalLength >> 24)\n\t\t} else {\n\t\t\tendPos := p.GetWritePos()\n\t\t\tp.SetWritePos(startPos + 1)\n\t\t\tp.PutBytes([]byte{\n\t\t\t\tbyte(totalLength),\n\t\t\t\tbyte(totalLength >> 8),\n\t\t\t\tbyte(totalLength >> 
16),\n\t\t\t\tbyte(totalLength >> 24),\n\t\t\t})\n\t\t\tp.SetWritePos(endPos)\n\t\t}\n\n\t\treturn StreamWriteOK\n\t}\n\n\treturn StreamWriteIsNotAvailable\n}", "func (s *Storage) WriteCanonicalHash(n *big.Int, hash common.Hash) {\n\ts.set(CANONICAL, n.Bytes(), hash.Bytes())\n}", "func writeTestLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, name string, dgst digest.Digest, content io.Reader) (digest.Digest, error) {\n\th := sha256.New()\n\trd := io.TeeReader(content, h)\n\n\tp, err := ioutil.ReadAll(rd)\n\n\tif err != nil {\n\t\treturn \"\", nil\n\t}\n\n\tblobDigestSHA := digest.NewDigest(\"sha256\", h)\n\n\tblobPath, err := pathMapper.path(blobDataPathSpec{\n\t\tdigest: dgst,\n\t})\n\n\tif err := driver.PutContent(blobPath, p); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tlayerLinkPath, err := pathMapper.path(layerLinkPathSpec{\n\t\tname: name,\n\t\tdigest: dgst,\n\t})\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := driver.PutContent(layerLinkPath, []byte(dgst)); err != nil {\n\t\treturn \"\", nil\n\t}\n\n\treturn blobDigestSHA, err\n}", "func WriteDouble(buffer []byte, offset int, value float64) {\n WriteUInt64(buffer, offset, math.Float64bits(value))\n}", "func SHA512Digest(s string) string {\n\th := sha512.Sum512([]byte(s))\n\treturn hex.EncodeToString(h[:])\n}", "func (t *Tube) Write(key string, rev Rev, value interface{}) (changed bool) {\n\t// log.Printf(\"tube writing (%s,%d,%v)\", key, rev, value)\n\t// defer func() {\n\t// \tlog.Printf(\"tube written to, changed=%v\\n%s\", changed, t.Dump())\n\t// }()\n\n\tt.Lock()\n\tdefer t.Unlock()\n\tchanged = t.view.Update(&Record{\n\t\tKey: key,\n\t\tRev: rev,\n\t\tValue: value,\n\t\tUpdated: time.Now(),\n\t})\n\tif changed {\n\t\tgo t.writeSync(key, rev, value) // synchronize downstream tubes\n\t}\n\treturn\n}", "func hash(stav Stav) uint64{\n\tstr := \"\"\n\n\tfor i := 0; i < len(stav.Auta); i++ {\n\t\tstr += stav.Auta[i].Farba\n\t\tstr += strconv.Itoa(int(stav.Auta[i].X))\n\t\tstr += strconv.Itoa(int(stav.Auta[i].Y))\n\t\tstr += strconv.FormatBool(stav.Auta[i].Smer)\n\t\tstr += strconv.Itoa(int(stav.Auta[i].Dlzka))\n\t}\n\n\th := fnv.New64a()\n\th.Write([]byte(str))\n\treturn h.Sum64()\n\n}", "func (e *Encoder) PutHash(h hash.Hash) {\n\te.write(h[:])\n}", "func (fi *FileIO) WriteAtv(bs [][]byte, off int64) (int, error) {\n\treturn linuxWriteAtv(fi, bs, off)\n}", "func (es *externalSigner) NewDigest(sig *model.PdfSignature) (model.Hasher, error) {\n\treturn bytes.NewBuffer(nil), nil\n}", "func TestWrite(t *testing.T) {\n\tmockZooKeeper := &MockZooHandle{\n\t\tzk: mock.Mock{},\n\t}\n\n\tbytes := make([]byte, 3)\n\tff := NewFuseFile(bytes, 0, \"mock/path\", mockZooKeeper)\n\n\tmockZooKeeper.zk.On(\"Set\", \"mock/path\", bytes, int32(-1)).Return(&zk.Stat{DataLength: int32(len(bytes))}, nil)\n\n\t// assert that we send 3 bytes into the writer and status out == fuse.OK\n\tsize, stat := ff.Write(bytes, 0)\n\tassert.Equal(t, uint32(3), size)\n\tassert.Equal(t, fuse.OK, stat)\n}", "func (t *Tfhd) Write() []byte {\n\tbuf := new(bytes.Buffer)\n\tvar err error\n\t// Size\n\terr = binary.Write(buf, binary.BigEndian, t.Size)\n\tif err != nil {\n\t\tfmt.Println(\"binary.Write failed:\", err)\n\t}\n\t// BoxType\n\terr = binary.Write(buf, binary.BigEndian, t.BoxType)\n\tif err != nil {\n\t\tfmt.Println(\"binary.Write failed:\", err)\n\t}\n\t//version\n\terr = binary.Write(buf, binary.BigEndian, t.Version)\n\tif err != nil {\n\t\tfmt.Println(\"binary.Write failed:\", 
err)\n\t}\n\t//flags\n\terr = binary.Write(buf, binary.BigEndian, t.Flags)\n\tif err != nil {\n\t\tfmt.Println(\"binary.Write failed:\", err)\n\t}\n\t//trackID\n\terr = binary.Write(buf, binary.BigEndian, t.TrackID)\n\tif err != nil {\n\t\tfmt.Println(\"binary.Write failed:\", err)\n\t}\n\tif t.BaseDataOffset != 0 {\n\t\terr = binary.Write(buf, binary.BigEndian, t.BaseDataOffset)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"binary.Write failed:\", err)\n\t\t}\n\t}\n\tif t.SampleDescriptionIndex != 0 {\n\t\terr = binary.Write(buf, binary.BigEndian, t.SampleDescriptionIndex)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"binary.Write failed:\", err)\n\t\t}\n\t}\n\tif t.DefaultSampleDuration != 0 {\n\t\terr = binary.Write(buf, binary.BigEndian, t.DefaultSampleDuration)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"binary.Write failed:\", err)\n\t\t}\n\t}\n\tif t.DefaultSampleSize != 0 {\n\t\terr = binary.Write(buf, binary.BigEndian, t.DefaultSampleSize)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"binary.Write failed:\", err)\n\t\t}\n\t}\n\tif t.DefaultSampleFlags != 0 {\n\t\terr = binary.Write(buf, binary.BigEndian, t.DefaultSampleFlags)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"binary.Write failed:\", err)\n\t\t}\n\t}\n\treturn buf.Bytes()\n}", "func (ref *digest) Write(src []byte) (int, error) {\n\tsln := uintptr(len(src))\n\tfln := len(src)\n\tbuf := ref.b[:]\n\tptr := ref.ptr\n\n\tif sln < (BlockSize - ptr) {\n\t\tcopy(buf[ptr:], src)\n\t\tref.ptr += sln\n\t\treturn int(sln), nil\n\t}\n\n\tvar hi, lo [8]uint64\n\thi[0] = ref.h[0x0]\n\tlo[0] = ref.h[0x1]\n\thi[1] = ref.h[0x2]\n\tlo[1] = ref.h[0x3]\n\thi[2] = ref.h[0x4]\n\tlo[2] = ref.h[0x5]\n\thi[3] = ref.h[0x6]\n\tlo[3] = ref.h[0x7]\n\thi[4] = ref.h[0x8]\n\tlo[4] = ref.h[0x9]\n\thi[5] = ref.h[0xA]\n\tlo[5] = ref.h[0xB]\n\thi[6] = ref.h[0xC]\n\tlo[6] = ref.h[0xD]\n\thi[7] = ref.h[0xE]\n\tlo[7] = ref.h[0xF]\n\n\tfor sln > 0 {\n\t\tcln := BlockSize - ptr\n\n\t\tif cln > sln {\n\t\t\tcln = sln\n\t\t}\n\t\tsln -= cln\n\n\t\tcopy(ref.b[ptr:], src[:cln])\n\t\tsrc = src[cln:]\n\t\tptr += cln\n\n\t\tif ptr == BlockSize {\n\t\t\tm0h := decUInt64le(buf[0:])\n\t\t\tm0l := decUInt64le(buf[8:])\n\t\t\tm1h := decUInt64le(buf[16:])\n\t\t\tm1l := decUInt64le(buf[24:])\n\t\t\tm2h := decUInt64le(buf[32:])\n\t\t\tm2l := decUInt64le(buf[40:])\n\t\t\tm3h := decUInt64le(buf[48:])\n\t\t\tm3l := decUInt64le(buf[56:])\n\n\t\t\thi[0] ^= m0h\n\t\t\tlo[0] ^= m0l\n\t\t\thi[1] ^= m1h\n\t\t\tlo[1] ^= m1l\n\t\t\thi[2] ^= m2h\n\t\t\tlo[2] ^= m2l\n\t\t\thi[3] ^= m3h\n\t\t\tlo[3] ^= m3l\n\n\t\t\tfor r := uint64(0); r < 42; r += 7 {\n\t\t\t\tslMutateExtend(r+0, 0, hi[:], lo[:])\n\t\t\t\tslMutateExtend(r+1, 1, hi[:], lo[:])\n\t\t\t\tslMutateExtend(r+2, 2, hi[:], lo[:])\n\t\t\t\tslMutateExtend(r+3, 3, hi[:], lo[:])\n\t\t\t\tslMutateExtend(r+4, 4, hi[:], lo[:])\n\t\t\t\tslMutateExtend(r+5, 5, hi[:], lo[:])\n\t\t\t\tslMutateBasic(r+6, hi[:], lo[:])\n\t\t\t}\n\n\t\t\thi[4] ^= m0h\n\t\t\tlo[4] ^= m0l\n\t\t\thi[5] ^= m1h\n\t\t\tlo[5] ^= m1l\n\t\t\thi[6] ^= m2h\n\t\t\tlo[6] ^= m2l\n\t\t\thi[7] ^= m3h\n\t\t\tlo[7] ^= m3l\n\n\t\t\tref.cnt++\n\t\t\tptr = 0\n\t\t}\n\t}\n\n\tref.h[0x0] = hi[0]\n\tref.h[0x1] = lo[0]\n\tref.h[0x2] = hi[1]\n\tref.h[0x3] = lo[1]\n\tref.h[0x4] = hi[2]\n\tref.h[0x5] = lo[2]\n\tref.h[0x6] = hi[3]\n\tref.h[0x7] = lo[3]\n\tref.h[0x8] = hi[4]\n\tref.h[0x9] = lo[4]\n\tref.h[0xA] = hi[5]\n\tref.h[0xB] = lo[5]\n\tref.h[0xC] = hi[6]\n\tref.h[0xD] = lo[6]\n\tref.h[0xE] = hi[7]\n\tref.h[0xF] = lo[7]\n\n\tref.ptr = ptr\n\treturn fln, nil\n}", "func (r *Repository) Put(ctx 
context.Context, body io.Reader) (digest.Digest, error) {\n\ttemp, err := r.TempFile(\"create-\")\n\tif err != nil {\n\t\treturn digest.Digest{}, err\n\t}\n\tdefer os.Remove(temp.Name())\n\tdw := reflow.Digester.NewWriter()\n\tdone := make(chan error, 1)\n\t// This is a workaround to make sure that copies respect\n\t// context cancellations. Note that the underlying copy is\n\t// not actually cancelled, so this could lead to goroutine\n\t// leaks.\n\tgo func() {\n\t\t_, err = io.Copy(temp, io.TeeReader(body, dw))\n\t\ttemp.Close()\n\t\tdone <- err\n\t}()\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn digest.Digest{}, ctx.Err()\n\tcase err := <-done:\n\t\tif err != nil {\n\t\t\treturn digest.Digest{}, err\n\t\t}\n\t\tdgst := dw.Digest()\n\t\treturn dgst, r.InstallDigest(dgst, temp.Name())\n\t}\n}", "func DigestUrl(baseURL string, digest types.Digest) string {\n\tbaseURL = strings.TrimRight(baseURL, \"/\")\n\treturn fmt.Sprintf(urlTemplate, baseURL, digest)\n}", "func (x *Index) Write(w io.Writer) error", "func (commit *Commit) Digest() []byte {\n\treturn commit.signingID.Hash(commit.signedRequest.GetRequest())\n}", "func (w *DiskImage) Digest() digest.Digest {\n\treturn w.digester.Digest()\n}" ]
[ "0.7222287", "0.61005545", "0.55415916", "0.5209088", "0.5083807", "0.5052842", "0.4941014", "0.48079118", "0.48077276", "0.48063084", "0.4738801", "0.4712518", "0.4684972", "0.46651158", "0.4654325", "0.4654092", "0.46490794", "0.46484983", "0.46432567", "0.46338454", "0.46325088", "0.4627105", "0.46171677", "0.4612079", "0.4565844", "0.4562752", "0.45210096", "0.4519383", "0.45006034", "0.4480316", "0.44425663", "0.44353738", "0.4428572", "0.44108114", "0.44088423", "0.4405481", "0.44000605", "0.43936613", "0.43879494", "0.43853682", "0.4384337", "0.437897", "0.4376615", "0.4356264", "0.43414104", "0.43393704", "0.43355522", "0.43137047", "0.43119633", "0.42980987", "0.42861038", "0.4272985", "0.42701417", "0.4269588", "0.42682436", "0.42647967", "0.42638192", "0.42537624", "0.42532486", "0.4231795", "0.42201442", "0.42124006", "0.42116523", "0.4200598", "0.41998407", "0.41978604", "0.41939557", "0.41917425", "0.4190685", "0.41854", "0.41842657", "0.41829956", "0.4169701", "0.41615853", "0.41500765", "0.41449952", "0.41440707", "0.41434488", "0.41420862", "0.4140696", "0.4132823", "0.41314262", "0.41280425", "0.4116212", "0.41159177", "0.4115693", "0.4114474", "0.4110506", "0.41075298", "0.4105372", "0.41024035", "0.41003492", "0.40946376", "0.40938383", "0.4092764", "0.40847337", "0.4075287", "0.40735316", "0.4072808", "0.4070842" ]
0.8357803
0
OnUpdateManifest is called when a new manifest is added. It updates metadb according to the type of image pushed (normal images, signatures, etc.). In case of any errors, it makes sure to keep consistency between metadb and the image store.
func OnUpdateManifest(repo, reference, mediaType string, digest godigest.Digest, body []byte, storeController storage.StoreController, metaDB mTypes.MetaDB, log log.Logger, ) error { imgStore := storeController.GetImageStore(repo) // check if image is a signature isSignature, signatureType, signedManifestDigest, err := storage.CheckIsImageSignature(repo, body, reference) if err != nil { log.Error().Err(err).Msg("can't check if image is a signature or not") if err := imgStore.DeleteImageManifest(repo, reference, false); err != nil { log.Error().Err(err).Str("manifest", reference).Str("repository", repo).Msg("couldn't remove image manifest in repo") return err } return err } metadataSuccessfullySet := true if isSignature { layersInfo, errGetLayers := GetSignatureLayersInfo(repo, reference, digest.String(), signatureType, body, imgStore, log) if errGetLayers != nil { metadataSuccessfullySet = false err = errGetLayers } else { err = metaDB.AddManifestSignature(repo, signedManifestDigest, mTypes.SignatureMetadata{ SignatureType: signatureType, SignatureDigest: digest.String(), LayersInfo: layersInfo, }) if err != nil { log.Error().Err(err).Msg("metadb: error while putting repo meta") metadataSuccessfullySet = false } else { err = metaDB.UpdateSignaturesValidity(repo, signedManifestDigest) if err != nil { log.Error().Err(err).Str("repository", repo).Str("reference", reference).Str("digest", signedManifestDigest.String()).Msg("metadb: failed verify signatures validity for signed image") metadataSuccessfullySet = false } } } } else { err = SetImageMetaFromInput(repo, reference, mediaType, digest, body, imgStore, metaDB, log) if err != nil { metadataSuccessfullySet = false } } if !metadataSuccessfullySet { log.Info().Str("tag", reference).Str("repository", repo).Msg("uploading image meta was unsuccessful for tag in repo") if err := imgStore.DeleteImageManifest(repo, reference, false); err != nil { log.Error().Err(err).Str("reference", reference).Str("repository", repo). Msg("couldn't remove image manifest in repo") return err } return err } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func OnGetManifest(name, reference string, body []byte,\n\tstoreController storage.StoreController, metaDB mTypes.MetaDB, log log.Logger,\n) error {\n\t// check if image is a signature\n\tisSignature, _, _, err := storage.CheckIsImageSignature(name, body, reference)\n\tif err != nil {\n\t\tlog.Error().Err(err).Msg(\"can't check if manifest is a signature or not\")\n\n\t\treturn err\n\t}\n\n\tif !isSignature {\n\t\terr := metaDB.IncrementImageDownloads(name, reference)\n\t\tif err != nil {\n\t\t\tlog.Error().Err(err).Str(\"repository\", name).Str(\"reference\", reference).\n\t\t\t\tMsg(\"unexpected error for image\")\n\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func OnDeleteManifest(repo, reference, mediaType string, digest godigest.Digest, manifestBlob []byte,\n\tstoreController storage.StoreController, metaDB mTypes.MetaDB, log log.Logger,\n) error {\n\timgStore := storeController.GetImageStore(repo)\n\n\tisSignature, signatureType, signedManifestDigest, err := storage.CheckIsImageSignature(repo, manifestBlob,\n\t\treference)\n\tif err != nil {\n\t\tlog.Error().Err(err).Msg(\"can't check if image is a signature or not\")\n\n\t\treturn err\n\t}\n\n\tmanageRepoMetaSuccessfully := true\n\n\tif isSignature {\n\t\terr = metaDB.DeleteSignature(repo, signedManifestDigest, mTypes.SignatureMetadata{\n\t\t\tSignatureDigest: digest.String(),\n\t\t\tSignatureType: signatureType,\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Error().Err(err).Msg(\"metadb: can't check if image is a signature or not\")\n\t\t\tmanageRepoMetaSuccessfully = false\n\t\t}\n\t} else {\n\t\terr = metaDB.DeleteRepoTag(repo, reference)\n\t\tif err != nil {\n\t\t\tlog.Info().Msg(\"metadb: restoring image store\")\n\n\t\t\t// restore image store\n\t\t\t_, _, err := imgStore.PutImageManifest(repo, reference, mediaType, manifestBlob)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error().Err(err).Msg(\"metadb: error while restoring image store, database is not consistent\")\n\t\t\t}\n\n\t\t\tmanageRepoMetaSuccessfully = false\n\t\t}\n\n\t\tif referredDigest, hasSubject := common.GetReferredSubject(manifestBlob); hasSubject {\n\t\t\terr := metaDB.DeleteReferrer(repo, referredDigest, digest)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error().Err(err).Msg(\"metadb: error while deleting referrer\")\n\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif !manageRepoMetaSuccessfully {\n\t\tlog.Info().Str(\"tag\", reference).Str(\"repository\", repo).\n\t\t\tMsg(\"metadb: deleting image meta was unsuccessful for tag in repo\")\n\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) {\n\tdcontext.GetLogger(imh).Debug(\"PutImageManifest\")\n\tmanifests, err := imh.Repository.Manifests(imh)\n\tif err != nil {\n\t\timh.Errors = append(imh.Errors, err)\n\t\treturn\n\t}\n\n\tvar jsonBuf bytes.Buffer\n\tif err := copyFullPayload(imh, w, r, &jsonBuf, maxManifestBodySize, \"image manifest PUT\"); err != nil {\n\t\t// copyFullPayload reports the error if necessary\n\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err.Error()))\n\t\treturn\n\t}\n\n\tmediaType := r.Header.Get(\"Content-Type\")\n\tmanifest, desc, err := distribution.UnmarshalManifest(mediaType, jsonBuf.Bytes())\n\tif err != nil {\n\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err))\n\t\treturn\n\t}\n\n\tif imh.Digest != \"\" {\n\t\tif desc.Digest != imh.Digest {\n\t\t\tdcontext.GetLogger(imh).Errorf(\"payload digest does not match: %q != %q\", desc.Digest, 
imh.Digest)\n\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid)\n\t\t\treturn\n\t\t}\n\t} else if imh.Tag != \"\" {\n\t\timh.Digest = desc.Digest\n\t} else {\n\t\timh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail(\"no tag or digest specified\"))\n\t\treturn\n\t}\n\n\tisAnOCIManifest := mediaType == v1.MediaTypeImageManifest || mediaType == v1.MediaTypeImageIndex\n\n\tif isAnOCIManifest {\n\t\tdcontext.GetLogger(imh).Debug(\"Putting an OCI Manifest!\")\n\t} else {\n\t\tdcontext.GetLogger(imh).Debug(\"Putting a Docker Manifest!\")\n\t}\n\n\tvar options []distribution.ManifestServiceOption\n\tif imh.Tag != \"\" {\n\t\toptions = append(options, distribution.WithTag(imh.Tag))\n\t}\n\n\tif err := imh.applyResourcePolicy(manifest); err != nil {\n\t\timh.Errors = append(imh.Errors, err)\n\t\treturn\n\t}\n\n\t_, err = manifests.Put(imh, manifest, options...)\n\tif err != nil {\n\t\t// TODO(stevvooe): These error handling switches really need to be\n\t\t// handled by an app global mapper.\n\t\tif err == distribution.ErrUnsupported {\n\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported)\n\t\t\treturn\n\t\t}\n\t\tif err == distribution.ErrAccessDenied {\n\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeDenied)\n\t\t\treturn\n\t\t}\n\t\tswitch err := err.(type) {\n\t\tcase distribution.ErrManifestVerification:\n\t\t\tfor _, verificationError := range err {\n\t\t\t\tswitch verificationError := verificationError.(type) {\n\t\t\t\tcase distribution.ErrManifestBlobUnknown:\n\t\t\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestBlobUnknown.WithDetail(verificationError.Digest))\n\t\t\t\tcase distribution.ErrManifestNameInvalid:\n\t\t\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeNameInvalid.WithDetail(err))\n\t\t\t\tcase distribution.ErrManifestUnverified:\n\t\t\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnverified)\n\t\t\t\tdefault:\n\t\t\t\t\tif verificationError == digest.ErrDigestInvalidFormat {\n\t\t\t\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid)\n\t\t\t\t\t} else {\n\t\t\t\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown, verificationError)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase errcode.Error:\n\t\t\timh.Errors = append(imh.Errors, err)\n\t\tdefault:\n\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))\n\t\t}\n\t\treturn\n\t}\n\n\t// Tag this manifest\n\tif imh.Tag != \"\" {\n\t\ttags := imh.Repository.Tags(imh)\n\t\terr = tags.Tag(imh, imh.Tag, desc)\n\t\tif err != nil {\n\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))\n\t\t\treturn\n\t\t}\n\n\t}\n\n\t// Construct a canonical url for the uploaded manifest.\n\tref, err := reference.WithDigest(imh.Repository.Named(), imh.Digest)\n\tif err != nil {\n\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))\n\t\treturn\n\t}\n\n\tlocation, err := imh.urlBuilder.BuildManifestURL(ref)\n\tif err != nil {\n\t\t// NOTE(stevvooe): Given the behavior above, this absurdly unlikely to\n\t\t// happen. We'll log the error here but proceed as if it worked. 
Worst\n\t\t// case, we set an empty location header.\n\t\tdcontext.GetLogger(imh).Errorf(\"error building manifest url from digest: %v\", err)\n\t}\n\n\tw.Header().Set(\"Location\", location)\n\tw.Header().Set(\"Docker-Content-Digest\", imh.Digest.String())\n\tw.WriteHeader(http.StatusCreated)\n\n\tdcontext.GetLogger(imh).Debug(\"Succeeded in putting manifest!\")\n}", "func (is *ImageStoreLocal) PutImageManifest(repo, reference, mediaType string, //nolint: gocyclo\n\tbody []byte,\n) (godigest.Digest, godigest.Digest, error) {\n\tif err := is.InitRepo(repo); err != nil {\n\t\tis.log.Debug().Err(err).Msg(\"init repo\")\n\n\t\treturn \"\", \"\", err\n\t}\n\n\tvar lockLatency time.Time\n\n\tvar err error\n\n\tis.Lock(&lockLatency)\n\tdefer func() {\n\t\tis.Unlock(&lockLatency)\n\n\t\tif err == nil {\n\t\t\tmonitoring.SetStorageUsage(is.metrics, is.rootDir, repo)\n\t\t\tmonitoring.IncUploadCounter(is.metrics, repo)\n\t\t}\n\t}()\n\n\trefIsDigest := true\n\n\tmDigest, err := common.GetAndValidateRequestDigest(body, reference, is.log)\n\tif err != nil {\n\t\tif errors.Is(err, zerr.ErrBadManifest) {\n\t\t\treturn mDigest, \"\", err\n\t\t}\n\n\t\trefIsDigest = false\n\t}\n\n\tdigest, err := common.ValidateManifest(is, repo, reference, mediaType, body, is.log)\n\tif err != nil {\n\t\treturn digest, \"\", err\n\t}\n\n\tindex, err := common.GetIndex(is, repo, is.log)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\t// create a new descriptor\n\tdesc := ispec.Descriptor{\n\t\tMediaType: mediaType, Size: int64(len(body)), Digest: mDigest,\n\t}\n\n\tif !refIsDigest {\n\t\tdesc.Annotations = map[string]string{ispec.AnnotationRefName: reference}\n\t}\n\n\tvar subjectDigest godigest.Digest\n\n\tartifactType := \"\"\n\n\tif mediaType == ispec.MediaTypeImageManifest {\n\t\tvar manifest ispec.Manifest\n\n\t\terr := json.Unmarshal(body, &manifest)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\n\t\tif manifest.Subject != nil {\n\t\t\tsubjectDigest = manifest.Subject.Digest\n\t\t}\n\n\t\tartifactType = zcommon.GetManifestArtifactType(manifest)\n\t} else if mediaType == ispec.MediaTypeImageIndex {\n\t\tvar index ispec.Index\n\n\t\terr := json.Unmarshal(body, &index)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\n\t\tif index.Subject != nil {\n\t\t\tsubjectDigest = index.Subject.Digest\n\t\t}\n\n\t\tartifactType = zcommon.GetIndexArtifactType(index)\n\t}\n\n\tupdateIndex, oldDgst, err := common.CheckIfIndexNeedsUpdate(&index, &desc, is.log)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tif !updateIndex {\n\t\treturn desc.Digest, subjectDigest, nil\n\t}\n\n\t// write manifest to \"blobs\"\n\tdir := path.Join(is.rootDir, repo, \"blobs\", mDigest.Algorithm().String())\n\t_ = ensureDir(dir, is.log)\n\tfile := path.Join(dir, mDigest.Encoded())\n\n\t// in case the linter will not pass, it will be garbage collected\n\tif err := is.writeFile(file, body); err != nil {\n\t\tis.log.Error().Err(err).Str(\"file\", file).Msg(\"unable to write\")\n\n\t\treturn \"\", \"\", err\n\t}\n\n\terr = common.UpdateIndexWithPrunedImageManifests(is, &index, repo, desc, oldDgst, is.log)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\t// now update \"index.json\"\n\tindex.Manifests = append(index.Manifests, desc)\n\tdir = path.Join(is.rootDir, repo)\n\tfile = path.Join(dir, \"index.json\")\n\n\tbuf, err := json.Marshal(index)\n\tif err := inject.Error(err); err != nil {\n\t\tis.log.Error().Err(err).Str(\"file\", file).Msg(\"unable to marshal JSON\")\n\n\t\treturn \"\", \"\", 
err\n\t}\n\n\t// update the descriptors artifact type in order to check for signatures when applying the linter\n\tdesc.ArtifactType = artifactType\n\n\t// apply linter only on images, not signatures or indexes\n\tpass, err := common.ApplyLinter(is, is.linter, repo, desc)\n\tif !pass {\n\t\tis.log.Error().Err(err).Str(\"repository\", repo).Str(\"reference\", reference).Msg(\"linter didn't pass\")\n\n\t\treturn \"\", \"\", err\n\t}\n\n\terr = is.writeFile(file, buf)\n\tif err := inject.Error(err); err != nil {\n\t\tis.log.Error().Err(err).Str(\"file\", file).Msg(\"unable to write\")\n\n\t\treturn \"\", \"\", err\n\t}\n\n\treturn desc.Digest, subjectDigest, nil\n}", "func PushManifest(img string, auth dockertypes.AuthConfig) (hash string, length int, err error) {\n\tsrcImages := []types.ManifestEntry{}\n\n\tfor i, platform := range platformsToSearchForIndex {\n\t\tosArchArr := strings.Split(platform, \"/\")\n\t\tif len(osArchArr) != 2 && len(osArchArr) != 3 {\n\t\t\treturn hash, length, fmt.Errorf(\"platform argument %d is not of form 'os/arch': '%s'\", i, platform)\n\t\t}\n\t\tvariant := \"\"\n\t\tos, arch := osArchArr[0], osArchArr[1]\n\t\tif len(osArchArr) == 3 {\n\t\t\tvariant = osArchArr[2]\n\t\t}\n\t\tsrcImages = append(srcImages, types.ManifestEntry{\n\t\t\tImage: fmt.Sprintf(\"%s-%s\", img, arch),\n\t\t\tPlatform: ocispec.Platform{\n\t\t\t\tOS: os,\n\t\t\t\tArchitecture: arch,\n\t\t\t\tVariant: variant,\n\t\t\t},\n\t\t})\n\t}\n\n\tyamlInput := types.YAMLInput{\n\t\tImage: img,\n\t\tManifests: srcImages,\n\t}\n\n\tlog.Debugf(\"pushing manifest list for %s -> %#v\", img, yamlInput)\n\n\t// push the manifest list with the auth as given, ignore missing, do not allow insecure\n\treturn registry.PushManifestList(auth.Username, auth.Password, yamlInput, true, false, false, types.OCI, \"\")\n}", "func (is *ObjectStorage) PutImageManifest(repo, reference, mediaType string, //nolint: gocyclo\n\tbody []byte,\n) (godigest.Digest, godigest.Digest, error) {\n\tif err := is.InitRepo(repo); err != nil {\n\t\tis.log.Debug().Err(err).Msg(\"init repo\")\n\n\t\treturn \"\", \"\", err\n\t}\n\n\tvar lockLatency time.Time\n\n\tvar err error\n\n\tis.Lock(&lockLatency)\n\tdefer func() {\n\t\tis.Unlock(&lockLatency)\n\n\t\tif err == nil {\n\t\t\tmonitoring.SetStorageUsage(is.metrics, is.rootDir, repo)\n\t\t\tmonitoring.IncUploadCounter(is.metrics, repo)\n\t\t}\n\t}()\n\n\trefIsDigest := true\n\n\tmDigest, err := common.GetAndValidateRequestDigest(body, reference, is.log)\n\tif err != nil {\n\t\tif errors.Is(err, zerr.ErrBadManifest) {\n\t\t\treturn mDigest, \"\", err\n\t\t}\n\n\t\trefIsDigest = false\n\t}\n\n\tdig, err := common.ValidateManifest(is, repo, reference, mediaType, body, is.log)\n\tif err != nil {\n\t\treturn dig, \"\", err\n\t}\n\n\tindex, err := common.GetIndex(is, repo, is.log)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\t// create a new descriptor\n\tdesc := ispec.Descriptor{\n\t\tMediaType: mediaType, Size: int64(len(body)), Digest: mDigest,\n\t}\n\n\tif !refIsDigest {\n\t\tdesc.Annotations = map[string]string{ispec.AnnotationRefName: reference}\n\t}\n\n\tvar subjectDigest godigest.Digest\n\n\tartifactType := \"\"\n\n\tif mediaType == ispec.MediaTypeImageManifest {\n\t\tvar manifest ispec.Manifest\n\n\t\terr := json.Unmarshal(body, &manifest)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\n\t\tif manifest.Subject != nil {\n\t\t\tsubjectDigest = manifest.Subject.Digest\n\t\t}\n\n\t\tartifactType = zcommon.GetManifestArtifactType(manifest)\n\t} else if mediaType == 
ispec.MediaTypeImageIndex {\n\t\tvar index ispec.Index\n\n\t\terr := json.Unmarshal(body, &index)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\n\t\tif index.Subject != nil {\n\t\t\tsubjectDigest = index.Subject.Digest\n\t\t}\n\n\t\tartifactType = zcommon.GetIndexArtifactType(index)\n\t}\n\n\tupdateIndex, oldDgst, err := common.CheckIfIndexNeedsUpdate(&index, &desc, is.log)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tif !updateIndex {\n\t\treturn desc.Digest, subjectDigest, nil\n\t}\n\n\t// write manifest to \"blobs\"\n\tdir := path.Join(is.rootDir, repo, \"blobs\", mDigest.Algorithm().String())\n\tmanifestPath := path.Join(dir, mDigest.Encoded())\n\n\tif err = is.store.PutContent(context.Background(), manifestPath, body); err != nil {\n\t\tis.log.Error().Err(err).Str(\"file\", manifestPath).Msg(\"unable to write\")\n\n\t\treturn \"\", \"\", err\n\t}\n\n\terr = common.UpdateIndexWithPrunedImageManifests(is, &index, repo, desc, oldDgst, is.log)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\t// now update \"index.json\"\n\tindex.Manifests = append(index.Manifests, desc)\n\tdir = path.Join(is.rootDir, repo)\n\tindexPath := path.Join(dir, \"index.json\")\n\n\tbuf, err := json.Marshal(index)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Str(\"file\", indexPath).Msg(\"unable to marshal JSON\")\n\n\t\treturn \"\", \"\", err\n\t}\n\n\t// update the descriptors artifact type in order to check for signatures when applying the linter\n\tdesc.ArtifactType = artifactType\n\n\t// apply linter only on images, not signatures\n\tpass, err := common.ApplyLinter(is, is.linter, repo, desc)\n\tif !pass {\n\t\tis.log.Error().Err(err).Str(\"repository\", repo).Str(\"reference\", reference).Msg(\"linter didn't pass\")\n\n\t\treturn \"\", \"\", err\n\t}\n\n\tif err = is.store.PutContent(context.Background(), indexPath, buf); err != nil {\n\t\tis.log.Error().Err(err).Str(\"file\", manifestPath).Msg(\"unable to write\")\n\n\t\treturn \"\", \"\", err\n\t}\n\n\treturn desc.Digest, subjectDigest, nil\n}", "func UpdateManifest(m Manifests, root string, paths []string, id flux.ResourceID, f func(manifest []byte) ([]byte, error)) error {\n\tresources, err := m.LoadManifests(root, paths)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresource, ok := resources[id.String()]\n\tif !ok {\n\t\treturn ErrResourceNotFound(id.String())\n\t}\n\n\tpath := filepath.Join(root, resource.Source())\n\tdef, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnewDef, err := f(def)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(path, newDef, fi.Mode())\n}", "func (s *Stargate) UpdateManifest() error {\n\tts := time.Now().Unix()\n\tkey := fmt.Sprintf(\"%s/%s\", service.Stargate, s.ID)\n\tmanifest := &consul.ServiceManifest{\n\t\tID: s.ID,\n\t\tType: service.Stargate,\n\t\tLastActive: ts,\n\t}\n\n\tif err := s.Consul.WriteStructToKey(key, manifest); err != nil {\n\t\treturn fmt.Errorf(\"error updating manifest: %v\", err)\n\t}\n\n\tfmt.Printf(\"Updated manifest %v\\n\", manifest)\n\treturn nil\n}", "func UpdateManifest(m Manifests, root string, serviceID flux.ResourceID, f func(manifest []byte) ([]byte, error)) error {\n\tservices, err := m.FindDefinedServices(root)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpaths := services[serviceID]\n\tif len(paths) == 0 {\n\t\treturn ErrNoResourceFilesFoundForService\n\t}\n\tif len(paths) > 1 {\n\t\treturn 
ErrMultipleResourceFilesFoundForService\n\t}\n\n\tdef, err := ioutil.ReadFile(paths[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnewDef, err := f(def)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfi, err := os.Stat(paths[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(paths[0], newDef, fi.Mode())\n}", "func (imh *manifestHandler) DeleteManifest(w http.ResponseWriter, r *http.Request) {\n\tdcontext.GetLogger(imh).Debug(\"DeleteImageManifest\")\n\n\tif imh.App.isCache {\n\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported)\n\t\treturn\n\t}\n\n\tif imh.Tag != \"\" {\n\t\tdcontext.GetLogger(imh).Debug(\"DeleteImageTag\")\n\t\ttagService := imh.Repository.Tags(imh.Context)\n\t\tif err := tagService.Untag(imh.Context, imh.Tag); err != nil {\n\t\t\tswitch err.(type) {\n\t\t\tcase distribution.ErrTagUnknown, driver.PathNotFoundError:\n\t\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err))\n\t\t\tdefault:\n\t\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tw.WriteHeader(http.StatusAccepted)\n\t\treturn\n\t}\n\n\tmanifests, err := imh.Repository.Manifests(imh)\n\tif err != nil {\n\t\timh.Errors = append(imh.Errors, err)\n\t\treturn\n\t}\n\n\terr = manifests.Delete(imh, imh.Digest)\n\tif err != nil {\n\t\tswitch err {\n\t\tcase digest.ErrDigestUnsupported:\n\t\tcase digest.ErrDigestInvalidFormat:\n\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid)\n\t\t\treturn\n\t\tcase distribution.ErrBlobUnknown:\n\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown)\n\t\t\treturn\n\t\tcase distribution.ErrUnsupported:\n\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported)\n\t\t\treturn\n\t\tdefault:\n\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown)\n\t\t\treturn\n\t\t}\n\t}\n\n\ttagService := imh.Repository.Tags(imh)\n\treferencedTags, err := tagService.Lookup(imh, distribution.Descriptor{Digest: imh.Digest})\n\tif err != nil {\n\t\timh.Errors = append(imh.Errors, err)\n\t\treturn\n\t}\n\n\tfor _, tag := range referencedTags {\n\t\tif err := tagService.Untag(imh, tag); err != nil {\n\t\t\timh.Errors = append(imh.Errors, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tw.WriteHeader(http.StatusAccepted)\n}", "func (d *InboundCacheDriver) StoreManifest(location models.ImageReference, contents []byte, mediaType string, now time.Time) error {\n\td.Entries[location] = inboundCacheEntry{contents, mediaType, now}\n\treturn nil\n}", "func convertManifestIfRequiredWithUpdate(ctx context.Context, options types.ManifestUpdateOptions, converters map[string]manifestConvertFn) (types.Image, error) {\n\tif options.ManifestMIMEType == \"\" {\n\t\treturn nil, nil\n\t}\n\n\tconverter, ok := converters[options.ManifestMIMEType]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"Unsupported conversion type: %v\", options.ManifestMIMEType)\n\t}\n\n\toptionsCopy := options\n\tconvertedManifest, err := converter(ctx, &optionsCopy)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tconvertedImage := memoryImageFromManifest(convertedManifest)\n\n\toptionsCopy.ManifestMIMEType = \"\"\n\treturn convertedImage.UpdatedImage(ctx, optionsCopy)\n}", "func (b *Backend) ManifestAnnotate(ctx context.Context, req *pb.ManifestAnnotateRequest) (*gogotypes.Empty, error) {\n\tvar emptyResp = &gogotypes.Empty{}\n\n\tif !b.daemon.opts.Experimental {\n\t\treturn emptyResp, errors.New(\"please enable experimental to use manifest 
feature\")\n\t}\n\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"ManifestList\": req.GetManifestList(),\n\t\t\"Manifest\": req.GetManifest(),\n\t}).Info(\"ManifestAnnotateRequest received\")\n\n\tmanifestName := req.GetManifestList()\n\tmanifestImage := req.GetManifest()\n\timageOS := req.GetOs()\n\timageArch := req.GetArch()\n\timageOSFeature := req.GetOsFeatures()\n\timageVariant := req.GetVariant()\n\n\t// get list image\n\t_, listImage, err := image.FindImage(b.daemon.localStore, manifestName)\n\tif err != nil {\n\t\treturn emptyResp, err\n\t}\n\n\t// load list from list image\n\t_, list, err := loadListFromImage(b.daemon.localStore, listImage.ID)\n\tif err != nil {\n\t\treturn emptyResp, err\n\t}\n\n\t// add image to list, if image already exists, it will be substituted\n\tinstanceDigest, err := list.addImage(ctx, b.daemon.localStore, manifestImage)\n\tif err != nil {\n\t\treturn emptyResp, err\n\t}\n\n\t// modify image platform if user specifies\n\tfor i := range list.docker.Manifests {\n\t\tif list.docker.Manifests[i].Digest == instanceDigest {\n\t\t\tif imageOS != \"\" {\n\t\t\t\tlist.docker.Manifests[i].Platform.OS = imageOS\n\t\t\t}\n\t\t\tif imageArch != \"\" {\n\t\t\t\tlist.docker.Manifests[i].Platform.Architecture = imageArch\n\t\t\t}\n\t\t\tif len(imageOSFeature) > 0 {\n\t\t\t\tlist.docker.Manifests[i].Platform.OSFeatures = append([]string{}, imageOSFeature...)\n\t\t\t}\n\t\t\tif imageVariant != \"\" {\n\t\t\t\tlist.docker.Manifests[i].Platform.Variant = imageVariant\n\t\t\t}\n\t\t}\n\t}\n\n\t// save list to image\n\t_, err = list.saveListToImage(b.daemon.localStore, listImage.ID, \"\", manifest.DockerV2ListMediaType)\n\n\treturn emptyResp, err\n}", "func (r *Registry) ImageManifest(image Image, token string) (*ImageManifest, error) {\n\turl := r.GetDigestUrl(image)\n\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Add(\"Accept\", schema2.MediaTypeManifest)\n\tif token != \"\" {\n\t\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", token))\n\t}\n\n\tresp, err := r.Client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\trespBody, _ := GetRespBody(resp)\n\n\tif resp.StatusCode != http.StatusOK {\n\t\tif resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusUnauthorized {\n\t\t\tklog.Error(statusUnauthorized)\n\t\t\treturn nil, restful.NewError(resp.StatusCode, statusUnauthorized)\n\t\t}\n\t\tklog.Errorf(\"got response: statusCode is '%d', body is '%s'\\n\", resp.StatusCode, respBody)\n\t\treturn nil, restful.NewError(resp.StatusCode, \"got image manifest failed\")\n\t}\n\n\timageManifest := &ImageManifest{}\n\terr = json.Unmarshal(respBody, imageManifest)\n\n\treturn imageManifest, err\n}", "func appendImageManifest(tarFile string, manifest []byte) error {\n\thash := sha256.Sum256(manifest)\n\treturn appendToTarFile(tarFile, fmt.Sprintf(\"%s-%x.json\", \"imagemanifest\", hash), manifest)\n}", "func (ir *ImageEngine) ManifestPush(ctx context.Context, name, destination string, opts entities.ImagePushOptions) (string, error) {\n\tif opts.Signers != nil {\n\t\treturn \"\", fmt.Errorf(\"forwarding Signers is not supported for remote clients\")\n\t}\n\n\toptions := 
new(images.PushOptions)\n\toptions.WithUsername(opts.Username).WithPassword(opts.Password).WithAuthfile(opts.Authfile).WithRemoveSignatures(opts.RemoveSignatures).WithAll(opts.All).WithFormat(opts.Format).WithCompressionFormat(opts.CompressionFormat).WithQuiet(opts.Quiet).WithProgressWriter(opts.Writer).WithAddCompression(opts.AddCompression).WithForceCompressionFormat(opts.ForceCompressionFormat)\n\n\tif s := opts.SkipTLSVerify; s != types.OptionalBoolUndefined {\n\t\tif s == types.OptionalBoolTrue {\n\t\t\toptions.WithSkipTLSVerify(true)\n\t\t} else {\n\t\t\toptions.WithSkipTLSVerify(false)\n\t\t}\n\t}\n\tdigest, err := manifests.Push(ir.ClientCtx, name, destination, options)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"pushing manifest list %s: %w\", name, err)\n\t}\n\n\tif opts.Rm {\n\t\tif _, rmErrors := ir.Remove(ctx, []string{name}, entities.ImageRemoveOptions{LookupManifest: true}); len(rmErrors) > 0 {\n\t\t\treturn \"\", fmt.Errorf(\"removing manifest after push: %w\", rmErrors[0])\n\t\t}\n\t}\n\n\treturn digest, err\n}", "func (m *manifestService) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {\n\tcontext.GetLogger(ctx).Debugf(\"(*manifestService).Put\")\n\n\tmh, err := NewManifestHandler(m.repo, manifest)\n\tif err != nil {\n\t\treturn \"\", regapi.ErrorCodeManifestInvalid.WithDetail(err)\n\t}\n\tmediaType, payload, _, err := mh.Payload()\n\tif err != nil {\n\t\treturn \"\", regapi.ErrorCodeManifestInvalid.WithDetail(err)\n\t}\n\n\t// this is fast to check, let's do it before verification\n\tif !m.acceptschema2 && mediaType == schema2.MediaTypeManifest {\n\t\treturn \"\", regapi.ErrorCodeManifestInvalid.WithDetail(fmt.Errorf(\"manifest V2 schema 2 not allowed\"))\n\t}\n\n\t// in order to stat the referenced blobs, repository need to be set on the context\n\tif err := mh.Verify(withRepository(ctx, m.repo), false); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t_, err = m.manifests.Put(withRepository(ctx, m.repo), manifest, options...)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tconfig, err := mh.Config(ctx)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdgst, err := mh.Digest()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Upload to openshift\n\tism := imageapiv1.ImageStreamMapping{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: m.repo.namespace,\n\t\t\tName: m.repo.name,\n\t\t},\n\t\tImage: imageapiv1.Image{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: dgst.String(),\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\timageapi.ManagedByOpenShiftAnnotation: \"true\",\n\t\t\t\t\timageapi.ImageManifestBlobStoredAnnotation: \"true\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tDockerImageReference: fmt.Sprintf(\"%s/%s/%s@%s\", m.repo.config.registryAddr, m.repo.namespace, m.repo.name, dgst.String()),\n\t\t\tDockerImageManifest: string(payload),\n\t\t\tDockerImageManifestMediaType: mediaType,\n\t\t\tDockerImageConfig: string(config),\n\t\t},\n\t}\n\n\tfor _, option := range options {\n\t\tif opt, ok := option.(distribution.WithTagOption); ok {\n\t\t\tism.Tag = opt.Tag\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif _, err = m.repo.registryOSClient.ImageStreamMappings(m.repo.namespace).Create(&ism); err != nil {\n\t\t// if the error was that the image stream wasn't found, try to auto provision it\n\t\tstatusErr, ok := err.(*kerrors.StatusError)\n\t\tif !ok {\n\t\t\tcontext.GetLogger(ctx).Errorf(\"error creating ImageStreamMapping: %s\", err)\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif 
quotautil.IsErrorQuotaExceeded(statusErr) {\n\t\t\tcontext.GetLogger(ctx).Errorf(\"denied creating ImageStreamMapping: %v\", statusErr)\n\t\t\treturn \"\", distribution.ErrAccessDenied\n\t\t}\n\n\t\tstatus := statusErr.ErrStatus\n\t\tkind := strings.ToLower(status.Details.Kind)\n\t\tisValidKind := kind == \"imagestream\" /*pre-1.2*/ || kind == \"imagestreams\" /*1.2 to 1.6*/ || kind == \"imagestreammappings\" /*1.7+*/\n\t\tif !isValidKind || status.Code != http.StatusNotFound || status.Details.Name != m.repo.name {\n\t\t\tcontext.GetLogger(ctx).Errorf(\"error creating ImageStreamMapping: %s\", err)\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif _, err := m.repo.createImageStream(ctx); err != nil {\n\t\t\tif e, ok := err.(errcode.Error); ok && e.ErrorCode() == errcode.ErrorCodeUnknown {\n\t\t\t\t// TODO: convert statusErr to distribution error\n\t\t\t\treturn \"\", statusErr\n\t\t\t}\n\t\t\treturn \"\", err\n\t\t}\n\n\t\t// try to create the ISM again\n\t\tif _, err := m.repo.registryOSClient.ImageStreamMappings(m.repo.namespace).Create(&ism); err != nil {\n\t\t\tif quotautil.IsErrorQuotaExceeded(err) {\n\t\t\t\tcontext.GetLogger(ctx).Errorf(\"denied a creation of ImageStreamMapping: %v\", err)\n\t\t\t\treturn \"\", distribution.ErrAccessDenied\n\t\t\t}\n\t\t\tcontext.GetLogger(ctx).Errorf(\"error creating ImageStreamMapping: %s\", err)\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn dgst, nil\n}", "func (s *storageImageDestination) PutManifest(ctx context.Context, manifest []byte) error {\n\ts.manifest = make([]byte, len(manifest))\n\tcopy(s.manifest, manifest)\n\treturn nil\n}", "func Update(cfg *Config) error {\n\tmetadir := filepath.Join(cfg.OutputDir, \"meta\")\n\tos.MkdirAll(metadir, os.ModePerm)\n\tmanifest, err := cfg.Manifest()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := writeABIRevision(cfg, manifest); err != nil {\n\t\treturn err\n\t}\n\n\tcontentsPath := filepath.Join(metadir, \"contents\")\n\tpkgContents := manifest.Content()\n\n\tif cfg.SubpackagesPath != \"\" {\n\t\tif err := writeSubpackagesMeta(cfg, cfg.SubpackagesPath); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// manifestLines is a channel containing unpacked manifest paths\n\tvar manifestLines = make(chan struct{ src, dest string }, len(pkgContents))\n\tgo func() {\n\t\tfor dest, src := range pkgContents {\n\t\t\tmanifestLines <- struct{ src, dest string }{src, dest}\n\t\t}\n\t\tclose(manifestLines)\n\t}()\n\n\t// contentCollector receives entries to include in contents\n\ttype contentEntry struct {\n\t\tpath string\n\t\troot MerkleRoot\n\t}\n\tvar contentCollector = make(chan contentEntry, len(pkgContents))\n\tvar errors = make(chan error)\n\n\t// w is a group that is done when contentCollector is fully populated\n\tvar w sync.WaitGroup\n\tfor i := runtime.NumCPU(); i > 0; i-- {\n\t\tw.Add(1)\n\n\t\tgo func() {\n\t\t\tdefer w.Done()\n\n\t\t\tfor in := range manifestLines {\n\t\t\t\tvar t merkle.Tree\n\t\t\t\tcf, err := os.Open(in.src)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrors <- fmt.Errorf(\"build.Update: open %s for %s: %s\", in.src, in.dest, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\t_, err = t.ReadFrom(bufio.NewReader(cf))\n\t\t\t\tcf.Close()\n\t\t\t\tif err != nil {\n\t\t\t\t\terrors <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tvar root MerkleRoot\n\t\t\t\tcopy(root[:], t.Root())\n\t\t\t\tcontentCollector <- contentEntry{in.dest, root}\n\t\t\t}\n\t\t}()\n\t}\n\n\t// close the collector channel when all workers are done\n\tgo func() {\n\t\tw.Wait()\n\t\tclose(contentCollector)\n\t}()\n\n\t// collect all 
results and close done to signal the waiting select\n\tvar done = make(chan struct{})\n\tcontents := MetaContents{}\n\tgo func() {\n\t\tfor entry := range contentCollector {\n\t\t\tcontents[entry.path] = entry.root\n\t\t}\n\t\tclose(done)\n\t}()\n\n\tselect {\n\tcase <-done:\n\t\t// contents is populated\n\tcase err := <-errors:\n\t\t// exit on the first error\n\t\treturn err\n\t}\n\n\tmanifest.Paths[\"meta/contents\"] = contentsPath\n\n\treturn os.WriteFile(contentsPath,\n\t\t[]byte(contents.String()), os.ModePerm)\n}", "func (m *ImageManifest) UnmarshalJSON(data []byte) (err error) {\n\tmanifestMap := make(map[string]json.RawMessage)\n\tif err = json.Unmarshal(data, &manifestMap); err != nil {\n\t\terr = errors.WithStack(err)\n\t\treturn\n\t}\n\n\tfor k, v := range manifestMap {\n\t\tswitch k {\n\t\tcase \"mediaType\":\n\t\t\terr = json.Unmarshal(v, &m.MediaType)\n\t\tcase \"schemaVersion\":\n\t\t\terr = json.Unmarshal(v, &m.SchemaVersion)\n\t\tcase \"config\":\n\t\t\tm.Config, err = unmarshalConfig(v)\n\t\tcase \"layers\":\n\t\t\tm.Layers, err = unmarshalLayers(v)\n\t\tdefault:\n\t\t}\n\t\tif err != nil {\n\t\t\terr = errors.WithStack(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}", "func (mr *ManifestResource) Put(_ http.ResponseWriter, req *http.Request, _ httprouter.Params) restful.Exchanger {\n\treturn &PUTManifestHandler{\n\t\tState: mr.context.liveState(),\n\t\tLogSink: mr.context.LogSink,\n\t\tRequest: req,\n\t\tQueryValues: mr.ParseQuery(req),\n\t\tUser: mr.GetUser(req),\n\t\tStateWriter: sous.StateWriter(mr.context.StateManager),\n\t}\n}", "func (p *Processor) ValidateAndStoreManifest(account keppel.Account, repo keppel.Repository, m IncomingManifest, actx keppel.AuditContext) (*keppel.Manifest, error) {\n\t//check if the objects we want to create already exist in the database; this\n\t//check is not 100% reliable since it does not run in the same transaction as\n\t//the actual upsert, so results should be taken with a grain of salt; but the\n\t//result is accurate enough to avoid most duplicate audit events\n\tcontentsDigest := digest.Canonical.FromBytes(m.Contents)\n\tmanifestExistsAlready, err := p.db.SelectBool(checkManifestExistsQuery, repo.ID, contentsDigest.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlogg.Debug(\"ValidateAndStoreManifest: in repo %d, manifest %s already exists = %t\", repo.ID, contentsDigest, manifestExistsAlready)\n\tvar tagExistsAlready bool\n\tif m.Reference.IsTag() {\n\t\ttagExistsAlready, err = p.db.SelectBool(checkTagExistsAtSameDigestQuery, repo.ID, m.Reference.Tag, contentsDigest.String())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tlogg.Debug(\"ValidateAndStoreManifest: in repo %d, tag %s @%s already exists = %t\", repo.ID, m.Reference.Tag, contentsDigest, tagExistsAlready)\n\t}\n\n\t//the quota check can be skipped if we are sure that we won't need to insert\n\t//a new row into the manifests table\n\tif !manifestExistsAlready {\n\t\terr = p.checkQuotaForManifestPush(account)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tmanifest := &keppel.Manifest{\n\t\t//NOTE: .Digest and .SizeBytes are computed by validateAndStoreManifestCommon()\n\t\tRepositoryID: repo.ID,\n\t\tMediaType: m.MediaType,\n\t\tPushedAt: m.PushedAt,\n\t\tValidatedAt: m.PushedAt,\n\t}\n\tif m.Reference.IsDigest() {\n\t\t//allow validateAndStoreManifestCommon() to validate the user-supplied\n\t\t//digest against the actual manifest data\n\t\tmanifest.Digest = m.Reference.Digest\n\t}\n\terr = p.validateAndStoreManifestCommon(account, repo, 
manifest, m.Contents,\n\t\tfunc(tx *gorp.Transaction) error {\n\t\t\tif m.Reference.IsTag() {\n\t\t\t\terr = upsertTag(tx, keppel.Tag{\n\t\t\t\t\tRepositoryID: repo.ID,\n\t\t\t\t\tName: m.Reference.Tag,\n\t\t\t\t\tDigest: manifest.Digest,\n\t\t\t\t\tPushedAt: m.PushedAt,\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t//after making all DB changes, but before committing the DB transaction,\n\t\t\t//write the manifest into the backend\n\t\t\treturn p.sd.WriteManifest(account, repo.Name, manifest.Digest, m.Contents)\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t//submit audit events, but only if we are reasonably sure that we actually\n\t//inserted a new manifest and/or changed a tag (without this restriction, we\n\t//would log an audit event everytime a manifest is validated or a tag is\n\t//synced; before the introduction of this check, we generated millions of\n\t//useless audit events per month)\n\tif userInfo := actx.UserIdentity.UserInfo(); userInfo != nil {\n\t\trecord := func(target audittools.TargetRenderer) {\n\t\t\tp.auditor.Record(audittools.EventParameters{\n\t\t\t\tTime: p.timeNow(),\n\t\t\t\tRequest: actx.Request,\n\t\t\t\tUser: userInfo,\n\t\t\t\tReasonCode: http.StatusOK,\n\t\t\t\tAction: cadf.CreateAction,\n\t\t\t\tTarget: target,\n\t\t\t})\n\t\t}\n\t\tif !manifestExistsAlready {\n\t\t\trecord(auditManifest{\n\t\t\t\tAccount: account,\n\t\t\t\tRepository: repo,\n\t\t\t\tDigest: manifest.Digest,\n\t\t\t})\n\t\t}\n\t\tif m.Reference.IsTag() && !tagExistsAlready {\n\t\t\trecord(auditTag{\n\t\t\t\tAccount: account,\n\t\t\t\tRepository: repo,\n\t\t\t\tDigest: manifest.Digest,\n\t\t\t\tTagName: m.Reference.Tag,\n\t\t\t})\n\t\t}\n\t}\n\treturn manifest, nil\n}", "func uploadManifest(registry, name, tag, layerDigest string) error {\n\tfmt.Printf(\"Upload manifest to %s (%s:%s): %s\\n\", registry, name, tag, layerDigest)\n\n\tmanifest, err := buildManifest(name, tag, layerDigest)\n\tif err != nil {\n\t\treturn err\n\t}\n\tmanifestURL := fmt.Sprintf(\"%s/v2/%s/manifests/%s\", registry, name, tag)\n\treq, err := http.NewRequest(http.MethodPut, manifestURL, manifest)\n\treq.Header.Set(\"Content-Type\", \"application/vnd.docker.distribution.manifest.v1+json\")\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Printf(\"Got response: %s\\n\", resp.Status)\n\tfor k, v := range resp.Header {\n\t\tfmt.Printf(\"%s: %s\\n\", k, v)\n\t}\n\tfmt.Printf(\"\\n\")\n\tio.Copy(os.Stdout, resp.Body)\n\tfmt.Printf(\"\\n\")\n\n\tif resp.StatusCode < 200 || resp.StatusCode > 299 {\n\t\treturn fmt.Errorf(\"Registry error: %s\", resp.Status)\n\t}\n\treturn nil\n}", "func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) {\n\tdcontext.GetLogger(imh).Debug(\"GetImageManifest\")\n\tmanifests, err := imh.Repository.Manifests(imh)\n\tif err != nil {\n\t\timh.Errors = append(imh.Errors, err)\n\t\treturn\n\t}\n\tvar supports [numStorageTypes]bool\n\n\t// this parsing of Accept headers is not quite as full-featured as godoc.org's parser, but we don't care about \"q=\" values\n\t// https://github.com/golang/gddo/blob/e91d4165076d7474d20abda83f92d15c7ebc3e81/httputil/header/header.go#L165-L202\n\tfor _, acceptHeader := range r.Header[\"Accept\"] {\n\t\t// r.Header[...] 
is a slice in case the request contains the same header more than once\n\t\t// if the header isn't set, we'll get the zero value, which \"range\" will handle gracefully\n\n\t\t// we need to split each header value on \",\" to get the full list of \"Accept\" values (per RFC 2616)\n\t\t// https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1\n\t\tfor _, mediaType := range strings.Split(acceptHeader, \",\") {\n\t\t\tif mediaType, _, err = mime.ParseMediaType(mediaType); err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif mediaType == schema2.MediaTypeManifest {\n\t\t\t\tsupports[manifestSchema2] = true\n\t\t\t}\n\t\t\tif mediaType == manifestlist.MediaTypeManifestList {\n\t\t\t\tsupports[manifestlistSchema] = true\n\t\t\t}\n\t\t\tif mediaType == v1.MediaTypeImageManifest {\n\t\t\t\tsupports[ociSchema] = true\n\t\t\t}\n\t\t\tif mediaType == v1.MediaTypeImageIndex {\n\t\t\t\tsupports[ociImageIndexSchema] = true\n\t\t\t}\n\t\t}\n\t}\n\n\tif imh.Tag != \"\" {\n\t\ttags := imh.Repository.Tags(imh)\n\t\tdesc, err := tags.Get(imh, imh.Tag)\n\t\tif err != nil {\n\t\t\tif _, ok := err.(distribution.ErrTagUnknown); ok {\n\t\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err))\n\t\t\t} else {\n\t\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\timh.Digest = desc.Digest\n\t}\n\n\tif etagMatch(r, imh.Digest.String()) {\n\t\tw.WriteHeader(http.StatusNotModified)\n\t\treturn\n\t}\n\n\tvar options []distribution.ManifestServiceOption\n\tif imh.Tag != \"\" {\n\t\toptions = append(options, distribution.WithTag(imh.Tag))\n\t}\n\tmanifest, err := manifests.Get(imh, imh.Digest, options...)\n\tif err != nil {\n\t\tif _, ok := err.(distribution.ErrManifestUnknownRevision); ok {\n\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err))\n\t\t} else {\n\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))\n\t\t}\n\t\treturn\n\t}\n\t// determine the type of the returned manifest\n\tmanifestType := manifestSchema1\n\tschema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest)\n\tmanifestList, isManifestList := manifest.(*manifestlist.DeserializedManifestList)\n\tif isSchema2 {\n\t\tmanifestType = manifestSchema2\n\t} else if _, isOCImanifest := manifest.(*ocischema.DeserializedManifest); isOCImanifest {\n\t\tmanifestType = ociSchema\n\t} else if isManifestList {\n\t\tif manifestList.MediaType == manifestlist.MediaTypeManifestList {\n\t\t\tmanifestType = manifestlistSchema\n\t\t} else if manifestList.MediaType == v1.MediaTypeImageIndex {\n\t\t\tmanifestType = ociImageIndexSchema\n\t\t}\n\t}\n\n\tif manifestType == ociSchema && !supports[ociSchema] {\n\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithMessage(\"OCI manifest found, but accept header does not support OCI manifests\"))\n\t\treturn\n\t}\n\tif manifestType == ociImageIndexSchema && !supports[ociImageIndexSchema] {\n\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithMessage(\"OCI index found, but accept header does not support OCI indexes\"))\n\t\treturn\n\t}\n\t// Only rewrite schema2 manifests when they are being fetched by tag.\n\t// If they are being fetched by digest, we can't return something not\n\t// matching the digest.\n\tif imh.Tag != \"\" && manifestType == manifestSchema2 && !supports[manifestSchema2] {\n\t\t// Rewrite manifest in schema1 format\n\t\tdcontext.GetLogger(imh).Infof(\"rewriting manifest %s in schema1 format to support old client\", 
imh.Digest.String())\n\n\t\tmanifest, err = imh.convertSchema2Manifest(schema2Manifest)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t} else if imh.Tag != \"\" && manifestType == manifestlistSchema && !supports[manifestlistSchema] {\n\t\t// Rewrite manifest in schema1 format\n\t\tdcontext.GetLogger(imh).Infof(\"rewriting manifest list %s in schema1 format to support old client\", imh.Digest.String())\n\n\t\t// Find the image manifest corresponding to the default\n\t\t// platform\n\t\tvar manifestDigest digest.Digest\n\t\tfor _, manifestDescriptor := range manifestList.Manifests {\n\t\t\tif manifestDescriptor.Platform.Architecture == defaultArch && manifestDescriptor.Platform.OS == defaultOS {\n\t\t\t\tmanifestDigest = manifestDescriptor.Digest\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif manifestDigest == \"\" {\n\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown)\n\t\t\treturn\n\t\t}\n\n\t\tmanifest, err = manifests.Get(imh, manifestDigest)\n\t\tif err != nil {\n\t\t\tif _, ok := err.(distribution.ErrManifestUnknownRevision); ok {\n\t\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err))\n\t\t\t} else {\n\t\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\t// If necessary, convert the image manifest\n\t\tif schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest); isSchema2 && !supports[manifestSchema2] {\n\t\t\tmanifest, err = imh.convertSchema2Manifest(schema2Manifest)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\timh.Digest = manifestDigest\n\t\t}\n\t}\n\n\tct, p, err := manifest.Payload()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", ct)\n\tw.Header().Set(\"Content-Length\", fmt.Sprint(len(p)))\n\tw.Header().Set(\"Docker-Content-Digest\", imh.Digest.String())\n\tw.Header().Set(\"Etag\", fmt.Sprintf(`\"%s\"`, imh.Digest))\n\tw.Write(p)\n}", "func (m *EC2Manifest) Add(version string, image *EC2Image) {\n\tversions := make(sortVersions, 0, len(m.Versions)+1)\n\tfor _, v := range m.Versions {\n\t\tif v.version() == version {\n\t\t\timages := make([]*EC2Image, len(v.Images))\n\t\t\tadded := false\n\t\t\tfor n, i := range v.Images {\n\t\t\t\tif i.Region == image.Region {\n\t\t\t\t\t// replace existing image\n\t\t\t\t\timages[n] = image\n\t\t\t\t\tadded = true\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\timages[n] = i\n\t\t\t}\n\t\t\tif !added {\n\t\t\t\timages = append(images, image)\n\t\t\t}\n\t\t\tv.Images = images\n\t\t\treturn\n\t\t}\n\t\tversions = append(versions, v)\n\t}\n\tversions = append(versions, &EC2Version{\n\t\tVersion: version,\n\t\tImages: []*EC2Image{image},\n\t})\n\tsort.Sort(sort.Reverse(versions))\n\tm.Versions = make([]*EC2Version, 0, maxVersions)\n\tfor i := 0; i < len(versions) && i < maxVersions; i++ {\n\t\tm.Versions = append(m.Versions, versions[i].(*EC2Version))\n\t}\n}", "func (j *Janitor) ValidateNextManifest() (returnErr error) {\n\tdefer func() {\n\t\tif returnErr == nil {\n\t\t\tvalidateManifestSuccessCounter.Inc()\n\t\t} else if returnErr != sql.ErrNoRows {\n\t\t\tvalidateManifestFailedCounter.Inc()\n\t\t\treturnErr = fmt.Errorf(\"while validating a manifest: %s\", returnErr.Error())\n\t\t}\n\t}()\n\n\t//find manifest\n\tvar manifest keppel.Manifest\n\tmaxValidatedAt := j.timeNow().Add(-6 * time.Hour)\n\terr := j.db.SelectOne(&manifest, outdatedManifestSearchQuery, maxValidatedAt)\n\tif err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\tlogg.Debug(\"no manifests to validate - slowing 
down...\")\n\t\t\treturn sql.ErrNoRows\n\t\t}\n\t\treturn err\n\t}\n\n\t//find corresponding account and repo\n\tvar repo keppel.Repository\n\terr = j.db.SelectOne(&repo, `SELECT * FROM repos WHERE id = $1`, manifest.RepositoryID)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot find repo %d for manifest %s: %s\", manifest.RepositoryID, manifest.Digest, err.Error())\n\t}\n\taccount, err := keppel.FindAccount(j.db, repo.AccountName)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot find account for manifest %s/%s: %s\", repo.FullName(), manifest.Digest, err.Error())\n\t}\n\n\t//perform validation\n\tctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)\n\tdefer cancel()\n\terr = retry(ctx, defaultRetryOpts, func() error {\n\t\treturn j.processor().ValidateExistingManifest(*account, repo, &manifest, j.timeNow())\n\t})\n\tif err == nil {\n\t\t//update `validated_at` and reset error message\n\t\t_, err := j.db.Exec(`\n\t\t\tUPDATE manifests SET validated_at = $1, validation_error_message = ''\n\t\t\t WHERE repo_id = $2 AND digest = $3`,\n\t\t\tj.timeNow(), repo.ID, manifest.Digest,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\t//attempt to log the error message, and also update the `validated_at`\n\t\t//timestamp to ensure that the ValidateNextManifest() loop does not get\n\t\t//stuck on this one\n\t\t_, updateErr := j.db.Exec(`\n\t\t\tUPDATE manifests SET validated_at = $1, validation_error_message = $2\n\t\t\t WHERE repo_id = $3 AND digest = $4`,\n\t\t\tj.timeNow(), err.Error(), repo.ID, manifest.Digest,\n\t\t)\n\t\tif updateErr != nil {\n\t\t\terr = fmt.Errorf(\"%s (additional error encountered while recording validation error: %s)\", err.Error(), updateErr.Error())\n\t\t}\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (s *tagStore) Manifest(ctx context.Context, t *models.Tag) (*models.Manifest, error) {\n\tdefer metrics.InstrumentQuery(\"tag_manifest\")()\n\tq := `SELECT\n\t\t\tm.id,\n\t\t\tm.top_level_namespace_id,\n\t\t\tm.repository_id,\n\t\t\tm.schema_version,\n\t\t\tmt.media_type,\n\t\t\tencode(m.digest, 'hex') as digest,\n\t\t\tm.payload,\n\t\t\tmtc.media_type as configuration_media_type,\n\t\t\tencode(m.configuration_blob_digest, 'hex') as configuration_blob_digest,\n\t\t\tm.configuration_payload,\n\t\t\tm.created_at\n\t\tFROM\n\t\t\tmanifests AS m\n\t\t\tJOIN media_types AS mt ON mt.id = m.media_type_id\n\t\t\tLEFT JOIN media_types AS mtc ON mtc.id = m.configuration_media_type_id\n\t\tWHERE\n\t\t\tm.top_level_namespace_id = $1\n\t\t\tAND m.repository_id = $2\n\t\t\tAND m.id = $3`\n\trow := s.db.QueryRowContext(ctx, q, t.NamespaceID, t.RepositoryID, t.ManifestID)\n\n\treturn scanFullManifest(row)\n}", "func ComputeManifest(refToImage map[name.Reference]v1.Image) (Manifest, error) {\n\timageToTags := dedupRefToImage(refToImage)\n\treturn calculateManifest(imageToTags)\n}", "func (cb *ManifestBuffer) appendManifest(m interface{}, sender sender.Sender) {\n\tif len(cb.bufferedManifests) >= cb.Cfg.MaxBufferedManifests {\n\t\tcb.flushManifest(sender)\n\t}\n\n\tcb.bufferedManifests = append(cb.bufferedManifests, m)\n}", "func generateManifest(gitRepo *gitRepo, imageName, imageTag string) (*registry.ManifestData, error) {\n\tbranches, err := gitRepo.branch()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar imageChecksums []string = make([]string, len(branches))\n\tfor _, br := range branches {\n\t\tchecksum := br.imageID()\n\t\tsumTypeBytes, err := gitRepo.branchDescription(br)\n\t\tif err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\t\timageChecksums[br.number()] = string(sumTypeBytes) + \":\" + checksum\n\t}\n\n\tmanifest := &registry.ManifestData{\n\t\tName: imageName,\n\t\tArchitecture: \"amd64\", //unclean but so far looks ok ...\n\t\tTag: imageTag,\n\t\tSchemaVersion: 1,\n\t\tFSLayers: make([]*registry.FSLayer, 0, 4),\n\t}\n\n\tfor i, checksum := range imageChecksums {\n\t\tif tarsum.VersionLabelForChecksum(checksum) != tarsum.Version1.String() {\n\t\t\t//need to calculate the tarsum V1 for each layer ...\n\t\t\tlayerData, err := gitRepo.exportChangeSet(branches[i])\n\t\t\tif err == ErrNoChange {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tdefer layerData.Close()\n\n\t\t\ttarSum, err := tarsum.NewTarSum(layerData, true, tarsum.Version1)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif _, err := io.Copy(ioutil.Discard, tarSum); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tchecksum = tarSum.Sum(nil)\n\t\t}\n\t\tmanifest.FSLayers = append(manifest.FSLayers, &registry.FSLayer{BlobSum: checksum})\n\t}\n\treturn manifest, nil\n}", "func UploadManifest(ctx context.Context, repo distribution.Repository, tag string, manifest distribution.Manifest) error {\n\tcanonical, err := CanonicalManifest(manifest)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tms, err := repo.Manifests(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get manifest service for %s: %w\", repo.Named(), err)\n\t}\n\n\tdgst, err := ms.Put(ctx, manifest, distribution.WithTag(tag))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to upload manifest to %s: %w\", repo.Named(), err)\n\t}\n\n\tif expectedDgst := digest.FromBytes(canonical); dgst != expectedDgst {\n\t\treturn fmt.Errorf(\"upload manifest to %s failed: digest mismatch: got %s, want %s\", repo.Named(), dgst, expectedDgst)\n\t}\n\n\treturn nil\n}", "func (is *ImageStoreLocal) DeleteImageManifest(repo, reference string, detectCollision bool) error {\n\tdir := path.Join(is.rootDir, repo)\n\tif !is.DirExists(dir) {\n\t\treturn zerr.ErrRepoNotFound\n\t}\n\n\tvar lockLatency time.Time\n\n\tvar err error\n\n\tis.Lock(&lockLatency)\n\tdefer func() {\n\t\tis.Unlock(&lockLatency)\n\n\t\tif err == nil {\n\t\t\tmonitoring.SetStorageUsage(is.metrics, is.rootDir, repo)\n\t\t}\n\t}()\n\n\tindex, err := common.GetIndex(is, repo, is.log)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmanifestDesc, err := common.RemoveManifestDescByReference(&index, reference, detectCollision)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = common.UpdateIndexWithPrunedImageManifests(is, &index, repo, manifestDesc, manifestDesc.Digest, is.log)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// now update \"index.json\"\n\tdir = path.Join(is.rootDir, repo)\n\tfile := path.Join(dir, \"index.json\")\n\n\tbuf, err := json.Marshal(index)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := is.writeFile(file, buf); err != nil {\n\t\treturn err\n\t}\n\n\t// Delete blob only when blob digest not present in manifest entry.\n\t// e.g. 
1.0.1 & 1.0.2 have same blob digest so if we delete 1.0.1, blob should not be removed.\n\ttoDelete := true\n\n\tfor _, manifest := range index.Manifests {\n\t\tif manifestDesc.Digest.String() == manifest.Digest.String() {\n\t\t\ttoDelete = false\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif toDelete {\n\t\tp := path.Join(dir, \"blobs\", manifestDesc.Digest.Algorithm().String(), manifestDesc.Digest.Encoded())\n\n\t\t_ = os.Remove(p)\n\t}\n\n\treturn nil\n}", "func UpdateApplication(updateConf *UpdateConfiguration, state *SyncIterationState) ImageUpdaterResult {\n\tvar needUpdate bool = false\n\n\tresult := ImageUpdaterResult{}\n\tapp := updateConf.UpdateApp.Application.GetName()\n\tchangeList := make([]ChangeEntry, 0)\n\n\t// Get all images that are deployed with the current application\n\tapplicationImages := GetImagesFromApplication(&updateConf.UpdateApp.Application)\n\n\tresult.NumApplicationsProcessed += 1\n\n\t// Loop through all images of current application, and check whether one of\n\t// its images is eligible for updating.\n\t//\n\t// Whether an image qualifies for update is dependent on semantic version\n\t// constraints which are part of the application's annotation values.\n\t//\n\tfor _, applicationImage := range updateConf.UpdateApp.Images {\n\t\tupdateableImage := applicationImages.ContainsImage(applicationImage, false)\n\t\tif updateableImage == nil {\n\t\t\tlog.WithContext().AddField(\"application\", app).Debugf(\"Image '%s' seems not to be live in this application, skipping\", applicationImage.ImageName)\n\t\t\tresult.NumSkipped += 1\n\t\t\tcontinue\n\t\t}\n\n\t\t// In some cases, the running image has no tag set. We create a dummy\n\t\t// tag, without name, digest and a timestamp of zero. This dummy tag\n\t\t// will trigger an update on the first run.\n\t\tif updateableImage.ImageTag == nil {\n\t\t\tupdateableImage.ImageTag = tag.NewImageTag(\"\", time.Unix(0, 0), \"\")\n\t\t}\n\n\t\tresult.NumImagesConsidered += 1\n\n\t\timgCtx := log.WithContext().\n\t\t\tAddField(\"application\", app).\n\t\t\tAddField(\"registry\", updateableImage.RegistryURL).\n\t\t\tAddField(\"image_name\", updateableImage.ImageName).\n\t\t\tAddField(\"image_tag\", updateableImage.ImageTag).\n\t\t\tAddField(\"alias\", applicationImage.ImageAlias)\n\n\t\tif updateableImage.KustomizeImage != nil {\n\t\t\timgCtx.AddField(\"kustomize_image\", updateableImage.KustomizeImage)\n\t\t}\n\n\t\timgCtx.Debugf(\"Considering this image for update\")\n\n\t\trep, err := registry.GetRegistryEndpoint(applicationImage.RegistryURL)\n\t\tif err != nil {\n\t\t\timgCtx.Errorf(\"Could not get registry endpoint from configuration: %v\", err)\n\t\t\tresult.NumErrors += 1\n\t\t\tcontinue\n\t\t}\n\n\t\tvar vc image.VersionConstraint\n\t\tif applicationImage.ImageTag != nil {\n\t\t\tvc.Constraint = applicationImage.ImageTag.TagName\n\t\t\timgCtx.Debugf(\"Using version constraint '%s' when looking for a new tag\", vc.Constraint)\n\t\t} else {\n\t\t\timgCtx.Debugf(\"Using no version constraint when looking for a new tag\")\n\t\t}\n\n\t\tvc.SortMode = applicationImage.GetParameterUpdateStrategy(updateConf.UpdateApp.Application.Annotations)\n\t\tvc.MatchFunc, vc.MatchArgs = applicationImage.GetParameterMatch(updateConf.UpdateApp.Application.Annotations)\n\t\tvc.IgnoreList = applicationImage.GetParameterIgnoreTags(updateConf.UpdateApp.Application.Annotations)\n\n\t\t// The endpoint can provide default credentials for pulling images\n\t\terr = rep.SetEndpointCredentials(updateConf.KubeClient)\n\t\tif err != nil {\n\t\t\timgCtx.Errorf(\"Could not 
set registry endpoint credentials: %v\", err)\n\t\t\tresult.NumErrors += 1\n\t\t\tcontinue\n\t\t}\n\n\t\timgCredSrc := applicationImage.GetParameterPullSecret(updateConf.UpdateApp.Application.Annotations)\n\t\tvar creds *image.Credential = &image.Credential{}\n\t\tif imgCredSrc != nil {\n\t\t\tcreds, err = imgCredSrc.FetchCredentials(rep.RegistryAPI, updateConf.KubeClient)\n\t\t\tif err != nil {\n\t\t\t\timgCtx.Warnf(\"Could not fetch credentials: %v\", err)\n\t\t\t\tresult.NumErrors += 1\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tregClient, err := updateConf.NewRegFN(rep, creds.Username, creds.Password)\n\t\tif err != nil {\n\t\t\timgCtx.Errorf(\"Could not create registry client: %v\", err)\n\t\t\tresult.NumErrors += 1\n\t\t\tcontinue\n\t\t}\n\n\t\t// Get list of available image tags from the repository\n\t\ttags, err := rep.GetTags(applicationImage, regClient, &vc)\n\t\tif err != nil {\n\t\t\timgCtx.Errorf(\"Could not get tags from registry: %v\", err)\n\t\t\tresult.NumErrors += 1\n\t\t\tcontinue\n\t\t}\n\n\t\timgCtx.Tracef(\"List of available tags found: %v\", tags.Tags())\n\n\t\t// Get the latest available tag matching any constraint that might be set\n\t\t// for allowed updates.\n\t\tlatest, err := updateableImage.GetNewestVersionFromTags(&vc, tags)\n\t\tif err != nil {\n\t\t\timgCtx.Errorf(\"Unable to find newest version from available tags: %v\", err)\n\t\t\tresult.NumErrors += 1\n\t\t\tcontinue\n\t\t}\n\n\t\t// If we have no latest tag information, it means there was no tag which\n\t\t// has met our version constraint (or there was no semantic versioned tag\n\t\t// at all in the repository)\n\t\tif latest == nil {\n\t\t\timgCtx.Debugf(\"No suitable image tag for upgrade found in list of available tags.\")\n\t\t\tresult.NumSkipped += 1\n\t\t\tcontinue\n\t\t}\n\n\t\t// If the user has specified digest as update strategy, but the running\n\t\t// image is configured to use a tag and no digest, we need to set an\n\t\t// initial dummy digest, so that tag.Equals() will return false.\n\t\t// TODO: Fix this. 
This is just a workaround.\n\t\tif vc.SortMode == image.VersionSortDigest {\n\t\t\tif !updateableImage.ImageTag.IsDigest() {\n\t\t\t\tlog.Tracef(\"Setting dummy digest for image %s\", updateableImage.GetFullNameWithTag())\n\t\t\t\tupdateableImage.ImageTag.TagDigest = \"dummy\"\n\t\t\t}\n\t\t}\n\n\t\tif needsUpdate(updateableImage, applicationImage, latest) {\n\n\t\t\timgCtx.Infof(\"Setting new image to %s\", applicationImage.WithTag(latest).GetFullNameWithTag())\n\t\t\tneedUpdate = true\n\n\t\t\terr = setAppImage(&updateConf.UpdateApp.Application, applicationImage.WithTag(latest))\n\n\t\t\tif err != nil {\n\t\t\t\timgCtx.Errorf(\"Error while trying to update image: %v\", err)\n\t\t\t\tresult.NumErrors += 1\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tcontainerImageNew := applicationImage.WithTag(latest)\n\t\t\t\timgCtx.Infof(\"Successfully updated image '%s' to '%s', but pending spec update (dry run=%v)\", updateableImage.GetFullNameWithTag(), containerImageNew.GetFullNameWithTag(), updateConf.DryRun)\n\t\t\t\tchangeList = append(changeList, ChangeEntry{containerImageNew, updateableImage.ImageTag, containerImageNew.ImageTag})\n\t\t\t}\n\t\t} else {\n\t\t\t// We need to explicitly set the up-to-date images in the spec too, so\n\t\t\t// that we correctly marshal out the parameter overrides to include all\n\t\t\t// images, regardless of those were updated or not.\n\t\t\terr = setAppImage(&updateConf.UpdateApp.Application, applicationImage.WithTag(updateableImage.ImageTag))\n\t\t\tif err != nil {\n\t\t\t\timgCtx.Errorf(\"Error while trying to update image: %v\", err)\n\t\t\t\tresult.NumErrors += 1\n\t\t\t}\n\t\t\timgCtx.Debugf(\"Image '%s' already on latest allowed version\", updateableImage.GetFullNameWithTag())\n\t\t}\n\t}\n\n\twbc, err := getWriteBackConfig(&updateConf.UpdateApp.Application, updateConf.KubeClient, updateConf.ArgoClient)\n\tif err != nil {\n\t\treturn result\n\t}\n\n\tif wbc.Method == WriteBackGit {\n\t\tif updateConf.GitCommitUser != \"\" {\n\t\t\twbc.GitCommitUser = updateConf.GitCommitUser\n\t\t}\n\t\tif updateConf.GitCommitEmail != \"\" {\n\t\t\twbc.GitCommitEmail = updateConf.GitCommitEmail\n\t\t}\n\t\tif len(changeList) > 0 && updateConf.GitCommitMessage != nil {\n\t\t\twbc.GitCommitMessage = TemplateCommitMessage(updateConf.GitCommitMessage, updateConf.UpdateApp.Application.Name, changeList)\n\t\t}\n\t}\n\n\tif needUpdate {\n\t\tlogCtx := log.WithContext().AddField(\"application\", app)\n\t\tlog.Debugf(\"Using commit message: %s\", wbc.GitCommitMessage)\n\t\tif !updateConf.DryRun {\n\t\t\tlogCtx.Infof(\"Committing %d parameter update(s) for application %s\", result.NumImagesUpdated, app)\n\t\t\terr := commitChangesLocked(&updateConf.UpdateApp.Application, wbc, state)\n\t\t\tif err != nil {\n\t\t\t\tlogCtx.Errorf(\"Could not update application spec: %v\", err)\n\t\t\t\tresult.NumErrors += 1\n\t\t\t\tresult.NumImagesUpdated = 0\n\t\t\t} else {\n\t\t\t\tlogCtx.Infof(\"Successfully updated the live application spec\")\n\t\t\t\tresult.NumImagesUpdated += 1\n\t\t\t\tif !updateConf.DisableKubeEvents && updateConf.KubeClient != nil {\n\t\t\t\t\tannotations := map[string]string{}\n\t\t\t\t\tfor i, c := range changeList {\n\t\t\t\t\t\tannotations[fmt.Sprintf(\"argocd-image-updater.image-%d/full-image-name\", i)] = c.Image.GetFullNameWithoutTag()\n\t\t\t\t\t\tannotations[fmt.Sprintf(\"argocd-image-updater.image-%d/image-name\", i)] = c.Image.ImageName\n\t\t\t\t\t\tannotations[fmt.Sprintf(\"argocd-image-updater.image-%d/old-tag\", i)] = 
c.OldTag.String()\n\t\t\t\t\t\tannotations[fmt.Sprintf(\"argocd-image-updater.image-%d/new-tag\", i)] = c.NewTag.String()\n\t\t\t\t\t}\n\t\t\t\t\tmessage := fmt.Sprintf(\"Successfully updated application '%s'\", app)\n\t\t\t\t\t_, err = updateConf.KubeClient.CreateApplicationEvent(&updateConf.UpdateApp.Application, \"ImagesUpdated\", message, annotations)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlogCtx.Warnf(\"Event could not be sent: %v\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tlogCtx.Infof(\"Dry run - not commiting %d changes to application\", result.NumImagesUpdated)\n\t\t}\n\t}\n\n\treturn result\n}", "func (h *proxyHandler) cacheTargetManifest(img *openImage) error {\n\tctx := context.Background()\n\tif img.cachedimg != nil {\n\t\treturn nil\n\t}\n\tunparsedToplevel := image.UnparsedInstance(img.src, nil)\n\tmfest, manifestType, err := unparsedToplevel.Manifest(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar target *image.UnparsedImage\n\tif manifest.MIMETypeIsMultiImage(manifestType) {\n\t\tmanifestList, err := manifest.ListFromBlob(mfest, manifestType)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tinstanceDigest, err := manifestList.ChooseInstance(h.sysctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttarget = image.UnparsedInstance(img.src, &instanceDigest)\n\t} else {\n\t\ttarget = unparsedToplevel\n\t}\n\tcachedimg, err := image.FromUnparsedImage(ctx, h.sysctx, target)\n\tif err != nil {\n\t\treturn err\n\t}\n\timg.cachedimg = cachedimg\n\treturn nil\n}", "func (m *Monocular) UpdateMetadata(info *interfaces.Info, userGUID string, echoContext echo.Context) {\n}", "func (ir *ImageEngine) ManifestAdd(_ context.Context, name string, imageNames []string, opts entities.ManifestAddOptions) (string, error) {\n\toptions := new(manifests.AddOptions).WithAll(opts.All).WithArch(opts.Arch).WithVariant(opts.Variant)\n\toptions.WithFeatures(opts.Features).WithImages(imageNames).WithOS(opts.OS).WithOSVersion(opts.OSVersion)\n\toptions.WithUsername(opts.Username).WithPassword(opts.Password).WithAuthfile(opts.Authfile)\n\n\tif len(opts.Annotation) != 0 {\n\t\tannotations := make(map[string]string)\n\t\tfor _, annotationSpec := range opts.Annotation {\n\t\t\tspec := strings.SplitN(annotationSpec, \"=\", 2)\n\t\t\tif len(spec) != 2 {\n\t\t\t\treturn \"\", fmt.Errorf(\"no value given for annotation %q\", spec[0])\n\t\t\t}\n\t\t\tannotations[spec[0]] = spec[1]\n\t\t}\n\t\topts.Annotations = envLib.Join(opts.Annotations, annotations)\n\t}\n\toptions.WithAnnotation(opts.Annotations)\n\n\tif s := opts.SkipTLSVerify; s != types.OptionalBoolUndefined {\n\t\tif s == types.OptionalBoolTrue {\n\t\t\toptions.WithSkipTLSVerify(true)\n\t\t} else {\n\t\t\toptions.WithSkipTLSVerify(false)\n\t\t}\n\t}\n\n\tid, err := manifests.Add(ir.ClientCtx, name, options)\n\tif err != nil {\n\t\treturn id, fmt.Errorf(\"adding to manifest list %s: %w\", name, err)\n\t}\n\treturn id, nil\n}", "func ImageManifestString(im *schema.ImageManifest) (string, error) {\n\t// When nil have been passed as an argument, it will create an\n\t// empty manifest and define the minimal attributes.\n\tif im == nil {\n\t\tim = new(schema.ImageManifest)\n\t}\n\n\t// Set the default kind of the AC image manifest.\n\tif im.ACKind == \"\" {\n\t\tim.ACKind = schema.ImageManifestKind\n\t}\n\n\t// Set the default version of the AC image manifest.\n\tif im.ACVersion == zeroVersion {\n\t\tim.ACVersion = schema.AppContainerVersion\n\t}\n\n\t// Set the default name of the AC image manifest.\n\tif im.Name == \"\" 
{\n\t\tim.Name = defaultName\n\t}\n\n\tb, err := json.Marshal(toImageManifest(im))\n\treturn string(b), err\n}", "func ConvertManifest(ctx context.Context, store content.Store, desc ocispec.Descriptor) (ocispec.Descriptor, error) {\n\tif !(desc.MediaType == images.MediaTypeDockerSchema2Manifest ||\n\t\tdesc.MediaType == ocispec.MediaTypeImageManifest) {\n\n\t\tlog.G(ctx).Warnf(\"do nothing for media type: %s\", desc.MediaType)\n\t\treturn desc, nil\n\t}\n\n\t// read manifest data\n\tmb, err := content.ReadBlob(ctx, store, desc)\n\tif err != nil {\n\t\treturn ocispec.Descriptor{}, fmt.Errorf(\"failed to read index data: %w\", err)\n\t}\n\n\tvar manifest ocispec.Manifest\n\tif err := json.Unmarshal(mb, &manifest); err != nil {\n\t\treturn ocispec.Descriptor{}, fmt.Errorf(\"failed to unmarshal data into manifest: %w\", err)\n\t}\n\n\t// check config media type\n\tif manifest.Config.MediaType != LegacyConfigMediaType {\n\t\treturn desc, nil\n\t}\n\n\tmanifest.Config.MediaType = images.MediaTypeDockerSchema2Config\n\tdata, err := json.MarshalIndent(manifest, \"\", \" \")\n\tif err != nil {\n\t\treturn ocispec.Descriptor{}, fmt.Errorf(\"failed to marshal manifest: %w\", err)\n\t}\n\n\t// update manifest with gc labels\n\tdesc.Digest = digest.Canonical.FromBytes(data)\n\tdesc.Size = int64(len(data))\n\n\tlabels := map[string]string{}\n\tfor i, c := range append([]ocispec.Descriptor{manifest.Config}, manifest.Layers...) {\n\t\tlabels[fmt.Sprintf(\"containerd.io/gc.ref.content.%d\", i)] = c.Digest.String()\n\t}\n\n\tref := remotes.MakeRefKey(ctx, desc)\n\tif err := content.WriteBlob(ctx, store, ref, bytes.NewReader(data), desc, content.WithLabels(labels)); err != nil {\n\t\treturn ocispec.Descriptor{}, fmt.Errorf(\"failed to update content: %w\", err)\n\t}\n\treturn desc, nil\n}", "func (pmc *proxyMetricsCollector) ManifestPush(bytesPushed uint64) {\n\tatomic.AddUint64(&pmc.manifestMetrics.Requests, 1)\n\tatomic.AddUint64(&pmc.manifestMetrics.Hits, 1)\n\tatomic.AddUint64(&pmc.manifestMetrics.BytesPushed, bytesPushed)\n}", "func (restorer *APTRestorer) writeManifest(manifestType, algorithm string, restoreState *models.RestoreState) {\n\tif algorithm != constants.AlgMd5 && algorithm != constants.AlgSha256 {\n\t\trestorer.Context.MessageLog.Fatalf(\"writeManifest: Unsupported algorithm: %s\", algorithm)\n\t}\n\tmanifestPath := restorer.getManifestPath(manifestType, algorithm, restoreState)\n\tmanifestFile, err := os.Create(manifestPath)\n\tif err != nil {\n\t\trestoreState.PackageSummary.AddError(\"Cannot create manifest file %s: %v\",\n\t\t\tmanifestPath, err)\n\t\treturn\n\t}\n\tdefer manifestFile.Close()\n\tfor _, gf := range restoreState.IntellectualObject.GenericFiles {\n\t\tif !restorer.fileBelongsInManifest(gf, manifestType) {\n\t\t\trestorer.Context.MessageLog.Info(\"Skipping file '%s' for manifest type %s (%s)\",\n\t\t\t\tgf.Identifier, manifestType, algorithm)\n\t\t\tcontinue\n\t\t} else {\n\t\t\trestorer.Context.MessageLog.Info(\"Adding '%s' to %s\", gf.Identifier, manifestFile.Name())\n\t\t}\n\t\tchecksum := gf.GetChecksumByAlgorithm(algorithm)\n\t\tif checksum == nil {\n\t\t\trestoreState.PackageSummary.AddError(\"Cannot find %s checksum for file %s\",\n\t\t\t\talgorithm, gf.OriginalPath())\n\t\t\treturn\n\t\t}\n\t\t_, err := fmt.Fprintln(manifestFile, checksum.Digest, gf.OriginalPath())\n\t\tif err != nil {\n\t\t\trestoreState.PackageSummary.AddError(\"Error writing checksum for file %s \"+\n\t\t\t\t\"to manifest %s: %v\", gf.OriginalPath(), manifestPath, err)\n\t\t\treturn\n\t\t} else 
{\n\t\t\trestorer.Context.MessageLog.Info(\"Wrote %s digest %s for file %s\", algorithm,\n\t\t\t\tchecksum.Digest, gf.Identifier)\n\t\t}\n\t}\n}", "func (h *proxyHandler) GetManifest(args []any) (replyBuf, error) {\n\th.lock.Lock()\n\tdefer h.lock.Unlock()\n\n\tvar ret replyBuf\n\n\tif h.sysctx == nil {\n\t\treturn ret, fmt.Errorf(\"client error: must invoke Initialize\")\n\t}\n\tif len(args) != 1 {\n\t\treturn ret, fmt.Errorf(\"invalid request, expecting one argument\")\n\t}\n\timgref, err := h.parseImageFromID(args[0])\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = h.cacheTargetManifest(imgref)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\timg := imgref.cachedimg\n\n\tctx := context.Background()\n\trawManifest, manifestType, err := img.Manifest(ctx)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t// We only support OCI and docker2schema2. We know docker2schema2 can be easily+cheaply\n\t// converted into OCI, so consumers only need to see OCI.\n\tswitch manifestType {\n\tcase imgspecv1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType:\n\t\tbreak\n\t// Explicitly reject e.g. docker schema 1 type with a \"legacy\" note\n\tcase manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:\n\t\treturn ret, fmt.Errorf(\"unsupported legacy manifest MIME type: %s\", manifestType)\n\tdefault:\n\t\treturn ret, fmt.Errorf(\"unsupported manifest MIME type: %s\", manifestType)\n\t}\n\n\t// We always return the original digest, as that's what clients need to do pull-by-digest\n\t// and in general identify the image.\n\tdigest, err := manifest.Digest(rawManifest)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\tvar serialized []byte\n\t// But, we convert to OCI format on the wire if it's not already. The idea here is that by reusing the containers/image\n\t// stack, clients to this proxy can pretend the world is OCI only, and not need to care about e.g.\n\t// docker schema and MIME types.\n\tif manifestType != imgspecv1.MediaTypeImageManifest {\n\t\tmanifestUpdates := types.ManifestUpdateOptions{ManifestMIMEType: imgspecv1.MediaTypeImageManifest}\n\t\tociImage, err := img.UpdatedImage(ctx, manifestUpdates)\n\t\tif err != nil {\n\t\t\treturn ret, err\n\t\t}\n\n\t\tociSerialized, _, err := ociImage.Manifest(ctx)\n\t\tif err != nil {\n\t\t\treturn ret, err\n\t\t}\n\t\tserialized = ociSerialized\n\t} else {\n\t\tserialized = rawManifest\n\t}\n\treturn h.returnBytes(digest, serialized)\n}", "func (d *InboundCacheDriver) LoadManifest(location models.ImageReference, now time.Time) (contents []byte, mediaType string, err error) {\n\tmaxInsertedAt := now.Add(-d.MaxAge)\n\tentry, ok := d.Entries[location]\n\tif ok && entry.InsertedAt.After(maxInsertedAt) {\n\t\treturn entry.Contents, entry.MediaType, nil\n\t}\n\treturn nil, \"\", sql.ErrNoRows\n}", "func UpdateFileMeta(fmeta FileMeta) {\n\tfileMetas[fmeta.FileSha1] = fmeta\n}", "func (b *Backend) ManifestInspect(ctx context.Context, req *pb.ManifestInspectRequest) (*pb.ManifestInspectResponse, error) {\n\tif !b.daemon.opts.Experimental {\n\t\treturn &pb.ManifestInspectResponse{}, errors.New(\"please enable experimental to use manifest feature\")\n\t}\n\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"ManifestList\": req.GetManifestList(),\n\t}).Info(\"ManifestInspectRequest received\")\n\n\tmanifestName := req.GetManifestList()\n\n\t// get list image\n\tref, _, err := image.FindImage(b.daemon.localStore, manifestName)\n\tif err != nil {\n\t\treturn &pb.ManifestInspectResponse{}, err\n\t}\n\n\t// get image reference\n\tsrc, err := 
ref.NewImageSource(ctx, image.GetSystemContext())\n\tif err != nil {\n\t\treturn &pb.ManifestInspectResponse{}, err\n\t}\n\n\tdefer func() {\n\t\tif cErr := src.Close(); cErr != nil {\n\t\t\tlogrus.Warnf(\"Image source closing error: %v\", cErr)\n\t\t}\n\t}()\n\n\t// get image manifest\n\tmanifestBytes, manifestType, err := src.GetManifest(ctx, nil)\n\tif err != nil {\n\t\treturn &pb.ManifestInspectResponse{}, err\n\t}\n\n\t// check whether image is a list image\n\tif !manifest.MIMETypeIsMultiImage(manifestType) {\n\t\treturn &pb.ManifestInspectResponse{}, errors.Errorf(\"%v is not a manifest list\", manifestName)\n\t}\n\n\t// return list image data\n\treturn &pb.ManifestInspectResponse{\n\t\tData: manifestBytes,\n\t}, nil\n}", "func (is *ObjectStorage) DeleteImageManifest(repo, reference string, detectCollisions bool) error {\n\tdir := path.Join(is.rootDir, repo)\n\tif fi, err := is.store.Stat(context.Background(), dir); err != nil || !fi.IsDir() {\n\t\treturn zerr.ErrRepoNotFound\n\t}\n\n\tvar lockLatency time.Time\n\n\tvar err error\n\n\tis.Lock(&lockLatency)\n\tdefer func() {\n\t\tis.Unlock(&lockLatency)\n\n\t\tif err == nil {\n\t\t\tmonitoring.SetStorageUsage(is.metrics, is.rootDir, repo)\n\t\t}\n\t}()\n\n\tindex, err := common.GetIndex(is, repo, is.log)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmanifestDesc, err := common.RemoveManifestDescByReference(&index, reference, detectCollisions)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = common.UpdateIndexWithPrunedImageManifests(is, &index, repo, manifestDesc, manifestDesc.Digest, is.log)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// now update \"index.json\"\n\tdir = path.Join(is.rootDir, repo)\n\tfile := path.Join(dir, \"index.json\")\n\n\tbuf, err := json.Marshal(index)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := writeFile(is.store, file, buf); err != nil {\n\t\tis.log.Debug().Str(\"deleting reference\", reference).Msg(\"\")\n\n\t\treturn err\n\t}\n\n\t// Delete blob only when blob digest not present in manifest entry.\n\t// e.g. 
1.0.1 & 1.0.2 have same blob digest so if we delete 1.0.1, blob should not be removed.\n\ttoDelete := true\n\n\tfor _, manifest := range index.Manifests {\n\t\tif manifestDesc.Digest.String() == manifest.Digest.String() {\n\t\t\ttoDelete = false\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif toDelete {\n\t\tp := path.Join(dir, \"blobs\", manifestDesc.Digest.Algorithm().String(), manifestDesc.Digest.Encoded())\n\n\t\terr = is.store.Delete(context.Background(), p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (s HTTPServer) UpdateImages(w http.ResponseWriter, r *http.Request) {\n\tvar (\n\t\tvars = mux.Vars(r)\n\t\timage = vars[\"image\"]\n\t\tkind = vars[\"kind\"]\n\t)\n\tif err := r.ParseForm(); err != nil {\n\t\ttransport.WriteError(w, r, http.StatusBadRequest, errors.Wrapf(err, \"parsing form\"))\n\t\treturn\n\t}\n\tvar serviceSpecs []update.ResourceSpec\n\tfor _, service := range r.Form[\"service\"] {\n\t\tserviceSpec, err := update.ParseResourceSpec(service)\n\t\tif err != nil {\n\t\t\ttransport.WriteError(w, r, http.StatusBadRequest, errors.Wrapf(err, \"parsing service spec %q\", service))\n\t\t\treturn\n\t\t}\n\t\tserviceSpecs = append(serviceSpecs, serviceSpec)\n\t}\n\timageSpec, err := update.ParseImageSpec(image)\n\tif err != nil {\n\t\ttransport.WriteError(w, r, http.StatusBadRequest, errors.Wrapf(err, \"parsing image spec %q\", image))\n\t\treturn\n\t}\n\treleaseKind, err := update.ParseReleaseKind(kind)\n\tif err != nil {\n\t\ttransport.WriteError(w, r, http.StatusBadRequest, errors.Wrapf(err, \"parsing release kind %q\", kind))\n\t\treturn\n\t}\n\n\tvar excludes []resource.ID\n\tfor _, ex := range r.URL.Query()[\"exclude\"] {\n\t\ts, err := resource.ParseID(ex)\n\t\tif err != nil {\n\t\t\ttransport.WriteError(w, r, http.StatusBadRequest, errors.Wrapf(err, \"parsing excluded service %q\", ex))\n\t\t\treturn\n\t\t}\n\t\texcludes = append(excludes, s)\n\t}\n\n\tspec := update.ReleaseImageSpec{\n\t\tServiceSpecs: serviceSpecs,\n\t\tImageSpec: imageSpec,\n\t\tKind: releaseKind,\n\t\tExcludes: excludes,\n\t}\n\tcause := update.Cause{\n\t\tUser: r.FormValue(\"user\"),\n\t\tMessage: r.FormValue(\"message\"),\n\t}\n\tresult, err := s.server.UpdateManifests(r.Context(), update.Spec{Type: update.Images, Cause: cause, Spec: spec})\n\tif err != nil {\n\t\ttransport.ErrorResponse(w, r, err)\n\t\treturn\n\t}\n\ttransport.JSONResponse(w, r, result)\n}", "func (p *Processor) ReplicateManifest(ctx context.Context, account keppel.Account, repo keppel.Repository, reference models.ManifestReference, actx keppel.AuditContext) (*keppel.Manifest, []byte, error) {\n\tmanifestBytes, manifestMediaType, err := p.downloadManifestViaInboundCache(ctx, account, repo, reference)\n\tif err != nil {\n\t\tif errorIsManifestNotFound(err) {\n\t\t\treturn nil, nil, UpstreamManifestMissingError{reference, err}\n\t\t}\n\t\treturn nil, nil, err\n\t}\n\n\t//parse the manifest to discover references to other manifests and blobs\n\tmanifestParsed, _, err := keppel.ParseManifest(manifestMediaType, manifestBytes)\n\tif err != nil {\n\t\treturn nil, nil, keppel.ErrManifestInvalid.With(err.Error())\n\t}\n\n\t//replicate referenced manifests recursively if required\n\tfor _, desc := range manifestParsed.ManifestReferences(account.PlatformFilter) {\n\t\t_, err := keppel.FindManifest(p.db, repo, desc.Digest)\n\t\tif errors.Is(err, sql.ErrNoRows) {\n\t\t\t_, _, err = p.ReplicateManifest(ctx, account, repo, models.ManifestReference{Digest: desc.Digest}, actx)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, nil, 
err\n\t\t}\n\t}\n\n\t//mark all missing blobs as pending replication\n\tfor _, desc := range manifestParsed.BlobReferences() {\n\t\t//mark referenced blobs as pending replication if not replicated yet\n\t\tblob, err := p.FindBlobOrInsertUnbackedBlob(desc, account)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\t//also ensure that the blob is mounted in this repo (this is also\n\t\t//important if the blob exists; it may only have been replicated in a\n\t\t//different repo)\n\t\terr = keppel.MountBlobIntoRepo(p.db, *blob, repo)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\n\t//if the manifest is an image, we need to replicate the image configuration\n\t//blob immediately because ValidateAndStoreManifest() uses it for validation\n\t//purposes\n\tconfigBlobDesc := manifestParsed.FindImageConfigBlob()\n\tif configBlobDesc != nil {\n\t\tconfigBlob, err := keppel.FindBlobByAccountName(p.db, configBlobDesc.Digest, account)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tif configBlob.StorageID == \"\" {\n\t\t\t_, err = p.ReplicateBlob(ctx, *configBlob, account, repo, nil)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\tmanifest, err := p.ValidateAndStoreManifest(account, repo, IncomingManifest{\n\t\tReference: reference,\n\t\tMediaType: manifestMediaType,\n\t\tContents: manifestBytes,\n\t\tPushedAt: p.timeNow(),\n\t}, actx)\n\treturn manifest, manifestBytes, err\n}", "func toImageManifest(m *schema.ImageManifest) *aciManifest {\n\treturn &aciManifest{\n\t\tACKind: aciKind(m.ACKind),\n\t\tACVersion: m.ACVersion,\n\t\tName: aciName(m.Name),\n\t\tLabels: aciLabels(m.Labels),\n\t\tApp: (*aciApp)(m.App),\n\t\tAnnotations: aciAnnotations(m.Annotations),\n\t\tDependencies: aciDependencies(m.Dependencies),\n\t\tPathWhitelist: m.PathWhitelist,\n\t}\n}", "func (p *postProcessor) Manifest() (map[string]ManifestEntry, error) {\n\tlog.Println(\"updating gapic manifest\")\n\tentries := map[string]ManifestEntry{} // Key is the package name.\n\tf, err := os.Create(filepath.Join(p.googleCloudDir, \"internal\", \".repo-metadata-full.json\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\tfor _, manual := range p.config.ManualClientInfo {\n\t\tentries[manual.DistributionName] = *manual\n\t}\n\tfor inputDir, conf := range p.config.GoogleapisToImportPath {\n\t\tif conf.ServiceConfig == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tyamlPath := filepath.Join(p.googleapisDir, inputDir, conf.ServiceConfig)\n\t\tyamlFile, err := os.Open(yamlPath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tyamlConfig := struct {\n\t\t\tTitle string `yaml:\"title\"` // We only need the title field.\n\t\t}{}\n\t\tif err := yaml.NewDecoder(yamlFile).Decode(&yamlConfig); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"decode: %v\", err)\n\t\t}\n\t\tdocURL, err := docURL(p.googleCloudDir, conf.ImportPath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to build docs URL: %v\", err)\n\t\t}\n\t\treleaseLevel, err := releaseLevel(p.googleCloudDir, conf.ImportPath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to calculate release level for %v: %v\", inputDir, err)\n\t\t}\n\n\t\tentry := ManifestEntry{\n\t\t\tDistributionName: conf.ImportPath,\n\t\t\tDescription: yamlConfig.Title,\n\t\t\tLanguage: \"Go\",\n\t\t\tClientLibraryType: \"generated\",\n\t\t\tDocsURL: docURL,\n\t\t\tReleaseLevel: releaseLevel,\n\t\t\tLibraryType: gapicAutoLibraryType,\n\t\t}\n\t\tentries[conf.ImportPath] = entry\n\t}\n\t// Remove base module 
entry\n\tdelete(entries, \"\")\n\tenc := json.NewEncoder(f)\n\tenc.SetIndent(\"\", \" \")\n\treturn entries, enc.Encode(entries)\n}", "func (d *Daemon) UpdateManifests(ctx context.Context, spec update.Spec) (job.ID, error) {\n\tvar id job.ID\n\tif spec.Type == \"\" {\n\t\treturn id, errors.New(\"no type in update spec\")\n\t}\n\tswitch s := spec.Spec.(type) {\n\tcase release.Changes:\n\t\tif s.ReleaseKind() == update.ReleaseKindPlan {\n\t\t\tid := job.ID(guid.New())\n\t\t\t_, err := d.executeJob(id, d.makeJobFromUpdate(d.release(spec, s)), d.Logger)\n\t\t\treturn id, err\n\t\t}\n\t\treturn d.queueJob(d.makeLoggingJobFunc(d.makeJobFromUpdate(d.release(spec, s)))), nil\n\tcase resource.PolicyUpdates:\n\t\treturn d.queueJob(d.makeLoggingJobFunc(d.makeJobFromUpdate(d.updatePolicies(spec, s)))), nil\n\tcase update.ManualSync:\n\t\treturn d.queueJob(d.sync()), nil\n\tdefault:\n\t\treturn id, fmt.Errorf(`unknown update type \"%s\"`, spec.Type)\n\t}\n}", "func (b *Backend) ManifestCreate(ctx context.Context, req *pb.ManifestCreateRequest) (*pb.ManifestCreateResponse, error) {\n\tif !b.daemon.opts.Experimental {\n\t\treturn &pb.ManifestCreateResponse{}, errors.New(\"please enable experimental to use manifest feature\")\n\t}\n\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"ManifestList\": req.GetManifestList(),\n\t\t\"Manifest\": req.GetManifests(),\n\t}).Info(\"ManifestCreateRequest received\")\n\n\tmanifestName := req.GetManifestList()\n\tmanifests := req.GetManifests()\n\n\tlist := &manifestList{\n\t\tdocker: manifest.Schema2List{\n\t\t\tSchemaVersion: container.SchemaVersion,\n\t\t\tMediaType: manifest.DockerV2ListMediaType,\n\t\t},\n\t\tinstances: make(map[digest.Digest]string, 0),\n\t}\n\n\tfor _, imageSpec := range manifests {\n\t\t// add image to list\n\t\tif _, err := list.addImage(ctx, b.daemon.localStore, imageSpec); err != nil {\n\t\t\treturn &pb.ManifestCreateResponse{}, err\n\t\t}\n\t}\n\n\t// expand list name\n\t_, imageName, err := dockerfile.CheckAndExpandTag(manifestName)\n\tif err != nil {\n\t\treturn &pb.ManifestCreateResponse{}, err\n\t}\n\t// save list to image\n\timageID, err := list.saveListToImage(b.daemon.localStore, \"\", imageName, list.docker.MediaType)\n\n\treturn &pb.ManifestCreateResponse{\n\t\tImageID: imageID,\n\t}, err\n}", "func (f *File) AddManifest(mainClass string) error {\n\tmanifest := fmt.Sprintf(\"Manifest-Version: 1.0\\nMain-Class: %s\\n\", mainClass)\n\treturn f.WriteFile(\"META-INF/MANIFEST.MF\", []byte(manifest), 0644)\n}", "func calculateManifest(imageToTags map[v1.Image][]string) (m Manifest, err error) {\n\tif len(imageToTags) == 0 {\n\t\treturn nil, errors.New(\"set of images is empty\")\n\t}\n\n\tfor img, tags := range imageToTags {\n\t\tcfgName, err := img.ConfigName()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Store foreign layer info.\n\t\tlayerSources := make(map[v1.Hash]v1.Descriptor)\n\n\t\t// Write the layers.\n\t\tlayers, err := img.Layers()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tlayerFiles := make([]string, len(layers))\n\t\tfor i, l := range layers {\n\t\t\td, err := l.Digest()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t// Munge the file name to appease ancient technology.\n\t\t\t//\n\t\t\t// tar assumes anything with a colon is a remote tape drive:\n\t\t\t// https://www.gnu.org/software/tar/manual/html_section/tar_45.html\n\t\t\t// Drop the algorithm prefix, e.g. 
\"sha256:\"\n\t\t\thex := d.Hex\n\n\t\t\t// gunzip expects certain file extensions:\n\t\t\t// https://www.gnu.org/software/gzip/manual/html_node/Overview.html\n\t\t\tlayerFiles[i] = fmt.Sprintf(\"%s.tar.gz\", hex)\n\n\t\t\t// Add to LayerSources if it's a foreign layer.\n\t\t\tdesc, err := partial.BlobDescriptor(img, d)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif !desc.MediaType.IsDistributable() {\n\t\t\t\tdiffid, err := partial.BlobToDiffID(img, d)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tlayerSources[diffid] = *desc\n\t\t\t}\n\t\t}\n\n\t\t// Generate the tar descriptor and write it.\n\t\tm = append(m, Descriptor{\n\t\t\tConfig: cfgName.String(),\n\t\t\tRepoTags: tags,\n\t\t\tLayers: layerFiles,\n\t\t\tLayerSources: layerSources,\n\t\t})\n\t}\n\t// sort by name of the repotags so it is consistent. Alternatively, we could sort by hash of the\n\t// descriptor, but that would make it hard for humans to process\n\tsort.Slice(m, func(i, j int) bool {\n\t\treturn strings.Join(m[i].RepoTags, \",\") < strings.Join(m[j].RepoTags, \",\")\n\t})\n\n\treturn m, nil\n}", "func (p *Pvr) UpdateApplication(app AppData) error {\n\tappManifest, err := p.GetApplicationManifest(app.Appname)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !app.DoOverlay && (appManifest.Base == \"\" || appManifest.Base == app.Appname) {\n\t\tapp.SquashFile = SQUASH_FILE\n\t\tapp.DoOverlay = false\n\t\tfmt.Println(\"Update base: \" + app.SquashFile)\n\t\tappManifest.DockerOvlDigest = \"\"\n\t} else {\n\t\tapp.DoOverlay = true\n\t\tapp.SquashFile = SQUASH_OVL_FILE\n\t\tfmt.Println(\"Update ovl: \" + app.SquashFile)\n\t}\n\tswitch app.SourceType {\n\tcase models.SourceTypeDocker:\n\t\terr = UpdateDockerApp(p, &app, appManifest)\n\tcase models.SourceTypeRootFs:\n\t\terr = UpdateRootFSApp(p, &app, appManifest)\n\tcase models.SourceTypePvr:\n\t\terr = UpdatePvApp(p, &app, appManifest)\n\tdefault:\n\t\terr = fmt.Errorf(\"type %s not supported yet\", models.SourceTypePvr)\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = p.InstallApplication(&app)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tapps, err := p.GetApplications()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Printf(\"Searching for dependencies of %s\\n\", app.Appname)\n\tfor _, a := range apps {\n\t\tif appManifest.Base == app.Appname {\n\t\t\tfmt.Printf(\"Updating dependency %s\\n\", a.Appname)\n\t\t\tif err := UpdateDockerApp(p, &a, appManifest); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfmt.Printf(\"%s is up to date\\n\", a.Appname)\n\t\t}\n\t}\n\n\treturn err\n}", "func (a *ACBuild) ReplaceManifest(manifestPath string) (err error) {\n\tif err = a.lock(); err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif err1 := a.unlock(); err == nil {\n\t\t\terr = err1\n\t\t}\n\t}()\n\n\tfinfo, err := os.Stat(manifestPath)\n\tswitch {\n\tcase os.IsNotExist(err):\n\t\treturn fmt.Errorf(\"no such file or directory: %s\", manifestPath)\n\tcase err != nil:\n\t\treturn err\n\tcase finfo.IsDir():\n\t\treturn fmt.Errorf(\"%s is a directory\", manifestPath)\n\tdefault:\n\t\tbreak\n\t}\n\n\tmanblob, err := ioutil.ReadFile(manifestPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Marshal and Unmarshal the manifest to assert that it's valid and to\n\t// strip any whitespace\n\n\tvar man schema.ImageManifest\n\terr = man.UnmarshalJSON(manblob)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmanblob, err = man.MarshalJSON()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn 
ioutil.WriteFile(path.Join(a.CurrentACIPath, aci.ManifestFile), manblob, 0755)\n}", "func (s *Stargate) InitializeManifestUpdateCycle() {\n\tgo func() {\n\t\tfor {\n\t\t\tif err := s.UpdateManifest(); err != nil {\n\t\t\t\tfmt.Printf(\"Error updating manifest: %v\\n\", err)\n\t\t\t}\n\t\t\ttime.Sleep(time.Duration(s.UpdatePeriod) * time.Second)\n\t\t}\n\t}()\n}", "func (*CNETMsg_SpawnGroup_ManifestUpdate) Descriptor() ([]byte, []int) {\n\treturn file_artifact_networkbasetypes_proto_rawDescGZIP(), []int{17}\n}", "func (ir *ImageEngine) ManifestRemoveDigest(ctx context.Context, name string, image string) (string, error) {\n\tupdatedListID, err := manifests.Remove(ir.ClientCtx, name, image, nil)\n\tif err != nil {\n\t\treturn updatedListID, fmt.Errorf(\"removing from manifest %s: %w\", name, err)\n\t}\n\treturn fmt.Sprintf(\"%s :%s\\n\", updatedListID, image), nil\n}", "func (client *Client) AddImage(manifest map[string][]byte, layers map[string][]byte) (string, error) {\n\tmf := make(map[string]files.Node)\n\tfor k, v := range manifest {\n\t\tmf[k] = files.NewBytesFile(v)\n\t}\n\n\tbf := make(map[string]files.Node)\n\tfor k, v := range layers {\n\t\tbf[k] = files.NewBytesFile(v)\n\t}\n\n\tsf := files.NewMapDirectory(map[string]files.Node{\n\t\t\"blobs\": files.NewMapDirectory(bf),\n\t\t\"manifests\": files.NewMapDirectory(mf),\n\t})\n\tslf := files.NewSliceDirectory([]files.DirEntry{files.FileEntry(\"image\", sf)})\n\n\treader := files.NewMultiFileReader(slf, true)\n\tresp, err := client.client.Request(\"add\").\n\t\tOption(\"recursive\", true).\n\t\tOption(\"cid-version\", 1).\n\t\tBody(reader).\n\t\tSend(context.Background())\n\tif err != nil {\n\t\treturn \"\", nil\n\t}\n\n\tdefer resp.Close()\n\n\tif resp.Error != nil {\n\t\treturn \"\", resp.Error\n\t}\n\n\tdec := json.NewDecoder(resp.Output)\n\tvar final string\n\tfor {\n\t\tvar out object\n\t\terr = dec.Decode(&out)\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn \"\", err\n\t\t}\n\t\tfinal = out.Hash\n\t}\n\n\tif final == \"\" {\n\t\treturn \"\", errors.New(\"no results received\")\n\t}\n\n\treturn final, nil\n}", "func Update(fileMeta FileMeta){\n\tfileMetas[fileMeta.FileSha1] = fileMeta\n}", "func (c *Controller) syncHandler(key string) error {\n\t// If an error occurs during handling, we'll requeue the item so we can\n\t// attempt processing again later. 
This could have been caused by a\n\t// temporary network failure, or any other transient reason.\n\n\t// Convert the namespace/name string into a distinct namespace and name\n\tns, name, err := cache.SplitMetaNamespaceKey(key)\n\tif err != nil {\n\t\tutilruntime.HandleError(fmt.Errorf(\"invalid resource key: %s\", key))\n\t\treturn nil\n\t}\n\n\tklog.V(4).Infof(\"start processing Manifest %q\", key)\n\t// Get the Manifest resource with this name\n\tmanifest, err := c.manifestLister.Manifests(ns).Get(name)\n\t// The Manifest resource may no longer exist, in which case we stop processing.\n\tif errors.IsNotFound(err) {\n\t\tklog.V(2).Infof(\"Manifest %q has been deleted\", key)\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif manifest.Template.Raw == nil {\n\t\tklog.Warning(\"manifest.Template.Raw is empty, %q\", klog.KObj(manifest))\n\t\treturn nil\n\t}\n\tutd := &unstructured.Unstructured{}\n\terr = json.Unmarshal(manifest.Template.Raw, &utd.Object)\n\tif err != nil {\n\t\tklog.Errorf(\"unmarshal error, %q, err=%v\", klog.KObj(manifest), err)\n\t\treturn err\n\t}\n\n\tresourceKind := utd.GroupVersionKind().Kind\n\n\tmatchAnnotations := util.FindAnnotationsMathKeyPrefix(utd.GetAnnotations())\n\t//为空则表示在update时候进行过更新,清除过annotation\n\tdeleteSubscription := len(matchAnnotations) == 0\n\tif manifest.DeletionTimestamp != nil {\n\t\t//删除\n\t\tdeleteSubscription = true\n\t}\n\tmatchLabels := map[string]string{\n\t\t\"bkbcs.tencent.com/resource-kind\": resourceKind,\n\t\t\"bkbcs.tencent.com/resource-ns\": utd.GetNamespace(),\n\t\t\"bkbcs.tencent.com/resource-name\": utd.GetName(),\n\t}\n\tsubscriptionName := c.genAutoCreateSubscriptionName(utd.GetName())\n\n\tsubscriptionList, err := c.clusternetClient.AppsV1alpha1().Subscriptions(utd.GetNamespace()).List(context.Background(), metav1.ListOptions{\n\t\tLabelSelector: labels.Set(matchLabels).String(),\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\t//只会存在0个或1个\n\tif len(subscriptionList.Items) > 1 {\n\t\treturn fmt.Errorf(\"auto create sub matchLabels match %d\", len(subscriptionList.Items))\n\t}\n\tif deleteSubscription {\n\t\tklog.Infof(\"start delete subscription %s\", subscriptionName)\n\t\t//删除Subscription\n\t\terr = c.clusternetClient.AppsV1alpha1().Subscriptions(utd.GetNamespace()).Delete(context.Background(), subscriptionList.Items[0].Name, metav1.DeleteOptions{})\n\t\tif errors.IsNotFound(err) {\n\t\t\tklog.V(2).Infof(\"Subscription %s:%s has been deleted\", ns, name)\n\t\t\treturn nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\t//更新或创建Subscription\n\tif len(subscriptionList.Items) == 0 {\n\t\t//create\n\t\tsubscription := &appsapi.Subscription{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: subscriptionName,\n\t\t\t\tNamespace: utd.GetNamespace(),\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\"bkbcs.tencent.com/created-by\": \"bcs-clusternet-controller\",\n\t\t\t\t},\n\t\t\t\tLabels: matchLabels,\n\t\t\t},\n\t\t\tSpec: c.genSubscriptionSpec(matchAnnotations, utd.GroupVersionKind(), utd.GetNamespace(), utd.GetName()),\n\t\t}\n\t\tklog.Infof(\"start create Subscriptions %q\", klog.KObj(subscription))\n\t\t_, err = c.clusternetClient.AppsV1alpha1().Subscriptions(utd.GetNamespace()).Create(context.Background(), subscription, metav1.CreateOptions{})\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"create Subscriptions %q error, err=%+v\", klog.KObj(subscription), err)\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\t//update\n\tmatchSubscription := 
subscriptionList.Items[0]\n\tmatchSubscription.Spec = c.genSubscriptionSpec(matchAnnotations, utd.GroupVersionKind(), utd.GetNamespace(), utd.GetName())\n\tklog.Infof(\"start update Subscriptions %q\", klog.KObj(&matchSubscription))\n\t_, err = c.clusternetClient.AppsV1alpha1().Subscriptions(utd.GetNamespace()).Update(context.Background(), &matchSubscription, metav1.UpdateOptions{})\n\tif err != nil {\n\t\tklog.Errorf(\"update subscriptions %q error, err=%v\", klog.KObj(&matchSubscription), err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func (i *Images) GetManifestImages(\n\tregistry, version, buildPath string,\n\tforTarballFn func(path, origTag, newTagWithArch string) error,\n) (map[string][]string, error) {\n\tmanifestImages := make(map[string][]string)\n\n\treleaseImagesPath := filepath.Join(buildPath, ImagesPath)\n\tlogrus.Infof(\"Getting manifest images in %s\", releaseImagesPath)\n\n\tarchPaths, err := os.ReadDir(releaseImagesPath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"read images path %s: %w\", releaseImagesPath, err)\n\t}\n\n\tfor _, archPath := range archPaths {\n\t\tarch := archPath.Name()\n\t\tif !archPath.IsDir() {\n\t\t\tlogrus.Infof(\"Skipping %s because it's not a directory\", arch)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := filepath.Walk(\n\t\t\tfilepath.Join(releaseImagesPath, arch),\n\t\t\tfunc(path string, info os.FileInfo, err error) error {\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif info.IsDir() {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tfileName := info.Name()\n\t\t\t\tif !strings.HasSuffix(fileName, \".tar\") {\n\t\t\t\t\tlogrus.Infof(\"Skipping non-tarball %s\", fileName)\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\torigTag, err := i.RepoTagFromTarball(path)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"getting repo tags for tarball: %w\", err)\n\t\t\t\t}\n\n\t\t\t\ttagMatches := tagRegex.FindStringSubmatch(origTag)\n\t\t\t\tif len(tagMatches) != 2 {\n\t\t\t\t\treturn fmt.Errorf(\n\t\t\t\t\t\t\"malformed tag %s in %s\", origTag, path,\n\t\t\t\t\t)\n\t\t\t\t}\n\n\t\t\t\tbinary := tagMatches[1]\n\t\t\t\tnewTag := filepath.Join(\n\t\t\t\t\tregistry,\n\t\t\t\t\tstrings.TrimSuffix(binary, \"-\"+arch),\n\t\t\t\t)\n\t\t\t\tnewTagWithArch := fmt.Sprintf(\"%s-%s:%s\", newTag, arch, version)\n\t\t\t\tmanifestImages[newTag] = append(manifestImages[newTag], arch)\n\n\t\t\t\tif forTarballFn != nil {\n\t\t\t\t\tif err := forTarballFn(\n\t\t\t\t\t\tpath, origTag, newTagWithArch,\n\t\t\t\t\t); err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"executing tarball callback: %w\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t},\n\t\t); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"traversing path: %w\", err)\n\t\t}\n\t}\n\treturn manifestImages, nil\n}", "func (l logger) LogManifest(reference models.ManifestReference, level int, err error, isCached bool) {\n\tindent := strings.Repeat(\" \", level)\n\tsuffix := \"\"\n\tif isCached {\n\t\tsuffix = \" (cached result)\"\n\t}\n\tif err == nil {\n\t\tlogg.Info(\"%smanifest %s looks good%s\", indent, reference, suffix)\n\t} else {\n\t\tlogg.Error(\"%smanifest %s validation failed: %s%s\", indent, reference, err.Error(), suffix)\n\t}\n}", "func (a *App) UpdateMeta(w http.ResponseWriter, r *http.Request) {\n\tdecoder := json.NewDecoder(r.Body)\n\tvar metadata map[string]string\n\terr := decoder.Decode(&metadata)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tdefer r.Body.Close()\n\tdoc := &types.MetaDoc{\n\t\tID: a.eventID,\n\t\tCorrelationID: 
a.correlationID,\n\t\tParentEventID: a.parentEventID,\n\t\tMetadata: metadata,\n\t}\n\terr = a.Meta.AddOrUpdateMetaDoc(doc)\n\tif err != nil {\n\t\trespondWithError(err, http.StatusNotFound, w)\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n}", "func (w *Worker) updateDeploymentImage(dep types.Deployment, artifactID int64) {\n\tartifact, err := w.ciClient.GetBuildArtifactByID(artifactID)\n\tif err != nil {\n\t\tw.log.Error(\"Failed to get build artifact\", err)\n\t\treturn\n\t}\n\tw.log.Info(\"Got artifact with url \" + artifact.Name)\n\tlockRes := strconv.FormatInt(dep.ID, 10)\n\terr = w.distLock.Lock(lockRes)\n\tif err != nil {\n\t\tw.log.Error(\"Failed to acquire deployment lock\", err)\n\t\treturn\n\t}\n\tchangedOk, stdout := w.kubectl.ChangeImage(dep.K8SName, artifact.Name)\n\terr = w.distLock.Unlock(lockRes)\n\tif err != nil {\n\t\tw.log.Error(\"Failed to release deployment lock\", err)\n\t}\n\tdep.ArtifactID = artifact.ID\n\terr = w.recordRevision(dep, stdout)\n\tif err != nil {\n\t\tw.log.Error(\"Failed to record revision\", err)\n\t}\n\tif changedOk == true {\n\t\terr = w.databaseClient.SaveDeployment(&dep)\n\t\tif err != nil {\n\t\t\tw.log.Error(\"Failed to save deployment to db\", err)\n\t\t\treturn\n\t\t}\n\t}\n}", "func (d *swiftDriver) WriteManifest(account keppel.Account, repoName string, manifestDigest digest.Digest, contents []byte) error {\n\tc, _, err := d.getBackendConnection(account)\n\tif err != nil {\n\t\treturn err\n\t}\n\to := manifestObject(c, repoName, manifestDigest)\n\treturn uploadToObject(o, bytes.NewReader(contents), nil, nil)\n}", "func cleanupManifest(origData, finalData []byte) ([]byte, error) {\n\tobjectMetacreationTs := []byte(\"\\n creationTimestamp: null\\n\")\n\tspecTemplatecreationTs := []byte(\"\\n creationTimestamp: null\\n\")\n\tjobSpecTemplatecreationTs := []byte(\"\\n creationTimestamp: null\\n\")\n\tnullStatus := []byte(\"\\nstatus: {}\\n\")\n\tnullReplicaStatus := []byte(\"status:\\n replicas: 0\\n\")\n\tnullLBStatus := []byte(\"status:\\n loadBalancer: {}\\n\")\n\tnullMetaStatus := []byte(\"\\n status: {}\\n\")\n\n\tvar hasObjectMetacreationTs, hasSpecTemplatecreationTs, hasJobSpecTemplatecreationTs, hasNullStatus,\n\t\thasNullReplicaStatus, hasNullLBStatus, hasNullMetaStatus bool\n\n\tif origData != nil {\n\t\thasObjectMetacreationTs = bytes.Contains(origData, objectMetacreationTs)\n\t\thasSpecTemplatecreationTs = bytes.Contains(origData, specTemplatecreationTs)\n\t\thasJobSpecTemplatecreationTs = bytes.Contains(origData, jobSpecTemplatecreationTs)\n\n\t\thasNullStatus = bytes.Contains(origData, nullStatus)\n\t\thasNullReplicaStatus = bytes.Contains(origData, nullReplicaStatus)\n\t\thasNullLBStatus = bytes.Contains(origData, nullLBStatus)\n\t\thasNullMetaStatus = bytes.Contains(origData, nullMetaStatus)\n\t} // null value is false in case of origFile\n\n\tif !hasObjectMetacreationTs {\n\t\tfinalData = bytes.Replace(finalData, objectMetacreationTs, []byte(\"\\n\"), -1)\n\t}\n\tif !hasSpecTemplatecreationTs {\n\t\tfinalData = bytes.Replace(finalData, specTemplatecreationTs, []byte(\"\\n\"), -1)\n\t}\n\tif !hasJobSpecTemplatecreationTs {\n\t\tfinalData = bytes.Replace(finalData, jobSpecTemplatecreationTs, []byte(\"\\n\"), -1)\n\t}\n\tif !hasNullStatus {\n\t\tfinalData = bytes.Replace(finalData, nullStatus, []byte(\"\\n\"), -1)\n\t}\n\tif !hasNullReplicaStatus {\n\t\tfinalData = bytes.Replace(finalData, nullReplicaStatus, []byte(\"\\n\"), -1)\n\t}\n\tif !hasNullLBStatus {\n\t\tfinalData = bytes.Replace(finalData, nullLBStatus, 
[]byte(\"\\n\"), -1)\n\t}\n\tif !hasNullMetaStatus {\n\t\tfinalData = bytes.Replace(finalData, nullMetaStatus, []byte(\"\\n\"), -1)\n\t}\n\n\treturn finalData, nil\n}", "func (p *postProcessor) Manifest() (map[string]ManifestEntry, error) {\n\tlog.Println(\"updating gapic manifest\")\n\tentries := map[string]ManifestEntry{} // Key is the package name.\n\tf, err := os.Create(filepath.Join(p.googleCloudDir, \"internal\", \".repo-metadata-full.json\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\tfor _, manual := range p.config.ManualClientInfo {\n\t\tentries[manual.DistributionName] = *manual\n\t}\n\tfor inputDir, conf := range p.config.GoogleapisToImportPath {\n\t\tif conf.ServiceConfig == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tyamlPath := filepath.Join(p.googleapisDir, inputDir, conf.ServiceConfig)\n\t\tyamlFile, err := os.Open(yamlPath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tyamlConfig := struct {\n\t\t\tTitle string `yaml:\"title\"` // We only need the title and name.\n\t\t\tNameFull string `yaml:\"name\"` // We only need the title and name.\n\t\t}{}\n\t\tif err := yaml.NewDecoder(yamlFile).Decode(&yamlConfig); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"decode: %v\", err)\n\t\t}\n\t\tdocURL, err := docURL(p.googleCloudDir, conf.ImportPath, conf.RelPath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to build docs URL: %v\", err)\n\t\t}\n\t\treleaseLevel, err := releaseLevel(p.googleCloudDir, conf.ImportPath, conf.RelPath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to calculate release level for %v: %v\", inputDir, err)\n\t\t}\n\n\t\tapiShortname, err := apiShortname(yamlConfig.NameFull)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to determine api_shortname from %v: %v\", yamlConfig.NameFull, err)\n\t\t}\n\n\t\tentry := ManifestEntry{\n\t\t\tAPIShortname: apiShortname,\n\t\t\tDistributionName: conf.ImportPath,\n\t\t\tDescription: yamlConfig.Title,\n\t\t\tLanguage: \"go\",\n\t\t\tClientLibraryType: \"generated\",\n\t\t\tClientDocumentation: docURL,\n\t\t\tReleaseLevel: releaseLevel,\n\t\t\tLibraryType: gapicAutoLibraryType,\n\t\t}\n\t\tentries[conf.ImportPath] = entry\n\t}\n\t// Remove base module entry\n\tdelete(entries, \"\")\n\tenc := json.NewEncoder(f)\n\tenc.SetIndent(\"\", \" \")\n\treturn entries, enc.Encode(entries)\n}", "func revertToManifest(kv *DB, mf *Manifest, idMap map[uint64]struct{}) error {\n\t// 1. Check all files in manifest exist.\n\tfor id := range mf.Tables {\n\t\tif _, ok := idMap[id]; !ok {\n\t\t\treturn fmt.Errorf(\"file does not exist for table %d\", id)\n\t\t}\n\t}\n\n\t// 2. 
Delete files that shouldn't exist.\n\tfor id := range idMap {\n\t\tif _, ok := mf.Tables[id]; !ok {\n\t\t\tkv.elog.Printf(\"Table file %d not referenced in MANIFEST\\n\", id)\n\t\t\tfilename := table.NewFilename(id, kv.opt.Dir)\n\t\t\tif err := os.Remove(filename); err != nil {\n\t\t\t\treturn y.Wrapf(err, \"While removing table %d\", id)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func RegisterManifestHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {\n\treturn RegisterManifestHandlerClient(ctx, mux, NewManifestClient(conn))\n}", "func (i *Images) Validate(registry, version, buildPath string) error {\n\tlogrus.Infof(\"Validating image manifests in %s\", registry)\n\tversion = i.normalizeVersion(version)\n\n\tmanifestImages, err := i.GetManifestImages(\n\t\tregistry, version, buildPath,\n\t\tfunc(_, _, image string) error {\n\t\t\tlogrus.Infof(\"Verifying that image is signed: %s\", image)\n\t\t\tif err := i.VerifyImage(i.signer, image); err != nil {\n\t\t\t\treturn fmt.Errorf(\"verify signed image: %w\", err)\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"get manifest images: %w\", err)\n\t}\n\tlogrus.Infof(\"Got manifest images %+v\", manifestImages)\n\n\tfor image, arches := range manifestImages {\n\t\timageVersion := fmt.Sprintf(\"%s:%s\", image, version)\n\n\t\tmanifestBytes, err := crane.Manifest(imageVersion)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"get remote manifest from %s: %w\", imageVersion, err)\n\t\t}\n\n\t\tlogrus.Info(\"Verifying that image manifest list is signed\")\n\t\tif err := i.VerifyImage(i.signer, imageVersion); err != nil {\n\t\t\treturn fmt.Errorf(\"verify signed manifest list: %w\", err)\n\t\t}\n\n\t\tmanifest := string(manifestBytes)\n\t\tmanifestFile, err := os.CreateTemp(\"\", \"manifest-\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"create temp file for manifest: %w\", err)\n\t\t}\n\t\tif _, err := manifestFile.WriteString(manifest); err != nil {\n\t\t\treturn fmt.Errorf(\"write manifest to %s: %w\", manifestFile.Name(), err)\n\t\t}\n\n\t\tfor _, arch := range arches {\n\t\t\tlogrus.Infof(\n\t\t\t\t\"Checking image digest for %s on %s architecture\", image, arch,\n\t\t\t)\n\n\t\t\tdigest, err := i.ExecuteOutput(\n\t\t\t\t\"jq\", \"--arg\", \"a\", arch, \"-r\",\n\t\t\t\t\".manifests[] | select(.platform.architecture == $a) | .digest\",\n\t\t\t\tmanifestFile.Name(),\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"get digest from manifest file %s for arch %s: %w\", manifestFile.Name(), arch, err)\n\t\t\t}\n\n\t\t\tif digest == \"\" {\n\t\t\t\treturn fmt.Errorf(\n\t\t\t\t\t\"could not find the image digest for %s on %s\",\n\t\t\t\t\timageVersion, arch,\n\t\t\t\t)\n\t\t\t}\n\n\t\t\tlogrus.Infof(\"Digest for %s on %s: %s\", imageVersion, arch, digest)\n\t\t}\n\n\t\tif err := os.RemoveAll(manifestFile.Name()); err != nil {\n\t\t\treturn fmt.Errorf(\"remove manifest file: %w\", err)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (r *Runtime) imagesIDsForManifest(manifestBytes []byte, sys *types.SystemContext) ([]string, error) {\n\tvar imageDigest digest.Digest\n\tmanifestType := manifest.GuessMIMEType(manifestBytes)\n\tif manifest.MIMETypeIsMultiImage(manifestType) {\n\t\tlist, err := manifest.ListFromBlob(manifestBytes, manifestType)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"parsing manifest list: %w\", err)\n\t\t}\n\t\td, err := list.ChooseInstance(sys)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"choosing instance from manifest list: %w\", 
err)\n\t\t}\n\t\timageDigest = d\n\t} else {\n\t\td, err := manifest.Digest(manifestBytes)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"digesting manifest\")\n\t\t}\n\t\timageDigest = d\n\t}\n\timages, err := r.store.ImagesByDigest(imageDigest)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"listing images by manifest digest: %w\", err)\n\t}\n\n\t// If you have additionStores defined and the same image stored in\n\t// both storage and additional store, it can be output twice.\n\t// Fixes github.com/containers/podman/issues/18647\n\tresults := []string{}\n\timageMap := map[string]bool{}\n\tfor _, image := range images {\n\t\tif imageMap[image.ID] {\n\t\t\tcontinue\n\t\t}\n\t\timageMap[image.ID] = true\n\t\tresults = append(results, image.ID)\n\t}\n\tif len(results) == 0 {\n\t\treturn nil, fmt.Errorf(\"identifying new image by manifest digest: %w\", storage.ErrImageUnknown)\n\t}\n\treturn results, nil\n}", "func WriteManifest(manifestWriter io.Writer, compression *pwr.CompressionSettings, container *tlc.Container, blockHashes *BlockHashMap) error {\n\trawWire := wire.NewWriteContext(manifestWriter)\n\terr := rawWire.WriteMagic(pwr.ManifestMagic)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\terr = rawWire.WriteMessage(&pwr.ManifestHeader{\n\t\tCompression: compression,\n\t\tAlgorithm: pwr.HashAlgorithm_SHAKE128_32,\n\t})\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\twire, err := pwr.CompressWire(rawWire, compression)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\terr = wire.WriteMessage(container)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tsh := &pwr.SyncHeader{}\n\tmbh := &pwr.ManifestBlockHash{}\n\n\tfor fileIndex, f := range container.Files {\n\t\tsh.Reset()\n\t\tsh.FileIndex = int64(fileIndex)\n\t\terr = wire.WriteMessage(sh)\n\t\tif err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\n\t\tnumBlocks := ComputeNumBlocks(f.Size)\n\n\t\tfor blockIndex := int64(0); blockIndex < numBlocks; blockIndex++ {\n\t\t\tloc := BlockLocation{FileIndex: int64(fileIndex), BlockIndex: blockIndex}\n\t\t\thash := blockHashes.Get(loc)\n\t\t\tif hash == nil {\n\t\t\t\terr = fmt.Errorf(\"missing BlockHash for block %+v\", loc)\n\t\t\t\treturn errors.WithStack(err)\n\t\t\t}\n\n\t\t\tmbh.Reset()\n\t\t\tmbh.Hash = hash\n\n\t\t\terr = wire.WriteMessage(mbh)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.WithStack(err)\n\t\t\t}\n\t\t}\n\t}\n\n\terr = wire.Close()\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\treturn nil\n}", "func getManifest(fimg *sif.FileImage) pluginapi.Manifest {\n\treturn pluginapi.Manifest{}\n}", "func buildManifestResourceMeta(\n\tindex int,\n\tmanifest workapiv1.Manifest,\n\trestMapper meta.RESTMapper) (resourceMeta workapiv1.ManifestResourceMeta, gvr schema.GroupVersionResource, err error) {\n\terrs := []error{}\n\n\tvar object runtime.Object\n\n\t// try to get resource meta from manifest if the one got from apply result is incompleted\n\tswitch {\n\tcase manifest.Object != nil:\n\t\tobject = manifest.Object\n\tdefault:\n\t\tunstructuredObj := &unstructured.Unstructured{}\n\t\tif err = unstructuredObj.UnmarshalJSON(manifest.Raw); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t\treturn resourceMeta, gvr, utilerrors.NewAggregate(errs)\n\t\t}\n\t\tobject = unstructuredObj\n\t}\n\tresourceMeta, gvr, err = buildResourceMeta(index, object, restMapper)\n\tif err == nil {\n\t\treturn resourceMeta, gvr, nil\n\t}\n\n\treturn resourceMeta, gvr, utilerrors.NewAggregate(errs)\n}", "func (mm 
manifestManager) Update(lastLock addr, newContents manifestContents, stats *Stats, writeHook func()) manifestContents {\n\tif upstream, _, hit := mm.cache.Get(mm.Name()); hit {\n\t\tif lastLock != upstream.lock {\n\t\t\treturn upstream\n\t\t}\n\t}\n\tt := time.Now()\n\n\tmm.lockOutFetch()\n\tdefer mm.allowFetch()\n\n\tcontents := mm.m.Update(lastLock, newContents, stats, writeHook)\n\tmm.cache.Put(mm.Name(), contents, t)\n\treturn contents\n}", "func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, MIMEType string, err error) {\n\tif instanceDigest != nil {\n\t\treturn nil, \"\", ErrNoManifestLists\n\t}\n\tif len(s.cachedManifest) == 0 {\n\t\t// We stored the manifest as an item named after storage.ImageDigestBigDataKey.\n\t\tcachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\ts.cachedManifest = cachedBlob\n\t}\n\treturn s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err\n}", "func (p *Processor) DeleteManifest(account keppel.Account, repo keppel.Repository, manifestDigest digest.Digest, actx keppel.AuditContext) error {\n\tvar (\n\t\ttagResults []keppel.Tag\n\t\ttags []string\n\t)\n\n\t_, err := p.db.Select(&tagResults,\n\t\t`SELECT * FROM tags WHERE repo_id = $1 AND digest = $2`,\n\t\trepo.ID, manifestDigest)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, tagResult := range tagResults {\n\t\ttags = append(tags, tagResult.Name)\n\t}\n\n\tresult, err := p.db.Exec(\n\t\t//this also deletes tags referencing this manifest because of \"ON DELETE CASCADE\"\n\t\t`DELETE FROM manifests WHERE repo_id = $1 AND digest = $2`,\n\t\trepo.ID, manifestDigest)\n\tif err != nil {\n\t\totherDigest, err2 := p.db.SelectStr(\n\t\t\t`SELECT parent_digest FROM manifest_manifest_refs WHERE repo_id = $1 AND child_digest = $2`,\n\t\t\trepo.ID, manifestDigest)\n\t\t// more than one manifest is referenced by another manifest\n\t\tif otherDigest != \"\" && err2 == nil {\n\t\t\treturn fmt.Errorf(\"cannot delete a manifest which is referenced by the manifest %s\", otherDigest)\n\t\t}\n\t\t// if the SELECT failed return the previous error to not shadow it\n\t\treturn err\n\t}\n\trowsDeleted, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif rowsDeleted == 0 {\n\t\treturn sql.ErrNoRows\n\t}\n\n\t//We delete in the storage *after* the deletion is durable in the DB to be\n\t//extra sure that we did not break any constraints (esp. manifest-manifest\n\t//refs and manifest-blob refs) that the DB enforces. Doing things in this\n\t//order might mean that, if DeleteManifest fails, we're left with a manifest\n\t//in the backing storage that is not referenced in the DB anymore, but this\n\t//is not a huge problem since the janitor can clean those up after the fact.\n\t//What's most important is that we don't lose any data in the backing storage\n\t//while it is still referenced in the DB.\n\t//\n\t//Also, the DELETE statement could fail if some concurrent process created a\n\t//manifest reference in the meantime. 
If that happens, and we have already\n\t//deleted the manifest in the backing storage, we've caused an inconsistency\n\t//that we cannot recover from.\n\terr = p.sd.DeleteManifest(account, repo.Name, manifestDigest)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif userInfo := actx.UserIdentity.UserInfo(); userInfo != nil {\n\t\tp.auditor.Record(audittools.EventParameters{\n\t\t\tTime: p.timeNow(),\n\t\t\tRequest: actx.Request,\n\t\t\tUser: userInfo,\n\t\t\tReasonCode: http.StatusOK,\n\t\t\tAction: cadf.DeleteAction,\n\t\t\tTarget: auditManifest{\n\t\t\t\tAccount: account,\n\t\t\t\tRepository: repo,\n\t\t\t\tDigest: manifestDigest,\n\t\t\t\tTags: tags,\n\t\t\t},\n\t\t})\n\t}\n\n\treturn nil\n}", "func GrowManifest(\n\tctx context.Context,\n\to *GrowManifestOptions,\n) error {\n\tvar err error\n\tvar riiCombined RegInvImage\n\n\t// (1) Scan the BaseDir and find the promoter manifest to modify.\n\tmanifest, err := FindManifest(o)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// (2) Scan the StagingRepo, and whittle the read results down with some\n\t// filters (Filter* fields in GrowManifestOptions).\n\triiUnfiltered, err := ReadStagingRepo(o)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// (3) Apply some filters.\n\triiFiltered, err := ApplyFilters(o, riiUnfiltered)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// (4) Inject (2)'s output into (1)'s manifest's images to create a larger\n\t// RegInvImage.\n\triiCombined = Union(manifest.ToRegInvImage(), riiFiltered)\n\n\t// (5) Write back RegInvImage as Manifest ([]Image field}) back onto disk.\n\terr = WriteImages(manifest, riiCombined)\n\n\treturn err\n}", "func increment(c *cli.Context) error {\n\tallFiles, err := findManifests(getGivenPathOrWorkingDir(c), handlers)\n\n\t// validations\n\tif err != nil {\n\t\treturn cli.NewExitError(err, 1)\n\t}\n\n\tif len(allFiles) == 0 {\n\t\treturn cli.NewExitError(\"No application has been found\", 2)\n\t}\n\n\tfor _, file := range allFiles {\n\t\tif file.HasError {\n\t\t\treturn cli.NewExitError(fmt.Sprintf(\"Invalid file content: %v\", file.Path), 3)\n\t\t}\n\t}\n\n\t// execute version update\n\tfor _, file := range allFiles {\n\n\t\tnewVersion := file.Version\n\t\tswitch incrementType := c.String(\"type\"); incrementType {\n\t\tcase \"major\":\n\t\t\tnewVersion = incrementMajor(file.Version)\n\t\tcase \"minor\":\n\t\t\tnewVersion = incrementMinor(file.Version)\n\t\tcase \"patch\":\n\t\t\tnewVersion = incrementPatch(file.Version)\n\t\tdefault:\n\t\t\treturn cli.NewExitError(fmt.Sprintf(\"Invalid type %v\", incrementType), 4)\n\t\t}\n\n\t\tsetVersion(file, newVersion)\n\t\tupdatedManifest, err := findManifests(file.Path, handlers)\n\n\t\tif err != nil {\n\t\t\treturn cli.NewExitError(err, 5)\n\t\t}\n\n\t\tfmt.Println(fmt.Sprintf(\"%v: New version: %v (%v)\", file.Version, updatedManifest[0].Version, file.Path))\n\t}\n\treturn nil\n}", "func (mh *MetadataHandler) HandlePutMetadata(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\n\tvar payload metadata.ApplicationMetadata\n\tb, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest) // 400\n\t\treturn\n\t}\n\terr = yaml.Unmarshal(b, &payload)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest) // 400\n\t\treturn\n\t}\n\n\t// validate the payload first\n\tvalid, desc := payload.IsValid()\n\tif !valid {\n\t\tw.WriteHeader(http.StatusBadRequest) // 400\n\t\tyaml.NewEncoder(w).Encode(desc)\n\t\treturn\n\t}\n\n\tvars := mux.Vars(r)\n\tvar (\n\t\tappID string\n\t\tok bool\n\t)\n\tif appID, ok = 
vars[\"appID\"]; !ok {\n\t\tw.WriteHeader(http.StatusBadRequest) // 400\n\t\treturn\n\t}\n\n\tpayload.ApplicationID = appID\n\n\terr = mh.Repository.Update(appID, &payload)\n\tif err != nil {\n\t\tif err == repository.ErrIDNotFound {\n\t\t\tw.WriteHeader(http.StatusConflict) // 409\n\t\t} else {\n\t\t\tw.WriteHeader(http.StatusInternalServerError) // 500\n\t\t}\n\t\tyaml.NewEncoder(w).Encode(err.Error())\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK) // 200\n\tyaml.NewEncoder(w).Encode(payload)\n}", "func manifestDispatcher(ctx *Context, r *http.Request) http.Handler {\n\tmanifestHandler := &manifestHandler{\n\t\tContext: ctx,\n\t}\n\tref := getReference(ctx)\n\tdgst, err := digest.Parse(ref)\n\tif err != nil {\n\t\t// We just have a tag\n\t\tmanifestHandler.Tag = ref\n\t} else {\n\t\tmanifestHandler.Digest = dgst\n\t}\n\n\tmhandler := handlers.MethodHandler{\n\t\thttp.MethodGet: http.HandlerFunc(manifestHandler.GetManifest),\n\t\thttp.MethodHead: http.HandlerFunc(manifestHandler.GetManifest),\n\t}\n\n\tif !ctx.readOnly {\n\t\tmhandler[http.MethodPut] = http.HandlerFunc(manifestHandler.PutManifest)\n\t\tmhandler[http.MethodDelete] = http.HandlerFunc(manifestHandler.DeleteManifest)\n\t}\n\n\treturn mhandler\n}", "func (s *Store) GetImageManifest(key string) (*schema.ImageManifest, error) {\n\timj, err := s.GetImageManifestJSON(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar im *schema.ImageManifest\n\tif err = json.Unmarshal(imj, &im); err != nil {\n\t\treturn nil, errwrap.Wrap(errors.New(\"error unmarshalling image manifest\"), err)\n\t}\n\treturn im, nil\n}", "func deployS3Manifest(c Client, log utils.Logger, pd *DeployData, params DeployAppParams, out *DeployAppResult) error {\n\tdata, err := json.Marshal(pd.Manifest)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"can't marshal manifest for app - %s\", pd.Manifest.AppID)\n\t}\n\tbuffer := bytes.NewBuffer(data)\n\n\t// Make the manifest publicly visible.\n\turl, err := c.UploadS3(params.Bucket, pd.ManifestKey, buffer, false)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"can't upload manifest file for the app - %s\", pd.Manifest.AppID)\n\t}\n\n\tout.Manifest = *pd.Manifest\n\tout.ManifestURL = url\n\tlog.Infow(\"Uploaded manifest to S3 (public-read)\", \"bucket\", params.Bucket, \"key\", pd.ManifestKey)\n\treturn nil\n}", "func AddonsBaseManifest(client clientset.Interface) error {\n\tcurrentClusterVersion, err := kubeadm.GetCurrentClusterVersion(client)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclusterConfiguration, err := kubeadm.GetClusterConfiguration(client)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Could not fetch cluster configuration\")\n\t}\n\n\t// re-render all addons manifest\n\taddonConfiguration := addons.AddonConfiguration{\n\t\tClusterVersion: currentClusterVersion,\n\t\tControlPlane: clusterConfiguration.ControlPlaneEndpoint,\n\t\tClusterName: clusterConfiguration.ClusterName,\n\t}\n\tfor addonName, addon := range addons.Addons {\n\t\tif !addon.IsPresentForClusterVersion(currentClusterVersion) {\n\t\t\tcontinue\n\t\t}\n\t\tif err := addon.Write(addonConfiguration); err != nil {\n\t\t\treturn errors.Wrapf(err, \"failed to refresh addon %s manifest\", string(addonName))\n\t\t}\n\t}\n\n\tfmt.Println(\"Successfully refreshed addons base manifests\")\n\treturn nil\n}", "func (esc *BgMetadataElasticSearchConnector) UpdateMetricMetadata(metric *Metric) error {\n\treturn esc.addDocumentToBuff(metric)\n}", "func ParseManifest(filename string) Manifest {\n\tlog.WithFields(log.Fields{\"From\": 
filename}).Info(\"Loading manifest\")\n\n\tfile, err := os.Open(filename)\n\tdefer file.Close()\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tvar data map[string]interface{}\n\n\tjsonParser := json.NewDecoder(file)\n\terr = jsonParser.Decode(&data)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tmWants := map[string]string{}\n\tmRepositories := map[string]ManifestRepositoryInfo{}\n\n\tif wants, ok := data[\"wants\"]; ok {\n\t\twants := wants.(map[string]interface{})\n\t\tfor w, v := range wants {\n\t\t\tmWants[w] = v.(string)\n\t\t}\n\t}\n\n\tif repositories, ok := data[\"repositories\"]; ok {\n\t\trepositories := repositories.(map[string]interface{})\n\t\tfor reponame, repo := range repositories {\n\n\t\t\trepo := repo.([]interface{})\n\t\t\trepopath := repo[0].(map[string]interface{})\n\t\t\trepovals := repo[1].(map[string]interface{})\n\n\t\t\tvar r = ManifestRepositoryInfo{\n\t\t\t\tValues: repovals,\n\t\t\t}\n\n\t\t\tif _, ok := repopath[\"file\"]; ok {\n\t\t\t\tr.SrcType = \"file\"\n\t\t\t\tr.SrcValue = repopath[\"file\"].(string)\n\t\t\t}\n\n\t\t\tfor k, v := range repovals {\n\t\t\t\tr.Values[k] = v.(string)\n\t\t\t}\n\n\t\t\tmRepositories[reponame] = r\n\t\t}\n\t}\n\n\tmanifest := Manifest{\n\t\tWants: mWants,\n\t\tRepositories: mRepositories,\n\n\t\tPackages: map[string]map[string]map[string]interface{}{},\n\t\tOutPackages: map[string]map[string]interface{}{},\n\t}\n\n\tfor rn, rv := range manifest.Repositories {\n\t\tlog.WithFields(log.Fields{\"Name\": rn}).Info(\"Loading repository\")\n\t\tr := RepositoryFromInfo(rv)\n\t\tfor pn, pf := range r.PackageFiles {\n\t\t\tfor pv, p := range pf.Packages {\n\t\t\t\tif l, ok := manifest.Packages[pn]; ok {\n\t\t\t\t\tl[pv] = p\n\t\t\t\t\tmanifest.Packages[pn] = l\n\t\t\t\t} else {\n\t\t\t\t\tmanifest.Packages[pn] = map[string]map[string]interface{}{pv: p}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tfor pn, pc := range manifest.Wants {\n\t\tmanifest.Filter(pn, pc)\n\t}\n\n\tfor pn := range manifest.Wants {\n\t\tmanifest.Resolve(pn, []string{})\n\t}\n\n\tfor pn := range manifest.Wants {\n\t\tmanifest.Merge(pn, []string{})\n\t}\n\n\tmanifest.Dump()\n\n\treturn manifest\n}", "func (mdm *MetaDagModifier) AddMetadata(root ipld.Node, metadata []byte) (ipld.Node, error) {\n\t// Read the existing metadata map from the first element of the metadata list.\n\tb, encodedTree, err := util.ReadMetadataListFromDag(mdm.ctx, root, mdm.dagserv, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttreeSize := len(encodedTree)\n\n\t// Determine the specific scenario.\n\t// Scenario #1:\n\tvar newMetaNode ipld.Node\n\tvar children *ft.DagMetaNodes\n\tif b == nil || util.IsMetadataEmpty(b) {\n\t\t// Create a metadata sub-DAG\n\t\tnewMetaNode, err = mdm.buildNewMetaDataDag(metadata, encodedTree)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tchildren, err = ft.GetChildrenForDagWithMeta(mdm.ctx, root, mdm.dagserv)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Create existing map and input metadata map and check.\n\t\tm := make(map[string]interface{})\n\t\terr = json.Unmarshal(b, &m)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tinputM := make(map[string]interface{})\n\t\terr = json.Unmarshal(metadata, &inputM)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\texists := util.Intersects(m, inputM)\n\t\t// Scenario #2: Update case.\n\t\tif exists {\n\t\t\tif !mdm.Overwrite {\n\t\t\t\treturn nil, errors.New(\"Found existing key value pairs. 
Use --overwrite to force to update.\")\n\t\t\t}\n\t\t\t// Truncate(0) on the metadata sub-DAG.\n\t\t\terr := mdm.Truncate(0)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t// iterate the inputM to put its (k, v) pairs to existing map.\n\t\t\tfor k, v := range inputM {\n\t\t\t\tm[k] = v\n\t\t\t}\n\t\t\tb, err = json.Marshal(m)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t// Create a metadata sub-DAG\n\t\t\tnewMetaNode, err = mdm.buildNewMetaDataDag(b, encodedTree)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t} else\n\t\t// Scenario #3: Append case.\n\t\t{\n\t\t\t// Append the given metadata items to the metadata sub-DAG.\n\t\t\tmdm.curNode = children.MetaNode\n\t\t\tfileSize, err := FileSize(mdm.curNode)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t// Combine two JSON format byte arrays. E.g.,\n\t\t\t// `{\"price\":12.22} + `{\"number\":1234}` -> `{\"price\":12.22,\"number\":1234}`\n\t\t\tmetadata[0] = ','\n\t\t\tmetadata = append(append(metadata[:], '#'), encodedTree[:]...)\n\t\t\toffset := int(fileSize) - treeSize - 2\n\t\t\tnmod, err := mdm.WriteAt(metadata, int64(offset))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif nmod != int(len(metadata)) {\n\t\t\t\treturn nil, errors.New(\"Modified length not correct!\")\n\t\t\t}\n\n\t\t\tnewMetaNode, err = mdm.GetNode()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\t// Attach the modified metadata sub-DAG to a new root for the BTFS file DAG.\n\tvar dnode ipld.Node\n\tif children == nil {\n\t\tdnode = root\n\t} else {\n\t\tdnode = children.DataNode\n\t}\n\n\tfileSize, err := FileSize(dnode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnewRoot, err := mdm.GetDb().AttachMetadataDag(dnode, fileSize, newMetaNode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmdm.GetDb().Add(newRoot)\n\n\treturn newRoot, nil\n}", "func RegisterManifestHandlerServer(ctx context.Context, mux *runtime.ServeMux, server ManifestServer) error {\n\n\tmux.Handle(\"POST\", pattern_Manifest_ManifestCreate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_Manifest_ManifestCreate_0(rctx, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Manifest_ManifestCreate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Manifest_ManifestConfigCreate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, 
req)\n\t\trctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_Manifest_ManifestConfigCreate_0(rctx, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Manifest_ManifestConfigCreate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\treturn nil\n}", "func (bm *BlobsManifest) Write() error {\n\n\tdirpath := blobsManifestDirPath(bm.BlobSum)\n\tutils.CreateDir(dirpath)\n\n\tfilePath := dirpath + \"/\" + bm.BlobSum\n\tutils.Remove(filePath)\n\n\tfd, err := os.Create(filePath)\n\tif err != nil {\n\t\tlogger.Errorf(\"create file %s error\\n\", filePath)\n\t\treturn err\n\t}\n\n\tdefer fd.Close()\n\n\tdata, _ := json.MarshalIndent(bm, \"\", \" \")\n\tfd.Write(data)\n\n\treturn nil\n}", "func ImageManifestSchema() *gojsonschema.Schema {\n\treturn loadSchema(\"image-manifest.schema.json\")\n}", "func (mc *manifestCache) Put(db string, contents manifestContents, t time.Time) error {\n\tmc.mu.Lock()\n\tdefer mc.mu.Unlock()\n\n\tif entry, ok := mc.entry(db); ok {\n\t\tmc.totalSize -= entry.contents.size()\n\t\tmc.lru.Remove(entry.lruEntry)\n\t\tdelete(mc.cache, db)\n\t}\n\n\tif contents.size() <= mc.maxSize {\n\t\tnewEl := mc.lru.PushBack(db)\n\t\tce := manifestCacheEntry{lruEntry: newEl, contents: contents, t: t}\n\t\tmc.cache[db] = ce\n\t\tmc.totalSize += ce.contents.size()\n\t\tfor el := mc.lru.Front(); el != nil && mc.totalSize > mc.maxSize; {\n\t\t\tkey1 := el.Value.(string)\n\t\t\tce, ok := mc.cache[key1]\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"manifestCache is missing expected value for %s\", key1)\n\t\t\t}\n\t\t\tnext := el.Next()\n\t\t\tdelete(mc.cache, key1)\n\t\t\tmc.totalSize -= ce.contents.size()\n\t\t\tmc.lru.Remove(el)\n\t\t\tel = next\n\t\t}\n\t}\n\n\treturn nil\n}", "func (sd *SiaDir) UpdateMetadata(metadata Metadata) error {\n\tsd.mu.Lock()\n\tdefer sd.mu.Unlock()\n\tsd.metadata.AggregateHealth = metadata.AggregateHealth\n\tsd.metadata.AggregateLastHealthCheckTime = metadata.AggregateLastHealthCheckTime\n\tsd.metadata.AggregateMinRedundancy = metadata.AggregateMinRedundancy\n\tsd.metadata.AggregateModTime = metadata.AggregateModTime\n\tsd.metadata.AggregateNumFiles = metadata.AggregateNumFiles\n\tsd.metadata.AggregateNumStuckChunks = metadata.AggregateNumStuckChunks\n\tsd.metadata.AggregateNumSubDirs = metadata.AggregateNumSubDirs\n\tsd.metadata.AggregateSize = metadata.AggregateSize\n\tsd.metadata.AggregateStuckHealth = metadata.AggregateStuckHealth\n\n\tsd.metadata.Health = metadata.Health\n\tsd.metadata.LastHealthCheckTime = metadata.LastHealthCheckTime\n\tsd.metadata.MinRedundancy = metadata.MinRedundancy\n\tsd.metadata.ModTime = metadata.ModTime\n\tsd.metadata.NumFiles = metadata.NumFiles\n\tsd.metadata.NumStuckChunks = metadata.NumStuckChunks\n\tsd.metadata.NumSubDirs = metadata.NumSubDirs\n\tsd.metadata.Size = metadata.Size\n\tsd.metadata.StuckHealth = metadata.StuckHealth\n\treturn sd.saveDir()\n}", "func (j *Janitor) SyncManifestsInNextRepo() (returnErr error) {\n\tdefer func() {\n\t\tif returnErr == nil {\n\t\t\tsyncManifestsSuccessCounter.Inc()\n\t\t} else if returnErr != sql.ErrNoRows 
{\n\t\t\tsyncManifestsFailedCounter.Inc()\n\t\t\treturnErr = fmt.Errorf(\"while syncing manifests in a replica repo: %s\", returnErr.Error())\n\t\t}\n\t}()\n\n\t//find repository to sync\n\tvar repo keppel.Repository\n\terr := j.db.SelectOne(&repo, syncManifestRepoSelectQuery, j.timeNow())\n\tif err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\tlogg.Debug(\"no accounts to sync manifests in - slowing down...\")\n\t\t\treturn sql.ErrNoRows\n\t\t}\n\t\treturn err\n\t}\n\n\t//find corresponding account\n\taccount, err := keppel.FindAccount(j.db, repo.AccountName)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot find account for repo %s: %s\", repo.FullName(), err.Error())\n\t}\n\n\t//do not perform manifest sync while account is in maintenance (maintenance mode blocks all kinds of replication)\n\tif !account.InMaintenance {\n\t\terr = j.performManifestSync(*account, repo)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t_, err = j.db.Exec(syncManifestDoneQuery, repo.ID, j.timeNow().Add(1*time.Hour))\n\treturn err\n}", "func (is *ImageStoreLocal) GetImageManifest(repo, reference string) ([]byte, godigest.Digest, string, error) {\n\tdir := path.Join(is.rootDir, repo)\n\tif !is.DirExists(dir) {\n\t\treturn nil, \"\", \"\", zerr.ErrRepoNotFound\n\t}\n\n\tvar lockLatency time.Time\n\n\tvar err error\n\n\tis.RLock(&lockLatency)\n\tdefer func() {\n\t\tis.RUnlock(&lockLatency)\n\n\t\tif err == nil {\n\t\t\tmonitoring.IncDownloadCounter(is.metrics, repo)\n\t\t}\n\t}()\n\n\tindex, err := common.GetIndex(is, repo, is.log)\n\tif err != nil {\n\t\treturn nil, \"\", \"\", err\n\t}\n\n\tmanifestDesc, found := common.GetManifestDescByReference(index, reference)\n\tif !found {\n\t\treturn nil, \"\", \"\", zerr.ErrManifestNotFound\n\t}\n\n\tbuf, err := is.GetBlobContent(repo, manifestDesc.Digest)\n\tif err != nil {\n\t\tif errors.Is(err, zerr.ErrBlobNotFound) {\n\t\t\treturn nil, \"\", \"\", zerr.ErrManifestNotFound\n\t\t}\n\n\t\treturn nil, \"\", \"\", err\n\t}\n\n\tvar manifest ispec.Manifest\n\tif err := json.Unmarshal(buf, &manifest); err != nil {\n\t\tis.log.Error().Err(err).Str(\"dir\", dir).Msg(\"invalid JSON\")\n\n\t\treturn nil, \"\", \"\", err\n\t}\n\n\treturn buf, manifestDesc.Digest, manifestDesc.MediaType, nil\n}", "func (_m *Backend) UpdateManifest(ctx context.Context, request *models.UpdateManifestRequest) error {\n\tret := _m.Called(ctx, request)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, *models.UpdateManifestRequest) error); ok {\n\t\tr0 = rf(ctx, request)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *GGCRImage) Manifest() (*v1.Manifest, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Manifest\")\n\tret0, _ := ret[0].(*v1.Manifest)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func ReadManifest(manifestReader savior.SeekSource) (*tlc.Container, *BlockHashMap, error) {\n\tcontainer := &tlc.Container{}\n\tblockHashes := NewBlockHashMap()\n\n\trawWire := wire.NewReadContext(manifestReader)\n\terr := rawWire.ExpectMagic(pwr.ManifestMagic)\n\tif err != nil {\n\t\treturn nil, nil, errors.WithStack(err)\n\t}\n\n\tmh := &pwr.ManifestHeader{}\n\terr = rawWire.ReadMessage(mh)\n\tif err != nil {\n\t\treturn nil, nil, errors.WithStack(err)\n\t}\n\n\tif mh.Algorithm != pwr.HashAlgorithm_SHAKE128_32 {\n\t\terr = fmt.Errorf(\"Manifest has unsupported hash algorithm %d, expected %d\", mh.Algorithm, pwr.HashAlgorithm_SHAKE128_32)\n\t\treturn nil, nil, errors.WithStack(err)\n\t}\n\n\twire, err := pwr.DecompressWire(rawWire, 
mh.GetCompression())\n\tif err != nil {\n\t\treturn nil, nil, errors.WithStack(err)\n\t}\n\n\terr = wire.ReadMessage(container)\n\tif err != nil {\n\t\treturn nil, nil, errors.WithStack(err)\n\t}\n\n\tsh := &pwr.SyncHeader{}\n\tmbh := &pwr.ManifestBlockHash{}\n\n\tfor fileIndex, f := range container.Files {\n\t\tsh.Reset()\n\t\terr = wire.ReadMessage(sh)\n\t\tif err != nil {\n\t\t\treturn nil, nil, errors.WithStack(err)\n\t\t}\n\n\t\tif int64(fileIndex) != sh.FileIndex {\n\t\t\terr = fmt.Errorf(\"manifest format error: expected file %d, got %d\", fileIndex, sh.FileIndex)\n\t\t\treturn nil, nil, errors.WithStack(err)\n\t\t}\n\n\t\tnumBlocks := ComputeNumBlocks(f.Size)\n\t\tfor blockIndex := int64(0); blockIndex < numBlocks; blockIndex++ {\n\t\t\tmbh.Reset()\n\t\t\terr = wire.ReadMessage(mbh)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, errors.WithStack(err)\n\t\t\t}\n\n\t\t\tloc := BlockLocation{FileIndex: int64(fileIndex), BlockIndex: blockIndex}\n\t\t\tblockHashes.Set(loc, append([]byte{}, mbh.Hash...))\n\t\t}\n\t}\n\n\treturn container, blockHashes, nil\n}", "func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, store content.Store, limiter *semaphore.Weighted, platform platforms.MatchComparer, wrapper func(h images.Handler) images.Handler) error {\n\n\tvar m sync.Mutex\n\tmanifestStack := []ocispec.Descriptor{}\n\n\tfilterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {\n\t\tswitch desc.MediaType {\n\t\tcase images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest,\n\t\t\timages.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:\n\t\t\tm.Lock()\n\t\t\tmanifestStack = append(manifestStack, desc)\n\t\t\tm.Unlock()\n\t\t\treturn nil, images.ErrStopHandler\n\t\tdefault:\n\t\t\treturn nil, nil\n\t\t}\n\t})\n\n\tpushHandler := PushHandler(pusher, store)\n\n\tplatformFilterhandler := images.FilterPlatforms(images.ChildrenHandler(store), platform)\n\n\tannotateHandler := annotateDistributionSourceHandler(platformFilterhandler, store)\n\n\tvar handler images.Handler = images.Handlers(\n\t\tannotateHandler,\n\t\tfilterHandler,\n\t\tpushHandler,\n\t)\n\tif wrapper != nil {\n\t\thandler = wrapper(handler)\n\t}\n\n\tif err := images.Dispatch(ctx, handler, limiter, desc); err != nil {\n\t\treturn err\n\t}\n\n\t// Iterate in reverse order as seen, parent always uploaded after child\n\tfor i := len(manifestStack) - 1; i >= 0; i-- {\n\t\t_, err := pushHandler(ctx, manifestStack[i])\n\t\tif err != nil {\n\t\t\t// TODO(estesp): until we have a more complete method for index push, we need to report\n\t\t\t// missing dependencies in an index/manifest list by sensing the \"400 Bad Request\"\n\t\t\t// as a marker for this problem\n\t\t\tif (manifestStack[i].MediaType == ocispec.MediaTypeImageIndex ||\n\t\t\t\tmanifestStack[i].MediaType == images.MediaTypeDockerSchema2ManifestList) &&\n\t\t\t\terrors.Unwrap(err) != nil && strings.Contains(errors.Unwrap(err).Error(), \"400 Bad Request\") {\n\t\t\t\treturn fmt.Errorf(\"manifest list/index references to blobs and/or manifests are missing in your target registry: %w\", err)\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (s *Store) GetImageManifestJSON(key string) ([]byte, error) {\n\tkey, err := s.ResolveKey(key)\n\tif err != nil {\n\t\treturn nil, errwrap.Wrap(errors.New(\"error resolving image ID\"), err)\n\t}\n\tkeyLock, err := lock.SharedKeyLock(s.imageLockDir, key)\n\tif err != nil {\n\t\treturn nil, 
errwrap.Wrap(errors.New(\"error locking image\"), err)\n\t}\n\tdefer keyLock.Close()\n\n\timj, err := s.stores[imageManifestType].Read(key)\n\tif err != nil {\n\t\treturn nil, errwrap.Wrap(errors.New(\"error retrieving image manifest\"), err)\n\t}\n\treturn imj, nil\n}" ]
[ "0.7010766", "0.66876704", "0.6556861", "0.5991127", "0.59570235", "0.5899974", "0.5800305", "0.5798259", "0.5712472", "0.5641731", "0.5567172", "0.55645925", "0.5563863", "0.5545608", "0.55113065", "0.54682016", "0.54171425", "0.5391867", "0.53072697", "0.5294346", "0.52491206", "0.52232593", "0.5156525", "0.51499933", "0.5138121", "0.5096326", "0.50845486", "0.5083501", "0.50699955", "0.5051783", "0.5048233", "0.5014765", "0.501162", "0.49990457", "0.49907577", "0.4988504", "0.49797848", "0.49776164", "0.49662793", "0.49609208", "0.4952872", "0.49408513", "0.49341825", "0.49028334", "0.490021", "0.48941484", "0.4892671", "0.4892144", "0.48871708", "0.4878904", "0.4874638", "0.48498702", "0.48375973", "0.48359916", "0.48343647", "0.48145634", "0.47916007", "0.47896552", "0.47771862", "0.47771418", "0.47724852", "0.47645622", "0.47556284", "0.4754419", "0.47526306", "0.4747289", "0.47411466", "0.47353104", "0.47138062", "0.4700822", "0.46996227", "0.4699013", "0.46972016", "0.4690487", "0.468993", "0.46839532", "0.46795058", "0.46728757", "0.46684164", "0.46654987", "0.4664932", "0.46573317", "0.46424815", "0.46371606", "0.46355888", "0.46300554", "0.46164542", "0.4615714", "0.46127918", "0.46120253", "0.46065447", "0.46009424", "0.45915487", "0.45907688", "0.45863208", "0.4582113", "0.457934", "0.4574636", "0.45704705", "0.45603585" ]
0.8265896
0
OnDeleteManifest is called when a manifest is deleted. It updates metadb according to the type of image pushed (normal images, signatures, etc.). In case of any errors, it makes sure to keep consistency between metadb and the image store.
func OnDeleteManifest(repo, reference, mediaType string, digest godigest.Digest, manifestBlob []byte,
	storeController storage.StoreController, metaDB mTypes.MetaDB, log log.Logger,
) error {
	imgStore := storeController.GetImageStore(repo)

	isSignature, signatureType, signedManifestDigest, err := storage.CheckIsImageSignature(repo, manifestBlob, reference)
	if err != nil {
		log.Error().Err(err).Msg("can't check if image is a signature or not")

		return err
	}

	manageRepoMetaSuccessfully := true

	if isSignature {
		err = metaDB.DeleteSignature(repo, signedManifestDigest, mTypes.SignatureMetadata{
			SignatureDigest: digest.String(),
			SignatureType:   signatureType,
		})
		if err != nil {
			log.Error().Err(err).Msg("metadb: can't check if image is a signature or not")

			manageRepoMetaSuccessfully = false
		}
	} else {
		err = metaDB.DeleteRepoTag(repo, reference)
		if err != nil {
			log.Info().Msg("metadb: restoring image store")

			// restore image store
			_, _, err := imgStore.PutImageManifest(repo, reference, mediaType, manifestBlob)
			if err != nil {
				log.Error().Err(err).Msg("metadb: error while restoring image store, database is not consistent")
			}

			manageRepoMetaSuccessfully = false
		}

		if referredDigest, hasSubject := common.GetReferredSubject(manifestBlob); hasSubject {
			err := metaDB.DeleteReferrer(repo, referredDigest, digest)
			if err != nil {
				log.Error().Err(err).Msg("metadb: error while deleting referrer")

				return err
			}
		}
	}

	if !manageRepoMetaSuccessfully {
		log.Info().Str("tag", reference).Str("repository", repo).
			Msg("metadb: deleting image meta was unsuccessful for tag in repo")

		return err
	}

	return nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (imh *manifestHandler) DeleteManifest(w http.ResponseWriter, r *http.Request) {\n\tdcontext.GetLogger(imh).Debug(\"DeleteImageManifest\")\n\n\tif imh.App.isCache {\n\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported)\n\t\treturn\n\t}\n\n\tif imh.Tag != \"\" {\n\t\tdcontext.GetLogger(imh).Debug(\"DeleteImageTag\")\n\t\ttagService := imh.Repository.Tags(imh.Context)\n\t\tif err := tagService.Untag(imh.Context, imh.Tag); err != nil {\n\t\t\tswitch err.(type) {\n\t\t\tcase distribution.ErrTagUnknown, driver.PathNotFoundError:\n\t\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err))\n\t\t\tdefault:\n\t\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tw.WriteHeader(http.StatusAccepted)\n\t\treturn\n\t}\n\n\tmanifests, err := imh.Repository.Manifests(imh)\n\tif err != nil {\n\t\timh.Errors = append(imh.Errors, err)\n\t\treturn\n\t}\n\n\terr = manifests.Delete(imh, imh.Digest)\n\tif err != nil {\n\t\tswitch err {\n\t\tcase digest.ErrDigestUnsupported:\n\t\tcase digest.ErrDigestInvalidFormat:\n\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid)\n\t\t\treturn\n\t\tcase distribution.ErrBlobUnknown:\n\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown)\n\t\t\treturn\n\t\tcase distribution.ErrUnsupported:\n\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported)\n\t\t\treturn\n\t\tdefault:\n\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown)\n\t\t\treturn\n\t\t}\n\t}\n\n\ttagService := imh.Repository.Tags(imh)\n\treferencedTags, err := tagService.Lookup(imh, distribution.Descriptor{Digest: imh.Digest})\n\tif err != nil {\n\t\timh.Errors = append(imh.Errors, err)\n\t\treturn\n\t}\n\n\tfor _, tag := range referencedTags {\n\t\tif err := tagService.Untag(imh, tag); err != nil {\n\t\t\timh.Errors = append(imh.Errors, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tw.WriteHeader(http.StatusAccepted)\n}", "func (p *Processor) DeleteManifest(account keppel.Account, repo keppel.Repository, manifestDigest digest.Digest, actx keppel.AuditContext) error {\n\tvar (\n\t\ttagResults []keppel.Tag\n\t\ttags []string\n\t)\n\n\t_, err := p.db.Select(&tagResults,\n\t\t`SELECT * FROM tags WHERE repo_id = $1 AND digest = $2`,\n\t\trepo.ID, manifestDigest)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, tagResult := range tagResults {\n\t\ttags = append(tags, tagResult.Name)\n\t}\n\n\tresult, err := p.db.Exec(\n\t\t//this also deletes tags referencing this manifest because of \"ON DELETE CASCADE\"\n\t\t`DELETE FROM manifests WHERE repo_id = $1 AND digest = $2`,\n\t\trepo.ID, manifestDigest)\n\tif err != nil {\n\t\totherDigest, err2 := p.db.SelectStr(\n\t\t\t`SELECT parent_digest FROM manifest_manifest_refs WHERE repo_id = $1 AND child_digest = $2`,\n\t\t\trepo.ID, manifestDigest)\n\t\t// more than one manifest is referenced by another manifest\n\t\tif otherDigest != \"\" && err2 == nil {\n\t\t\treturn fmt.Errorf(\"cannot delete a manifest which is referenced by the manifest %s\", otherDigest)\n\t\t}\n\t\t// if the SELECT failed return the previous error to not shadow it\n\t\treturn err\n\t}\n\trowsDeleted, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif rowsDeleted == 0 {\n\t\treturn sql.ErrNoRows\n\t}\n\n\t//We delete in the storage *after* the deletion is durable in the DB to be\n\t//extra sure that we did not break any constraints (esp. manifest-manifest\n\t//refs and manifest-blob refs) that the DB enforces. 
Doing things in this\n\t//order might mean that, if DeleteManifest fails, we're left with a manifest\n\t//in the backing storage that is not referenced in the DB anymore, but this\n\t//is not a huge problem since the janitor can clean those up after the fact.\n\t//What's most important is that we don't lose any data in the backing storage\n\t//while it is still referenced in the DB.\n\t//\n\t//Also, the DELETE statement could fail if some concurrent process created a\n\t//manifest reference in the meantime. If that happens, and we have already\n\t//deleted the manifest in the backing storage, we've caused an inconsistency\n\t//that we cannot recover from.\n\terr = p.sd.DeleteManifest(account, repo.Name, manifestDigest)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif userInfo := actx.UserIdentity.UserInfo(); userInfo != nil {\n\t\tp.auditor.Record(audittools.EventParameters{\n\t\t\tTime: p.timeNow(),\n\t\t\tRequest: actx.Request,\n\t\t\tUser: userInfo,\n\t\t\tReasonCode: http.StatusOK,\n\t\t\tAction: cadf.DeleteAction,\n\t\t\tTarget: auditManifest{\n\t\t\t\tAccount: account,\n\t\t\t\tRepository: repo,\n\t\t\t\tDigest: manifestDigest,\n\t\t\t\tTags: tags,\n\t\t\t},\n\t\t})\n\t}\n\n\treturn nil\n}", "func OnUpdateManifest(repo, reference, mediaType string, digest godigest.Digest, body []byte,\n\tstoreController storage.StoreController, metaDB mTypes.MetaDB, log log.Logger,\n) error {\n\timgStore := storeController.GetImageStore(repo)\n\n\t// check if image is a signature\n\tisSignature, signatureType, signedManifestDigest, err := storage.CheckIsImageSignature(repo, body, reference)\n\tif err != nil {\n\t\tlog.Error().Err(err).Msg(\"can't check if image is a signature or not\")\n\n\t\tif err := imgStore.DeleteImageManifest(repo, reference, false); err != nil {\n\t\t\tlog.Error().Err(err).Str(\"manifest\", reference).Str(\"repository\", repo).Msg(\"couldn't remove image manifest in repo\")\n\n\t\t\treturn err\n\t\t}\n\n\t\treturn err\n\t}\n\n\tmetadataSuccessfullySet := true\n\n\tif isSignature {\n\t\tlayersInfo, errGetLayers := GetSignatureLayersInfo(repo, reference, digest.String(), signatureType, body,\n\t\t\timgStore, log)\n\t\tif errGetLayers != nil {\n\t\t\tmetadataSuccessfullySet = false\n\t\t\terr = errGetLayers\n\t\t} else {\n\t\t\terr = metaDB.AddManifestSignature(repo, signedManifestDigest, mTypes.SignatureMetadata{\n\t\t\t\tSignatureType: signatureType,\n\t\t\t\tSignatureDigest: digest.String(),\n\t\t\t\tLayersInfo: layersInfo,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tlog.Error().Err(err).Msg(\"metadb: error while putting repo meta\")\n\t\t\t\tmetadataSuccessfullySet = false\n\t\t\t} else {\n\t\t\t\terr = metaDB.UpdateSignaturesValidity(repo, signedManifestDigest)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error().Err(err).Str(\"repository\", repo).Str(\"reference\", reference).Str(\"digest\",\n\t\t\t\t\t\tsignedManifestDigest.String()).Msg(\"metadb: failed verify signatures validity for signed image\")\n\t\t\t\t\tmetadataSuccessfullySet = false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\terr = SetImageMetaFromInput(repo, reference, mediaType, digest, body,\n\t\t\timgStore, metaDB, log)\n\t\tif err != nil {\n\t\t\tmetadataSuccessfullySet = false\n\t\t}\n\t}\n\n\tif !metadataSuccessfullySet {\n\t\tlog.Info().Str(\"tag\", reference).Str(\"repository\", repo).Msg(\"uploading image meta was unsuccessful for tag in repo\")\n\n\t\tif err := imgStore.DeleteImageManifest(repo, reference, false); err != nil {\n\t\t\tlog.Error().Err(err).Str(\"reference\", reference).Str(\"repository\", 
repo).\n\t\t\t\tMsg(\"couldn't remove image manifest in repo\")\n\n\t\t\treturn err\n\t\t}\n\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (is *ImageStoreLocal) DeleteImageManifest(repo, reference string, detectCollision bool) error {\n\tdir := path.Join(is.rootDir, repo)\n\tif !is.DirExists(dir) {\n\t\treturn zerr.ErrRepoNotFound\n\t}\n\n\tvar lockLatency time.Time\n\n\tvar err error\n\n\tis.Lock(&lockLatency)\n\tdefer func() {\n\t\tis.Unlock(&lockLatency)\n\n\t\tif err == nil {\n\t\t\tmonitoring.SetStorageUsage(is.metrics, is.rootDir, repo)\n\t\t}\n\t}()\n\n\tindex, err := common.GetIndex(is, repo, is.log)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmanifestDesc, err := common.RemoveManifestDescByReference(&index, reference, detectCollision)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = common.UpdateIndexWithPrunedImageManifests(is, &index, repo, manifestDesc, manifestDesc.Digest, is.log)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// now update \"index.json\"\n\tdir = path.Join(is.rootDir, repo)\n\tfile := path.Join(dir, \"index.json\")\n\n\tbuf, err := json.Marshal(index)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := is.writeFile(file, buf); err != nil {\n\t\treturn err\n\t}\n\n\t// Delete blob only when blob digest not present in manifest entry.\n\t// e.g. 1.0.1 & 1.0.2 have same blob digest so if we delete 1.0.1, blob should not be removed.\n\ttoDelete := true\n\n\tfor _, manifest := range index.Manifests {\n\t\tif manifestDesc.Digest.String() == manifest.Digest.String() {\n\t\t\ttoDelete = false\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif toDelete {\n\t\tp := path.Join(dir, \"blobs\", manifestDesc.Digest.Algorithm().String(), manifestDesc.Digest.Encoded())\n\n\t\t_ = os.Remove(p)\n\t}\n\n\treturn nil\n}", "func (is *ObjectStorage) DeleteImageManifest(repo, reference string, detectCollisions bool) error {\n\tdir := path.Join(is.rootDir, repo)\n\tif fi, err := is.store.Stat(context.Background(), dir); err != nil || !fi.IsDir() {\n\t\treturn zerr.ErrRepoNotFound\n\t}\n\n\tvar lockLatency time.Time\n\n\tvar err error\n\n\tis.Lock(&lockLatency)\n\tdefer func() {\n\t\tis.Unlock(&lockLatency)\n\n\t\tif err == nil {\n\t\t\tmonitoring.SetStorageUsage(is.metrics, is.rootDir, repo)\n\t\t}\n\t}()\n\n\tindex, err := common.GetIndex(is, repo, is.log)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmanifestDesc, err := common.RemoveManifestDescByReference(&index, reference, detectCollisions)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = common.UpdateIndexWithPrunedImageManifests(is, &index, repo, manifestDesc, manifestDesc.Digest, is.log)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// now update \"index.json\"\n\tdir = path.Join(is.rootDir, repo)\n\tfile := path.Join(dir, \"index.json\")\n\n\tbuf, err := json.Marshal(index)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := writeFile(is.store, file, buf); err != nil {\n\t\tis.log.Debug().Str(\"deleting reference\", reference).Msg(\"\")\n\n\t\treturn err\n\t}\n\n\t// Delete blob only when blob digest not present in manifest entry.\n\t// e.g. 
1.0.1 & 1.0.2 have same blob digest so if we delete 1.0.1, blob should not be removed.\n\ttoDelete := true\n\n\tfor _, manifest := range index.Manifests {\n\t\tif manifestDesc.Digest.String() == manifest.Digest.String() {\n\t\t\ttoDelete = false\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif toDelete {\n\t\tp := path.Join(dir, \"blobs\", manifestDesc.Digest.Algorithm().String(), manifestDesc.Digest.Encoded())\n\n\t\terr = is.store.Delete(context.Background(), p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func OnGetManifest(name, reference string, body []byte,\n\tstoreController storage.StoreController, metaDB mTypes.MetaDB, log log.Logger,\n) error {\n\t// check if image is a signature\n\tisSignature, _, _, err := storage.CheckIsImageSignature(name, body, reference)\n\tif err != nil {\n\t\tlog.Error().Err(err).Msg(\"can't check if manifest is a signature or not\")\n\n\t\treturn err\n\t}\n\n\tif !isSignature {\n\t\terr := metaDB.IncrementImageDownloads(name, reference)\n\t\tif err != nil {\n\t\t\tlog.Error().Err(err).Str(\"repository\", name).Str(\"reference\", reference).\n\t\t\t\tMsg(\"unexpected error for image\")\n\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (mr *ManifestResource) Delete(_ http.ResponseWriter, req *http.Request, _ httprouter.Params) restful.Exchanger {\n\treturn &DELETEManifestHandler{\n\t\tState: mr.context.liveState(),\n\t\tQueryValues: mr.ParseQuery(req),\n\t\tStateWriter: sous.StateWriter(mr.context.StateManager),\n\t}\n}", "func DeleteManifest(registry, repository string, manifest manifest.Data) error {\n\t// Will perform an actual delete\n\tdeleteCmd := newDeleteManifestsCommand(registry, repository, manifest.Digest)\n\n\tvar outb bytes.Buffer\n\tdeleteCmd.Stdout = &outb\n\n\treturn deleteCmd.Run()\n}", "func (d *swiftDriver) DeleteManifest(account keppel.Account, repoName string, manifestDigest digest.Digest) error {\n\tc, _, err := d.getBackendConnection(account)\n\tif err != nil {\n\t\treturn err\n\t}\n\to := manifestObject(c, repoName, manifestDigest)\n\treturn o.Delete(nil, nil)\n}", "func (m *manifestService) Delete(ctx context.Context, dgst digest.Digest) error {\n\tcontext.GetLogger(ctx).Debugf(\"(*manifestService).Delete\")\n\treturn m.manifests.Delete(withRepository(ctx, m.repo), dgst)\n}", "func (bm *BlobsManifest) Delete() error {\n\n\tfor _, chunk := range bm.Chunks {\n\t\t// for Huge Blob mode, no need remove blobs\n\t\t_, _, length := utils.ParseBlobDigest(chunk)\n\t\tif length != 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tb := blobs.GetBlobPartial(\"\", chunk)\n\t\tif b != nil {\n\t\t\tb.Delete()\n\t\t}\n\t}\n\n\t// to remove Huge Blob Image\n\timageDir := configuration.RootDirectory() + manifest.ManifestDir + \"/\" + bm.BlobSum\n\tutils.RemoveDir(imageDir)\n\n\tutils.Remove(blobsManifestPath(bm.BlobSum))\n\n\treturn nil\n}", "func (s *s3ManifestService) Delete(ctx context.Context, dgst godigest.Digest) error {\n\treturn fmt.Errorf(\"unimplemented\")\n}", "func (mh *MetadataHandler) HandleDeleteMetadata(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\n\tvars := mux.Vars(r)\n\tvar (\n\t\tappID string\n\t\tok bool\n\t)\n\tif appID, ok = vars[\"appID\"]; !ok {\n\t\tw.WriteHeader(http.StatusBadRequest) // 400\n\t\treturn\n\t}\n\n\terr := mh.Repository.Delete(appID)\n\tif err != nil {\n\t\tif err == repository.ErrIDNotFound {\n\t\t\tw.WriteHeader(http.StatusConflict) // 409\n\t\t} else {\n\t\t\tw.WriteHeader(http.StatusInternalServerError) // 
500\n\t\t}\n\t\tyaml.NewEncoder(w).Encode(err.Error())\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusNoContent) // 204\n}", "func deleteManifests(ctx context.Context, acrClient api.AcrCLIClientInterface, loginURL string, repoName string, args []string) error {\n\tfor i := 0; i < len(args); i++ {\n\t\t_, err := acrClient.DeleteManifest(ctx, repoName, args[i])\n\t\tif err != nil {\n\t\t\t// If there is an error (this includes not found and not allowed operations) the deletion of the images is stopped and an error is returned.\n\t\t\treturn errors.Wrap(err, \"failed to delete manifests\")\n\t\t}\n\t\tfmt.Printf(\"%s/%s@%s\\n\", loginURL, repoName, args[i])\n\t}\n\treturn nil\n}", "func revertToManifest(kv *DB, mf *Manifest, idMap map[uint64]struct{}) error {\n\t// 1. Check all files in manifest exist.\n\tfor id := range mf.Tables {\n\t\tif _, ok := idMap[id]; !ok {\n\t\t\treturn fmt.Errorf(\"file does not exist for table %d\", id)\n\t\t}\n\t}\n\n\t// 2. Delete files that shouldn't exist.\n\tfor id := range idMap {\n\t\tif _, ok := mf.Tables[id]; !ok {\n\t\t\tkv.elog.Printf(\"Table file %d not referenced in MANIFEST\\n\", id)\n\t\t\tfilename := table.NewFilename(id, kv.opt.Dir)\n\t\t\tif err := os.Remove(filename); err != nil {\n\t\t\t\treturn y.Wrapf(err, \"While removing table %d\", id)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) {\n\tdcontext.GetLogger(imh).Debug(\"PutImageManifest\")\n\tmanifests, err := imh.Repository.Manifests(imh)\n\tif err != nil {\n\t\timh.Errors = append(imh.Errors, err)\n\t\treturn\n\t}\n\n\tvar jsonBuf bytes.Buffer\n\tif err := copyFullPayload(imh, w, r, &jsonBuf, maxManifestBodySize, \"image manifest PUT\"); err != nil {\n\t\t// copyFullPayload reports the error if necessary\n\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err.Error()))\n\t\treturn\n\t}\n\n\tmediaType := r.Header.Get(\"Content-Type\")\n\tmanifest, desc, err := distribution.UnmarshalManifest(mediaType, jsonBuf.Bytes())\n\tif err != nil {\n\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err))\n\t\treturn\n\t}\n\n\tif imh.Digest != \"\" {\n\t\tif desc.Digest != imh.Digest {\n\t\t\tdcontext.GetLogger(imh).Errorf(\"payload digest does not match: %q != %q\", desc.Digest, imh.Digest)\n\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid)\n\t\t\treturn\n\t\t}\n\t} else if imh.Tag != \"\" {\n\t\timh.Digest = desc.Digest\n\t} else {\n\t\timh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail(\"no tag or digest specified\"))\n\t\treturn\n\t}\n\n\tisAnOCIManifest := mediaType == v1.MediaTypeImageManifest || mediaType == v1.MediaTypeImageIndex\n\n\tif isAnOCIManifest {\n\t\tdcontext.GetLogger(imh).Debug(\"Putting an OCI Manifest!\")\n\t} else {\n\t\tdcontext.GetLogger(imh).Debug(\"Putting a Docker Manifest!\")\n\t}\n\n\tvar options []distribution.ManifestServiceOption\n\tif imh.Tag != \"\" {\n\t\toptions = append(options, distribution.WithTag(imh.Tag))\n\t}\n\n\tif err := imh.applyResourcePolicy(manifest); err != nil {\n\t\timh.Errors = append(imh.Errors, err)\n\t\treturn\n\t}\n\n\t_, err = manifests.Put(imh, manifest, options...)\n\tif err != nil {\n\t\t// TODO(stevvooe): These error handling switches really need to be\n\t\t// handled by an app global mapper.\n\t\tif err == distribution.ErrUnsupported {\n\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported)\n\t\t\treturn\n\t\t}\n\t\tif err == distribution.ErrAccessDenied {\n\t\t\timh.Errors = 
append(imh.Errors, errcode.ErrorCodeDenied)\n\t\t\treturn\n\t\t}\n\t\tswitch err := err.(type) {\n\t\tcase distribution.ErrManifestVerification:\n\t\t\tfor _, verificationError := range err {\n\t\t\t\tswitch verificationError := verificationError.(type) {\n\t\t\t\tcase distribution.ErrManifestBlobUnknown:\n\t\t\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestBlobUnknown.WithDetail(verificationError.Digest))\n\t\t\t\tcase distribution.ErrManifestNameInvalid:\n\t\t\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeNameInvalid.WithDetail(err))\n\t\t\t\tcase distribution.ErrManifestUnverified:\n\t\t\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnverified)\n\t\t\t\tdefault:\n\t\t\t\t\tif verificationError == digest.ErrDigestInvalidFormat {\n\t\t\t\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid)\n\t\t\t\t\t} else {\n\t\t\t\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown, verificationError)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase errcode.Error:\n\t\t\timh.Errors = append(imh.Errors, err)\n\t\tdefault:\n\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))\n\t\t}\n\t\treturn\n\t}\n\n\t// Tag this manifest\n\tif imh.Tag != \"\" {\n\t\ttags := imh.Repository.Tags(imh)\n\t\terr = tags.Tag(imh, imh.Tag, desc)\n\t\tif err != nil {\n\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))\n\t\t\treturn\n\t\t}\n\n\t}\n\n\t// Construct a canonical url for the uploaded manifest.\n\tref, err := reference.WithDigest(imh.Repository.Named(), imh.Digest)\n\tif err != nil {\n\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))\n\t\treturn\n\t}\n\n\tlocation, err := imh.urlBuilder.BuildManifestURL(ref)\n\tif err != nil {\n\t\t// NOTE(stevvooe): Given the behavior above, this absurdly unlikely to\n\t\t// happen. We'll log the error here but proceed as if it worked. 
Worst\n\t\t// case, we set an empty location header.\n\t\tdcontext.GetLogger(imh).Errorf(\"error building manifest url from digest: %v\", err)\n\t}\n\n\tw.Header().Set(\"Location\", location)\n\tw.Header().Set(\"Docker-Content-Digest\", imh.Digest.String())\n\tw.WriteHeader(http.StatusCreated)\n\n\tdcontext.GetLogger(imh).Debug(\"Succeeded in putting manifest!\")\n}", "func (ir *ImageEngine) ManifestRemoveDigest(ctx context.Context, name string, image string) (string, error) {\n\tupdatedListID, err := manifests.Remove(ir.ClientCtx, name, image, nil)\n\tif err != nil {\n\t\treturn updatedListID, fmt.Errorf(\"removing from manifest %s: %w\", name, err)\n\t}\n\treturn fmt.Sprintf(\"%s :%s\\n\", updatedListID, image), nil\n}", "func (job *purgeManifestJob) process(ctx context.Context, acrClient api.AcrCLIClientInterface) error {\n\tresp, err := acrClient.DeleteManifest(ctx, job.repoName, job.digest)\n\tif err == nil {\n\t\tfmt.Printf(\"Deleted %s/%s@%s\\n\", job.loginURL, job.repoName, job.digest)\n\t\treturn nil\n\t}\n\n\tif resp != nil && resp.Response != nil && resp.StatusCode == http.StatusNotFound {\n\t\t// If the manifest is not found it can be assumed to have been deleted.\n\t\tfmt.Printf(\"Skipped %s/%s@%s, HTTP status: %d\\n\", job.loginURL, job.repoName, job.digest, resp.StatusCode)\n\t\treturn nil\n\t}\n\n\treturn err\n}", "func (mc *manifestCache) Delete(db string) {\n\tmc.mu.Lock()\n\tdefer mc.mu.Unlock()\n\n\tif entry, ok := mc.entry(db); ok {\n\t\tmc.totalSize -= entry.contents.size()\n\t\tmc.lru.Remove(entry.lruEntry)\n\t\tdelete(mc.cache, db)\n\t}\n\n\treturn\n}", "func DeleteImageMeta(key string) error {\n\terr := imageDb.Delete([]byte(key), pebble.Sync)\n\n\treturn err\n}", "func (rl *ReferrerList) Delete(m manifest.Manifest) error {\n\trlM, ok := rl.Manifest.GetOrig().(v1.Index)\n\tif !ok {\n\t\treturn fmt.Errorf(\"referrer list manifest is not an OCI index for %s\", rl.Subject.CommonName())\n\t}\n\t// delete matching entries from the list\n\tmDesc := m.GetDescriptor()\n\tfound := false\n\tfor i := len(rlM.Manifests) - 1; i >= 0; i-- {\n\t\tif rlM.Manifests[i].Digest == mDesc.Digest {\n\t\t\tif i < len(rlM.Manifests)-1 {\n\t\t\t\trlM.Manifests = append(rlM.Manifests[:i], rlM.Manifests[i+1:]...)\n\t\t\t} else {\n\t\t\t\trlM.Manifests = rlM.Manifests[:i]\n\t\t\t}\n\t\t\tfound = true\n\t\t}\n\t}\n\tif !found {\n\t\treturn fmt.Errorf(\"subject not found in referrer list%.0w\", types.ErrNotFound)\n\t}\n\terr := rl.Manifest.SetOrig(rlM)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func handleAppInstanceStatusDelete(ctxArg interface{}, key string,\n\tstatusArg interface{}) {\n\tctx := ctxArg.(*zedmanagerContext)\n\tpublishAppInstanceSummary(ctx)\n}", "func (_m *DirectRepositoryWriter) DeleteManifest(ctx context.Context, id manifest.ID) error {\n\tret := _m.Called(ctx, id)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, manifest.ID) error); ok {\n\t\tr0 = rf(ctx, id)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func handleDelete(ctx *verifierContext, status *types.VerifyImageStatus) {\n\n\tlog.Functionf(\"handleDelete(%s) refcount %d\",\n\t\tstatus.ImageSha256, status.RefCount)\n\n\tif _, err := os.Stat(status.FileLocation); err == nil {\n\t\tlog.Functionf(\"handleDelete removing %s\",\n\t\t\tstatus.FileLocation)\n\t\tif err := os.RemoveAll(status.FileLocation); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t} else {\n\t\tlog.Warnf(\"handleDelete: Unable to delete %s: %s\",\n\t\t\tstatus.FileLocation, 
err)\n\t}\n\n\tunpublishVerifyImageStatus(ctx, status)\n\tlog.Functionf(\"handleDelete done for %s\", status.ImageSha256)\n}", "func (g Goba) DeleteImage(typ DatabaseType, name string) error {\n\tfor _, handler := range g.handlers {\n\t\tif handler.Type() == typ {\n\t\t\treturn handler.DeleteImage(name)\n\t\t}\n\t}\n\treturn ErrNoSuchHandler\n}", "func (sma *SmIPAM) OnIPAMPolicyDelete(obj *ctkit.IPAMPolicy) error {\n\tlog.Info(\"OnIPAMPolicyDelete: received: \", obj.Spec)\n\n\tpolicy, err := sma.FindIPAMPolicy(obj.Tenant, obj.Namespace, obj.Name)\n\n\tif err != nil {\n\t\tlog.Error(\"FindIPAMPolicy returned an error: \", err, \"for: \", obj.Tenant, obj.Namespace, obj.Name)\n\t\treturn errors.New(\"Object doesn't exist\")\n\t}\n\n\t// delete it from the DB\n\treturn sma.sm.DeleteObjectToMbus(\"\", policy, nil)\n}", "func (ir *ImageEngine) ManifestRm(ctx context.Context, names []string) (*entities.ImageRemoveReport, []error) {\n\treturn ir.Remove(ctx, names, entities.ImageRemoveOptions{LookupManifest: true})\n}", "func (_m *RepositoryWriter) DeleteManifest(ctx context.Context, id manifest.ID) error {\n\tret := _m.Called(ctx, id)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, manifest.ID) error); ok {\n\t\tr0 = rf(ctx, id)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func deleteMetadata(id string) error {\n\terr := db.Update(func(tx *bolt.Tx) error {\n\t\treturn tx.Bucket(dbBucket).Delete([]byte(id))\n\t})\n\treturn err\n}", "func (m *ImageManifest) UnmarshalJSON(data []byte) (err error) {\n\tmanifestMap := make(map[string]json.RawMessage)\n\tif err = json.Unmarshal(data, &manifestMap); err != nil {\n\t\terr = errors.WithStack(err)\n\t\treturn\n\t}\n\n\tfor k, v := range manifestMap {\n\t\tswitch k {\n\t\tcase \"mediaType\":\n\t\t\terr = json.Unmarshal(v, &m.MediaType)\n\t\tcase \"schemaVersion\":\n\t\t\terr = json.Unmarshal(v, &m.SchemaVersion)\n\t\tcase \"config\":\n\t\t\tm.Config, err = unmarshalConfig(v)\n\t\tcase \"layers\":\n\t\t\tm.Layers, err = unmarshalLayers(v)\n\t\tdefault:\n\t\t}\n\t\tif err != nil {\n\t\t\terr = errors.WithStack(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}", "func (s *tagStore) Manifest(ctx context.Context, t *models.Tag) (*models.Manifest, error) {\n\tdefer metrics.InstrumentQuery(\"tag_manifest\")()\n\tq := `SELECT\n\t\t\tm.id,\n\t\t\tm.top_level_namespace_id,\n\t\t\tm.repository_id,\n\t\t\tm.schema_version,\n\t\t\tmt.media_type,\n\t\t\tencode(m.digest, 'hex') as digest,\n\t\t\tm.payload,\n\t\t\tmtc.media_type as configuration_media_type,\n\t\t\tencode(m.configuration_blob_digest, 'hex') as configuration_blob_digest,\n\t\t\tm.configuration_payload,\n\t\t\tm.created_at\n\t\tFROM\n\t\t\tmanifests AS m\n\t\t\tJOIN media_types AS mt ON mt.id = m.media_type_id\n\t\t\tLEFT JOIN media_types AS mtc ON mtc.id = m.configuration_media_type_id\n\t\tWHERE\n\t\t\tm.top_level_namespace_id = $1\n\t\t\tAND m.repository_id = $2\n\t\t\tAND m.id = $3`\n\trow := s.db.QueryRowContext(ctx, q, t.NamespaceID, t.RepositoryID, t.ManifestID)\n\n\treturn scanFullManifest(row)\n}", "func (is *ObjectStorage) PutImageManifest(repo, reference, mediaType string, //nolint: gocyclo\n\tbody []byte,\n) (godigest.Digest, godigest.Digest, error) {\n\tif err := is.InitRepo(repo); err != nil {\n\t\tis.log.Debug().Err(err).Msg(\"init repo\")\n\n\t\treturn \"\", \"\", err\n\t}\n\n\tvar lockLatency time.Time\n\n\tvar err error\n\n\tis.Lock(&lockLatency)\n\tdefer func() {\n\t\tis.Unlock(&lockLatency)\n\n\t\tif err == nil {\n\t\t\tmonitoring.SetStorageUsage(is.metrics, 
is.rootDir, repo)\n\t\t\tmonitoring.IncUploadCounter(is.metrics, repo)\n\t\t}\n\t}()\n\n\trefIsDigest := true\n\n\tmDigest, err := common.GetAndValidateRequestDigest(body, reference, is.log)\n\tif err != nil {\n\t\tif errors.Is(err, zerr.ErrBadManifest) {\n\t\t\treturn mDigest, \"\", err\n\t\t}\n\n\t\trefIsDigest = false\n\t}\n\n\tdig, err := common.ValidateManifest(is, repo, reference, mediaType, body, is.log)\n\tif err != nil {\n\t\treturn dig, \"\", err\n\t}\n\n\tindex, err := common.GetIndex(is, repo, is.log)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\t// create a new descriptor\n\tdesc := ispec.Descriptor{\n\t\tMediaType: mediaType, Size: int64(len(body)), Digest: mDigest,\n\t}\n\n\tif !refIsDigest {\n\t\tdesc.Annotations = map[string]string{ispec.AnnotationRefName: reference}\n\t}\n\n\tvar subjectDigest godigest.Digest\n\n\tartifactType := \"\"\n\n\tif mediaType == ispec.MediaTypeImageManifest {\n\t\tvar manifest ispec.Manifest\n\n\t\terr := json.Unmarshal(body, &manifest)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\n\t\tif manifest.Subject != nil {\n\t\t\tsubjectDigest = manifest.Subject.Digest\n\t\t}\n\n\t\tartifactType = zcommon.GetManifestArtifactType(manifest)\n\t} else if mediaType == ispec.MediaTypeImageIndex {\n\t\tvar index ispec.Index\n\n\t\terr := json.Unmarshal(body, &index)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\n\t\tif index.Subject != nil {\n\t\t\tsubjectDigest = index.Subject.Digest\n\t\t}\n\n\t\tartifactType = zcommon.GetIndexArtifactType(index)\n\t}\n\n\tupdateIndex, oldDgst, err := common.CheckIfIndexNeedsUpdate(&index, &desc, is.log)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tif !updateIndex {\n\t\treturn desc.Digest, subjectDigest, nil\n\t}\n\n\t// write manifest to \"blobs\"\n\tdir := path.Join(is.rootDir, repo, \"blobs\", mDigest.Algorithm().String())\n\tmanifestPath := path.Join(dir, mDigest.Encoded())\n\n\tif err = is.store.PutContent(context.Background(), manifestPath, body); err != nil {\n\t\tis.log.Error().Err(err).Str(\"file\", manifestPath).Msg(\"unable to write\")\n\n\t\treturn \"\", \"\", err\n\t}\n\n\terr = common.UpdateIndexWithPrunedImageManifests(is, &index, repo, desc, oldDgst, is.log)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\t// now update \"index.json\"\n\tindex.Manifests = append(index.Manifests, desc)\n\tdir = path.Join(is.rootDir, repo)\n\tindexPath := path.Join(dir, \"index.json\")\n\n\tbuf, err := json.Marshal(index)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Str(\"file\", indexPath).Msg(\"unable to marshal JSON\")\n\n\t\treturn \"\", \"\", err\n\t}\n\n\t// update the descriptors artifact type in order to check for signatures when applying the linter\n\tdesc.ArtifactType = artifactType\n\n\t// apply linter only on images, not signatures\n\tpass, err := common.ApplyLinter(is, is.linter, repo, desc)\n\tif !pass {\n\t\tis.log.Error().Err(err).Str(\"repository\", repo).Str(\"reference\", reference).Msg(\"linter didn't pass\")\n\n\t\treturn \"\", \"\", err\n\t}\n\n\tif err = is.store.PutContent(context.Background(), indexPath, buf); err != nil {\n\t\tis.log.Error().Err(err).Str(\"file\", manifestPath).Msg(\"unable to write\")\n\n\t\treturn \"\", \"\", err\n\t}\n\n\treturn desc.Digest, subjectDigest, nil\n}", "func (is *ImageStoreLocal) PutImageManifest(repo, reference, mediaType string, //nolint: gocyclo\n\tbody []byte,\n) (godigest.Digest, godigest.Digest, error) {\n\tif err := is.InitRepo(repo); err != nil {\n\t\tis.log.Debug().Err(err).Msg(\"init 
repo\")\n\n\t\treturn \"\", \"\", err\n\t}\n\n\tvar lockLatency time.Time\n\n\tvar err error\n\n\tis.Lock(&lockLatency)\n\tdefer func() {\n\t\tis.Unlock(&lockLatency)\n\n\t\tif err == nil {\n\t\t\tmonitoring.SetStorageUsage(is.metrics, is.rootDir, repo)\n\t\t\tmonitoring.IncUploadCounter(is.metrics, repo)\n\t\t}\n\t}()\n\n\trefIsDigest := true\n\n\tmDigest, err := common.GetAndValidateRequestDigest(body, reference, is.log)\n\tif err != nil {\n\t\tif errors.Is(err, zerr.ErrBadManifest) {\n\t\t\treturn mDigest, \"\", err\n\t\t}\n\n\t\trefIsDigest = false\n\t}\n\n\tdigest, err := common.ValidateManifest(is, repo, reference, mediaType, body, is.log)\n\tif err != nil {\n\t\treturn digest, \"\", err\n\t}\n\n\tindex, err := common.GetIndex(is, repo, is.log)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\t// create a new descriptor\n\tdesc := ispec.Descriptor{\n\t\tMediaType: mediaType, Size: int64(len(body)), Digest: mDigest,\n\t}\n\n\tif !refIsDigest {\n\t\tdesc.Annotations = map[string]string{ispec.AnnotationRefName: reference}\n\t}\n\n\tvar subjectDigest godigest.Digest\n\n\tartifactType := \"\"\n\n\tif mediaType == ispec.MediaTypeImageManifest {\n\t\tvar manifest ispec.Manifest\n\n\t\terr := json.Unmarshal(body, &manifest)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\n\t\tif manifest.Subject != nil {\n\t\t\tsubjectDigest = manifest.Subject.Digest\n\t\t}\n\n\t\tartifactType = zcommon.GetManifestArtifactType(manifest)\n\t} else if mediaType == ispec.MediaTypeImageIndex {\n\t\tvar index ispec.Index\n\n\t\terr := json.Unmarshal(body, &index)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\n\t\tif index.Subject != nil {\n\t\t\tsubjectDigest = index.Subject.Digest\n\t\t}\n\n\t\tartifactType = zcommon.GetIndexArtifactType(index)\n\t}\n\n\tupdateIndex, oldDgst, err := common.CheckIfIndexNeedsUpdate(&index, &desc, is.log)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tif !updateIndex {\n\t\treturn desc.Digest, subjectDigest, nil\n\t}\n\n\t// write manifest to \"blobs\"\n\tdir := path.Join(is.rootDir, repo, \"blobs\", mDigest.Algorithm().String())\n\t_ = ensureDir(dir, is.log)\n\tfile := path.Join(dir, mDigest.Encoded())\n\n\t// in case the linter will not pass, it will be garbage collected\n\tif err := is.writeFile(file, body); err != nil {\n\t\tis.log.Error().Err(err).Str(\"file\", file).Msg(\"unable to write\")\n\n\t\treturn \"\", \"\", err\n\t}\n\n\terr = common.UpdateIndexWithPrunedImageManifests(is, &index, repo, desc, oldDgst, is.log)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\t// now update \"index.json\"\n\tindex.Manifests = append(index.Manifests, desc)\n\tdir = path.Join(is.rootDir, repo)\n\tfile = path.Join(dir, \"index.json\")\n\n\tbuf, err := json.Marshal(index)\n\tif err := inject.Error(err); err != nil {\n\t\tis.log.Error().Err(err).Str(\"file\", file).Msg(\"unable to marshal JSON\")\n\n\t\treturn \"\", \"\", err\n\t}\n\n\t// update the descriptors artifact type in order to check for signatures when applying the linter\n\tdesc.ArtifactType = artifactType\n\n\t// apply linter only on images, not signatures or indexes\n\tpass, err := common.ApplyLinter(is, is.linter, repo, desc)\n\tif !pass {\n\t\tis.log.Error().Err(err).Str(\"repository\", repo).Str(\"reference\", reference).Msg(\"linter didn't pass\")\n\n\t\treturn \"\", \"\", err\n\t}\n\n\terr = is.writeFile(file, buf)\n\tif err := inject.Error(err); err != nil {\n\t\tis.log.Error().Err(err).Str(\"file\", file).Msg(\"unable to write\")\n\n\t\treturn \"\", \"\", 
err\n\t}\n\n\treturn desc.Digest, subjectDigest, nil\n}", "func (signup *EventSignup) OnDeleted(container *ioccontainer.Container) error {\n\terr := signup.Validate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar eventRepository EventRepository\n\tcontainer.Make(&eventRepository)\n\n\tevent, err := eventRepository.GetEventByID(signup.EventID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn signup.sendNotification(event, \"member_signed_out\", container)\n}", "func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) {\n\tdcontext.GetLogger(imh).Debug(\"GetImageManifest\")\n\tmanifests, err := imh.Repository.Manifests(imh)\n\tif err != nil {\n\t\timh.Errors = append(imh.Errors, err)\n\t\treturn\n\t}\n\tvar supports [numStorageTypes]bool\n\n\t// this parsing of Accept headers is not quite as full-featured as godoc.org's parser, but we don't care about \"q=\" values\n\t// https://github.com/golang/gddo/blob/e91d4165076d7474d20abda83f92d15c7ebc3e81/httputil/header/header.go#L165-L202\n\tfor _, acceptHeader := range r.Header[\"Accept\"] {\n\t\t// r.Header[...] is a slice in case the request contains the same header more than once\n\t\t// if the header isn't set, we'll get the zero value, which \"range\" will handle gracefully\n\n\t\t// we need to split each header value on \",\" to get the full list of \"Accept\" values (per RFC 2616)\n\t\t// https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1\n\t\tfor _, mediaType := range strings.Split(acceptHeader, \",\") {\n\t\t\tif mediaType, _, err = mime.ParseMediaType(mediaType); err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif mediaType == schema2.MediaTypeManifest {\n\t\t\t\tsupports[manifestSchema2] = true\n\t\t\t}\n\t\t\tif mediaType == manifestlist.MediaTypeManifestList {\n\t\t\t\tsupports[manifestlistSchema] = true\n\t\t\t}\n\t\t\tif mediaType == v1.MediaTypeImageManifest {\n\t\t\t\tsupports[ociSchema] = true\n\t\t\t}\n\t\t\tif mediaType == v1.MediaTypeImageIndex {\n\t\t\t\tsupports[ociImageIndexSchema] = true\n\t\t\t}\n\t\t}\n\t}\n\n\tif imh.Tag != \"\" {\n\t\ttags := imh.Repository.Tags(imh)\n\t\tdesc, err := tags.Get(imh, imh.Tag)\n\t\tif err != nil {\n\t\t\tif _, ok := err.(distribution.ErrTagUnknown); ok {\n\t\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err))\n\t\t\t} else {\n\t\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\timh.Digest = desc.Digest\n\t}\n\n\tif etagMatch(r, imh.Digest.String()) {\n\t\tw.WriteHeader(http.StatusNotModified)\n\t\treturn\n\t}\n\n\tvar options []distribution.ManifestServiceOption\n\tif imh.Tag != \"\" {\n\t\toptions = append(options, distribution.WithTag(imh.Tag))\n\t}\n\tmanifest, err := manifests.Get(imh, imh.Digest, options...)\n\tif err != nil {\n\t\tif _, ok := err.(distribution.ErrManifestUnknownRevision); ok {\n\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err))\n\t\t} else {\n\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))\n\t\t}\n\t\treturn\n\t}\n\t// determine the type of the returned manifest\n\tmanifestType := manifestSchema1\n\tschema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest)\n\tmanifestList, isManifestList := manifest.(*manifestlist.DeserializedManifestList)\n\tif isSchema2 {\n\t\tmanifestType = manifestSchema2\n\t} else if _, isOCImanifest := manifest.(*ocischema.DeserializedManifest); isOCImanifest {\n\t\tmanifestType = ociSchema\n\t} else if isManifestList {\n\t\tif 
manifestList.MediaType == manifestlist.MediaTypeManifestList {\n\t\t\tmanifestType = manifestlistSchema\n\t\t} else if manifestList.MediaType == v1.MediaTypeImageIndex {\n\t\t\tmanifestType = ociImageIndexSchema\n\t\t}\n\t}\n\n\tif manifestType == ociSchema && !supports[ociSchema] {\n\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithMessage(\"OCI manifest found, but accept header does not support OCI manifests\"))\n\t\treturn\n\t}\n\tif manifestType == ociImageIndexSchema && !supports[ociImageIndexSchema] {\n\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithMessage(\"OCI index found, but accept header does not support OCI indexes\"))\n\t\treturn\n\t}\n\t// Only rewrite schema2 manifests when they are being fetched by tag.\n\t// If they are being fetched by digest, we can't return something not\n\t// matching the digest.\n\tif imh.Tag != \"\" && manifestType == manifestSchema2 && !supports[manifestSchema2] {\n\t\t// Rewrite manifest in schema1 format\n\t\tdcontext.GetLogger(imh).Infof(\"rewriting manifest %s in schema1 format to support old client\", imh.Digest.String())\n\n\t\tmanifest, err = imh.convertSchema2Manifest(schema2Manifest)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t} else if imh.Tag != \"\" && manifestType == manifestlistSchema && !supports[manifestlistSchema] {\n\t\t// Rewrite manifest in schema1 format\n\t\tdcontext.GetLogger(imh).Infof(\"rewriting manifest list %s in schema1 format to support old client\", imh.Digest.String())\n\n\t\t// Find the image manifest corresponding to the default\n\t\t// platform\n\t\tvar manifestDigest digest.Digest\n\t\tfor _, manifestDescriptor := range manifestList.Manifests {\n\t\t\tif manifestDescriptor.Platform.Architecture == defaultArch && manifestDescriptor.Platform.OS == defaultOS {\n\t\t\t\tmanifestDigest = manifestDescriptor.Digest\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif manifestDigest == \"\" {\n\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown)\n\t\t\treturn\n\t\t}\n\n\t\tmanifest, err = manifests.Get(imh, manifestDigest)\n\t\tif err != nil {\n\t\t\tif _, ok := err.(distribution.ErrManifestUnknownRevision); ok {\n\t\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err))\n\t\t\t} else {\n\t\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\t// If necessary, convert the image manifest\n\t\tif schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest); isSchema2 && !supports[manifestSchema2] {\n\t\t\tmanifest, err = imh.convertSchema2Manifest(schema2Manifest)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\timh.Digest = manifestDigest\n\t\t}\n\t}\n\n\tct, p, err := manifest.Payload()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", ct)\n\tw.Header().Set(\"Content-Length\", fmt.Sprint(len(p)))\n\tw.Header().Set(\"Docker-Content-Digest\", imh.Digest.String())\n\tw.Header().Set(\"Etag\", fmt.Sprintf(`\"%s\"`, imh.Digest))\n\tw.Write(p)\n}", "func (rc *regClient) TagDelete(ctx context.Context, ref types.Ref) error {\n\tvar tempManifest manifest.Manifest\n\tif ref.Tag == \"\" {\n\t\treturn ErrMissingTag\n\t}\n\n\t// attempt to delete the tag directly, available in OCI distribution-spec, and Hub API\n\treq := httpReq{\n\t\thost: ref.Registry,\n\t\tnoMirrors: true,\n\t\tapis: map[string]httpReqAPI{\n\t\t\t\"\": {\n\t\t\t\tmethod: \"DELETE\",\n\t\t\t\trepository: ref.Repository,\n\t\t\t\tpath: \"manifests/\" + ref.Tag,\n\t\t\t\tignoreErr: true, 
// do not trigger backoffs if this fails\n\t\t\t},\n\t\t\t\"hub\": {\n\t\t\t\tmethod: \"DELETE\",\n\t\t\t\tpath: \"repositories/\" + ref.Repository + \"/tags/\" + ref.Tag + \"/\",\n\t\t\t},\n\t\t},\n\t}\n\n\tresp, err := rc.httpDo(ctx, req)\n\tif resp != nil {\n\t\tdefer resp.Close()\n\t}\n\t// TODO: Hub may return a different status\n\tif err == nil && resp != nil && resp.HTTPResponse().StatusCode == 202 {\n\t\treturn nil\n\t}\n\t// ignore errors, fallback to creating a temporary manifest to replace the tag and deleting that manifest\n\n\t// lookup the current manifest media type\n\tcurManifest, err := rc.ManifestHead(ctx, ref)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// create empty image config with single label\n\t// Note, this should be MediaType specific, but it appears that docker uses OCI for the config\n\tnow := time.Now()\n\tconf := ociv1.Image{\n\t\tCreated: &now,\n\t\tConfig: ociv1.ImageConfig{\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"delete-tag\": ref.Tag,\n\t\t\t\t\"delete-date\": now.String(),\n\t\t\t},\n\t\t},\n\t\tOS: \"linux\",\n\t\tArchitecture: \"amd64\",\n\t\tRootFS: ociv1.RootFS{\n\t\t\tType: \"layers\",\n\t\t\tDiffIDs: []digest.Digest{},\n\t\t},\n\t}\n\tconfB, err := json.Marshal(conf)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdigester := digest.Canonical.Digester()\n\tconfBuf := bytes.NewBuffer(confB)\n\t_, err = confBuf.WriteTo(digester.Hash())\n\tif err != nil {\n\t\treturn err\n\t}\n\tconfDigest := digester.Digest()\n\n\t// create manifest with config, matching the original tag manifest type\n\tswitch curManifest.GetMediaType() {\n\tcase MediaTypeOCI1Manifest, MediaTypeOCI1ManifestList:\n\t\ttempManifest, err = manifest.FromOrig(ociv1.Manifest{\n\t\t\tVersioned: ociv1Specs.Versioned{\n\t\t\t\tSchemaVersion: 1,\n\t\t\t},\n\t\t\tConfig: ociv1.Descriptor{\n\t\t\t\tMediaType: MediaTypeOCI1ImageConfig,\n\t\t\t\tDigest: confDigest,\n\t\t\t\tSize: int64(len(confB)),\n\t\t\t},\n\t\t\tLayers: []ociv1.Descriptor{},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tdefault: // default to the docker v2 schema\n\t\ttempManifest, err = manifest.FromOrig(dockerSchema2.Manifest{\n\t\t\tVersioned: dockerManifest.Versioned{\n\t\t\t\tSchemaVersion: 2,\n\t\t\t\tMediaType: MediaTypeDocker2Manifest,\n\t\t\t},\n\t\t\tConfig: dockerDistribution.Descriptor{\n\t\t\t\tMediaType: MediaTypeDocker2ImageConfig,\n\t\t\t\tDigest: confDigest,\n\t\t\t\tSize: int64(len(confB)),\n\t\t\t},\n\t\t\tLayers: []dockerDistribution.Descriptor{},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\trc.log.WithFields(logrus.Fields{\n\t\t\"ref\": ref.Reference,\n\t}).Debug(\"Sending dummy manifest to replace tag\")\n\n\t// push config\n\t_, _, err = rc.BlobPut(ctx, ref, confDigest, ioutil.NopCloser(bytes.NewReader(confB)), MediaTypeDocker2ImageConfig, int64(len(confB)))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed sending dummy config to delete %s: %w\", ref.CommonName(), err)\n\t}\n\n\t// push manifest to tag\n\terr = rc.ManifestPut(ctx, ref, tempManifest)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed sending dummy manifest to delete %s: %w\", ref.CommonName(), err)\n\t}\n\n\tref.Digest = tempManifest.GetDigest().String()\n\n\t// delete manifest by digest\n\trc.log.WithFields(logrus.Fields{\n\t\t\"ref\": ref.Reference,\n\t\t\"digest\": ref.Digest,\n\t}).Debug(\"Deleting dummy manifest\")\n\terr = rc.ManifestDelete(ctx, ref)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed deleting dummy manifest for %s: %w\", ref.CommonName(), err)\n\t}\n\n\treturn nil\n}", "func 
(announcement *Announcement) DeleteMetadata(key Metadata_Key, value []byte) {\n\tnewMeta := make([]*Metadata, 0, len(announcement.Metadata))\n\tfor _, meta := range announcement.Metadata {\n\t\tif !(meta.Key == key && bytes.Equal(value, meta.Value)) {\n\t\t\tnewMeta = append(newMeta, meta)\n\t\t}\n\t}\n\tannouncement.Metadata = newMeta\n}", "func (p *Processor) ValidateAndStoreManifest(account keppel.Account, repo keppel.Repository, m IncomingManifest, actx keppel.AuditContext) (*keppel.Manifest, error) {\n\t//check if the objects we want to create already exist in the database; this\n\t//check is not 100% reliable since it does not run in the same transaction as\n\t//the actual upsert, so results should be taken with a grain of salt; but the\n\t//result is accurate enough to avoid most duplicate audit events\n\tcontentsDigest := digest.Canonical.FromBytes(m.Contents)\n\tmanifestExistsAlready, err := p.db.SelectBool(checkManifestExistsQuery, repo.ID, contentsDigest.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlogg.Debug(\"ValidateAndStoreManifest: in repo %d, manifest %s already exists = %t\", repo.ID, contentsDigest, manifestExistsAlready)\n\tvar tagExistsAlready bool\n\tif m.Reference.IsTag() {\n\t\ttagExistsAlready, err = p.db.SelectBool(checkTagExistsAtSameDigestQuery, repo.ID, m.Reference.Tag, contentsDigest.String())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tlogg.Debug(\"ValidateAndStoreManifest: in repo %d, tag %s @%s already exists = %t\", repo.ID, m.Reference.Tag, contentsDigest, tagExistsAlready)\n\t}\n\n\t//the quota check can be skipped if we are sure that we won't need to insert\n\t//a new row into the manifests table\n\tif !manifestExistsAlready {\n\t\terr = p.checkQuotaForManifestPush(account)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tmanifest := &keppel.Manifest{\n\t\t//NOTE: .Digest and .SizeBytes are computed by validateAndStoreManifestCommon()\n\t\tRepositoryID: repo.ID,\n\t\tMediaType: m.MediaType,\n\t\tPushedAt: m.PushedAt,\n\t\tValidatedAt: m.PushedAt,\n\t}\n\tif m.Reference.IsDigest() {\n\t\t//allow validateAndStoreManifestCommon() to validate the user-supplied\n\t\t//digest against the actual manifest data\n\t\tmanifest.Digest = m.Reference.Digest\n\t}\n\terr = p.validateAndStoreManifestCommon(account, repo, manifest, m.Contents,\n\t\tfunc(tx *gorp.Transaction) error {\n\t\t\tif m.Reference.IsTag() {\n\t\t\t\terr = upsertTag(tx, keppel.Tag{\n\t\t\t\t\tRepositoryID: repo.ID,\n\t\t\t\t\tName: m.Reference.Tag,\n\t\t\t\t\tDigest: manifest.Digest,\n\t\t\t\t\tPushedAt: m.PushedAt,\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t//after making all DB changes, but before committing the DB transaction,\n\t\t\t//write the manifest into the backend\n\t\t\treturn p.sd.WriteManifest(account, repo.Name, manifest.Digest, m.Contents)\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t//submit audit events, but only if we are reasonably sure that we actually\n\t//inserted a new manifest and/or changed a tag (without this restriction, we\n\t//would log an audit event everytime a manifest is validated or a tag is\n\t//synced; before the introduction of this check, we generated millions of\n\t//useless audit events per month)\n\tif userInfo := actx.UserIdentity.UserInfo(); userInfo != nil {\n\t\trecord := func(target audittools.TargetRenderer) {\n\t\t\tp.auditor.Record(audittools.EventParameters{\n\t\t\t\tTime: p.timeNow(),\n\t\t\t\tRequest: actx.Request,\n\t\t\t\tUser: 
userInfo,\n\t\t\t\tReasonCode: http.StatusOK,\n\t\t\t\tAction: cadf.CreateAction,\n\t\t\t\tTarget: target,\n\t\t\t})\n\t\t}\n\t\tif !manifestExistsAlready {\n\t\t\trecord(auditManifest{\n\t\t\t\tAccount: account,\n\t\t\t\tRepository: repo,\n\t\t\t\tDigest: manifest.Digest,\n\t\t\t})\n\t\t}\n\t\tif m.Reference.IsTag() && !tagExistsAlready {\n\t\t\trecord(auditTag{\n\t\t\t\tAccount: account,\n\t\t\t\tRepository: repo,\n\t\t\t\tDigest: manifest.Digest,\n\t\t\t\tTagName: m.Reference.Tag,\n\t\t\t})\n\t\t}\n\t}\n\treturn manifest, nil\n}", "func (l *Logger) OnMessageDelete(ds *discordgo.Session, md *discordgo.MessageDelete) {\n\tif l.LogDeletes == false {\n\t\treturn\n\t}\n\n\t// TODO: Create separate log for bots.\n\tif md.Author.ID == ds.State.User.ID {\n\t\treturn\n\t}\n\n\tlog.Printf(\"%+v\", md)\n\tlog.Printf(\"%+v\", md.Message)\n\n\t// TODO: Implement delete logging\n\tembed := message.GetDefaultEmbed()\n\tembed.Title = \"Message Deleted\"\n\tembed.Description = fmt.Sprintf(\"Message Deleted: %s\", md.Content)\n\n\tmessage.SendEmbed(ds, l.ChannelID, embed)\n}", "func TriggerPostDelete(appName string) {\n\terr := common.PropertyDestroy(\"buildpacks\", appName)\n\tif err != nil {\n\t\tcommon.LogFail(err.Error())\n\t}\n}", "func (s *store) OnDelete(obj interface{}) {\n\tpod, ok := obj.(*api.Pod)\n\tif !ok {\n\t\tdeletedObj, dok := obj.(kcache.DeletedFinalStateUnknown)\n\t\tif dok {\n\t\t\tpod, ok = deletedObj.Obj.(*api.Pod)\n\t\t}\n\t}\n\n\tif !ok {\n\t\tlog.Errorf(\"Expected Pod but OnDelete handler received %+v\", obj)\n\t\treturn\n\t}\n\n\tif pod.Status.PodIP != \"\" {\n\t\ts.mutex.Lock()\n\t\tdelete(s.rolesByIP, pod.Status.PodIP)\n\t\ts.mutex.Unlock()\n\t}\n}", "func PushManifest(img string, auth dockertypes.AuthConfig) (hash string, length int, err error) {\n\tsrcImages := []types.ManifestEntry{}\n\n\tfor i, platform := range platformsToSearchForIndex {\n\t\tosArchArr := strings.Split(platform, \"/\")\n\t\tif len(osArchArr) != 2 && len(osArchArr) != 3 {\n\t\t\treturn hash, length, fmt.Errorf(\"platform argument %d is not of form 'os/arch': '%s'\", i, platform)\n\t\t}\n\t\tvariant := \"\"\n\t\tos, arch := osArchArr[0], osArchArr[1]\n\t\tif len(osArchArr) == 3 {\n\t\t\tvariant = osArchArr[2]\n\t\t}\n\t\tsrcImages = append(srcImages, types.ManifestEntry{\n\t\t\tImage: fmt.Sprintf(\"%s-%s\", img, arch),\n\t\t\tPlatform: ocispec.Platform{\n\t\t\t\tOS: os,\n\t\t\t\tArchitecture: arch,\n\t\t\t\tVariant: variant,\n\t\t\t},\n\t\t})\n\t}\n\n\tyamlInput := types.YAMLInput{\n\t\tImage: img,\n\t\tManifests: srcImages,\n\t}\n\n\tlog.Debugf(\"pushing manifest list for %s -> %#v\", img, yamlInput)\n\n\t// push the manifest list with the auth as given, ignore missing, do not allow insecure\n\treturn registry.PushManifestList(auth.Username, auth.Password, yamlInput, true, false, false, types.OCI, \"\")\n}", "func (e *EventHandler) OnDelete(obj interface{}) {\n\td := event.DeleteEvent{}\n\n\t// Deal with tombstone events by pulling the object out. Tombstone events wrap the object in a\n\t// DeleteFinalStateUnknown struct, so the object needs to be pulled out.\n\t// Copied from sample-controller\n\t// This should never happen if we aren't missing events, which we have concluded that we are not\n\t// and made decisions off of this belief. 
Maybe this shouldn't be here?\n\tvar ok bool\n\tif _, ok = obj.(client.Object); !ok {\n\t\t// If the object doesn't have Metadata, assume it is a tombstone object of type DeletedFinalStateUnknown\n\t\ttombstone, ok := obj.(cache.DeletedFinalStateUnknown)\n\t\tif !ok {\n\t\t\tlog.Error(nil, \"Error decoding objects. Expected cache.DeletedFinalStateUnknown\",\n\t\t\t\t\"type\", fmt.Sprintf(\"%T\", obj),\n\t\t\t\t\"object\", obj)\n\t\t\treturn\n\t\t}\n\n\t\t// Set DeleteStateUnknown to true\n\t\td.DeleteStateUnknown = true\n\n\t\t// Set obj to the tombstone obj\n\t\tobj = tombstone.Obj\n\t}\n\n\t// Pull Object out of the object\n\tif o, ok := obj.(client.Object); ok {\n\t\td.Object = o\n\t} else {\n\t\tlog.Error(nil, \"OnDelete missing Object\",\n\t\t\t\"object\", obj, \"type\", fmt.Sprintf(\"%T\", obj))\n\t\treturn\n\t}\n\n\tfor _, p := range e.predicates {\n\t\tif !p.Delete(d) {\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Invoke delete handler\n\tctx, cancel := context.WithCancel(e.ctx)\n\tdefer cancel()\n\te.handler.Delete(ctx, d, e.queue)\n}", "func (st *MemStorage) Delete(gun string) error {\n\tst.lock.Lock()\n\tdefer st.lock.Unlock()\n\tfor k := range st.tufMeta {\n\t\tif strings.HasPrefix(k, gun) {\n\t\t\tdelete(st.tufMeta, k)\n\t\t}\n\t}\n\tdelete(st.checksums, gun)\n\treturn nil\n}", "func (s *TattooStorage) DeleteMetadata(name string) {\n\ts.MetadataDB.Delete(name)\n\ts.MetadataDB.SaveIndex()\n}", "func (s *storageImageDestination) PutManifest(ctx context.Context, manifest []byte) error {\n\ts.manifest = make([]byte, len(manifest))\n\tcopy(s.manifest, manifest)\n\treturn nil\n}", "func (reg *Reg) referrerDelete(ctx context.Context, r ref.Ref, m manifest.Manifest) error {\n\t// get subject field\n\tmSubject, ok := m.(manifest.Subjecter)\n\tif !ok {\n\t\treturn fmt.Errorf(\"manifest does not support the subject field: %w\", types.ErrUnsupportedMediaType)\n\t}\n\tsubject, err := mSubject.GetSubject()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// validate/set subject descriptor\n\tif subject == nil || subject.MediaType == \"\" || subject.Digest == \"\" || subject.Size <= 0 {\n\t\treturn fmt.Errorf(\"refers is not set%.0w\", types.ErrNotFound)\n\t}\n\n\trSubject := r\n\trSubject.Tag = \"\"\n\trSubject.Digest = subject.Digest.String()\n\n\t// if referrer API is available, nothing to do, return\n\tif reg.referrerPing(ctx, rSubject) {\n\t\treturn nil\n\t}\n\n\t// fallback to using tag schema for refers\n\trl, err := reg.referrerListTag(ctx, rSubject)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = rl.Delete(m)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// push updated referrer list by tag\n\trlTag, err := referrer.FallbackTag(rSubject)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif rl.IsEmpty() {\n\t\terr = reg.TagDelete(ctx, rlTag)\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\t\t// if delete is not supported, fall back to pushing empty list\n\t}\n\treturn reg.ManifestPut(ctx, rlTag, rl.Manifest)\n}", "func (b *Backend) ManifestAnnotate(ctx context.Context, req *pb.ManifestAnnotateRequest) (*gogotypes.Empty, error) {\n\tvar emptyResp = &gogotypes.Empty{}\n\n\tif !b.daemon.opts.Experimental {\n\t\treturn emptyResp, errors.New(\"please enable experimental to use manifest feature\")\n\t}\n\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"ManifestList\": req.GetManifestList(),\n\t\t\"Manifest\": req.GetManifest(),\n\t}).Info(\"ManifestAnnotateRequest received\")\n\n\tmanifestName := req.GetManifestList()\n\tmanifestImage := req.GetManifest()\n\timageOS := req.GetOs()\n\timageArch := 
req.GetArch()\n\timageOSFeature := req.GetOsFeatures()\n\timageVariant := req.GetVariant()\n\n\t// get list image\n\t_, listImage, err := image.FindImage(b.daemon.localStore, manifestName)\n\tif err != nil {\n\t\treturn emptyResp, err\n\t}\n\n\t// load list from list image\n\t_, list, err := loadListFromImage(b.daemon.localStore, listImage.ID)\n\tif err != nil {\n\t\treturn emptyResp, err\n\t}\n\n\t// add image to list, if image already exists, it will be substituted\n\tinstanceDigest, err := list.addImage(ctx, b.daemon.localStore, manifestImage)\n\tif err != nil {\n\t\treturn emptyResp, err\n\t}\n\n\t// modify image platform if user specifies\n\tfor i := range list.docker.Manifests {\n\t\tif list.docker.Manifests[i].Digest == instanceDigest {\n\t\t\tif imageOS != \"\" {\n\t\t\t\tlist.docker.Manifests[i].Platform.OS = imageOS\n\t\t\t}\n\t\t\tif imageArch != \"\" {\n\t\t\t\tlist.docker.Manifests[i].Platform.Architecture = imageArch\n\t\t\t}\n\t\t\tif len(imageOSFeature) > 0 {\n\t\t\t\tlist.docker.Manifests[i].Platform.OSFeatures = append([]string{}, imageOSFeature...)\n\t\t\t}\n\t\t\tif imageVariant != \"\" {\n\t\t\t\tlist.docker.Manifests[i].Platform.Variant = imageVariant\n\t\t\t}\n\t\t}\n\t}\n\n\t// save list to image\n\t_, err = list.saveListToImage(b.daemon.localStore, listImage.ID, \"\", manifest.DockerV2ListMediaType)\n\n\treturn emptyResp, err\n}", "func (mdm *MetaDagModifier) RemoveMetadata(root ipld.Node, metakeys []byte) (ipld.Node, error) {\n\t// Read the existing metadata map.\n\tb, encodedTree, err := util.ReadMetadataListFromDag(mdm.ctx, root, mdm.dagserv, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif b == nil {\n\t\treturn nil, errors.New(\"no metadata exists\")\n\t}\n\n\t// Determine the specific scenario.\n\t// Scenario #1:\n\tvar newMetaNode ipld.Node\n\tvar children *ft.DagMetaNodes\n\tvar clear bool\n\tchildren, err = ft.GetChildrenForDagWithMeta(mdm.ctx, root, mdm.dagserv)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif children == nil {\n\t\treturn nil, errors.New(\"expected DAG node with metadata child node\")\n\t}\n\n\t// Create existing map and check with the given key list.\n\tm := make(map[string]interface{})\n\terr = json.Unmarshal(b, &m)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinputKeys := strings.Split(string(metakeys), \",\")\n\n\texists := util.KeyIntersects(m, inputKeys)\n\tif !exists {\n\t\treturn nil, errors.New(\"no metadata entries with the given keys\")\n\t} else {\n\t\t// Truncate(0) on the metadata sub-DAG.\n\t\terr := mdm.Truncate(0)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Check if inputM and m have the same key set\n\t\tif util.EqualKeySets(m, inputKeys) {\n\t\t\t// Scenario #1: clear metadata.\n\t\t\tclear = true\n\t\t} else {\n\t\t\t// Scenario #2: delete a subset of the metadata map.\n\t\t\t// iterate the inputKeys to delete each key from the existing map.\n\t\t\tfor _, k := range inputKeys {\n\t\t\t\tdelete(m, k)\n\t\t\t}\n\n\t\t\tb, err = json.Marshal(m)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t// Create a metadata sub-DAG\n\t\t\tnewMetaNode, err = mdm.buildNewMetaDataDag(b, encodedTree)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\tdnode := children.DataNode\n\t// if clear is true, return dnode as root.\n\tif clear {\n\t\treturn dnode, nil\n\t}\n\n\t// Attach the modified metadata sub-DAG to a new root for the BTFS file DAG.\n\tfileSize, err := FileSize(dnode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnewRoot, err := 
mdm.GetDb().AttachMetadataDag(dnode, fileSize, newMetaNode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmdm.GetDb().Add(newRoot)\n\n\treturn newRoot, nil\n}", "func (ctrler CtrlDefReactor) OnSnapshotRestoreDelete(obj *SnapshotRestore) error {\n\tlog.Info(\"OnSnapshotRestoreDelete is not implemented\")\n\treturn nil\n}", "func Remove(options types.RemoveOptions, config config.Store) error {\n\tapp := &AppImage{}\n\n\tindexFile := fmt.Sprintf(\"%s.json\", path.Join(config.IndexStore, options.Executable))\n\tlogger.Debugf(\"Checking if %s exists\", indexFile)\n\tif !helpers.CheckIfFileExists(indexFile) {\n\t\tfmt.Printf(\"%s is not installed \\n\", tui.Yellow(options.Executable))\n\t\treturn nil\n\t}\n\n\tbar := tui.NewProgressBar(7, \"r\")\n\n\tlogger.Debugf(\"Unmarshalling JSON from %s\", indexFile)\n\tindexBytes, err := ioutil.ReadFile(indexFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbar.Add(1)\n\n\terr = json.Unmarshal(indexBytes, app)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif app.IconPath != \"\" {\n\t\tlogger.Debugf(\"Removing thumbnail, %s\", app.IconPath)\n\t\tos.Remove(app.IconPath)\n\t}\n\tbar.Add(1)\n\n\tif app.IconPathHicolor != \"\" {\n\t\tlogger.Debugf(\"Removing symlink to hicolor theme, %s\", app.IconPathHicolor)\n\t\tos.Remove(app.IconPathHicolor)\n\t}\n\tbar.Add(1)\n\n\tif app.DesktopFile != \"\" {\n\t\tlogger.Debugf(\"Removing desktop file, %s\", app.DesktopFile)\n\t\tos.Remove(app.DesktopFile)\n\t}\n\tbar.Add(1)\n\n\tbinDir := path.Join(xdg.Home, \".local\", \"bin\")\n\tbinFile := path.Join(binDir, options.Executable)\n\n\tif helpers.CheckIfFileExists(binFile) {\n\t\tbinAbsPath, err := filepath.EvalSymlinks(binFile)\n\t\tif err == nil && strings.HasPrefix(binAbsPath, config.LocalStore) {\n\t\t\t// this link points to config.LocalStore, where all AppImages are stored\n\t\t\t// I guess we need to remove them, no asking and all\n\t\t\t// make sure we remove the file first to prevent conflicts in future\n\t\t\t_ = os.Remove(binFile)\n\t\t}\n\t}\n\tbar.Add(1)\n\n\tlogger.Debugf(\"Removing appimage, %s\", app.Filepath)\n\t_ = os.Remove(app.Filepath)\n\tbar.Add(1)\n\n\tlogger.Debugf(\"Removing index file, %s\", indexFile)\n\t_ = os.Remove(indexFile)\n\tbar.Add(1)\n\n\tbar.Finish()\n\tfmt.Printf(\"\\n\")\n\tfmt.Printf(\"✅ %s removed successfully\\n\", app.Executable)\n\tlogger.Debugf(\"Removing all files completed successfully\")\n\n\treturn bar.Finish()\n}", "func (m *Manifest) UnmarshalJSON(data []byte) error {\n\taux := &manifestAux{\n\t\tABI: &m.ABI,\n\t\tTrusts: &m.Trusts,\n\t\tSafeMethods: &m.SafeMethods,\n\t}\n\n\tif err := json.Unmarshal(data, aux); err != nil {\n\t\treturn err\n\t}\n\n\tif aux.Features[\"storage\"] {\n\t\tm.Features |= smartcontract.HasStorage\n\t}\n\tif aux.Features[\"payable\"] {\n\t\tm.Features |= smartcontract.IsPayable\n\t}\n\n\tm.Groups = aux.Groups\n\tm.Permissions = aux.Permissions\n\tm.SupportedStandards = aux.SupportedStandards\n\tm.Extra = aux.Extra\n\n\treturn nil\n}", "func (r *Registry) ImageManifest(image Image, token string) (*ImageManifest, error) {\n\turl := r.GetDigestUrl(image)\n\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Add(\"Accept\", schema2.MediaTypeManifest)\n\tif token != \"\" {\n\t\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", token))\n\t}\n\n\tresp, err := r.Client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\trespBody, _ := GetRespBody(resp)\n\n\tif resp.StatusCode != http.StatusOK 
{\n\t\tif resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusUnauthorized {\n\t\t\tklog.Error(statusUnauthorized)\n\t\t\treturn nil, restful.NewError(resp.StatusCode, statusUnauthorized)\n\t\t}\n\t\tklog.Errorf(\"got response: statusCode is '%d', body is '%s'\\n\", resp.StatusCode, respBody)\n\t\treturn nil, restful.NewError(resp.StatusCode, \"got image manifest failed\")\n\t}\n\n\timageManifest := &ImageManifest{}\n\terr = json.Unmarshal(respBody, imageManifest)\n\n\treturn imageManifest, err\n}", "func AppDeleteHandler(context utils.Context, w http.ResponseWriter, r *http.Request) {\n\n\tdbConn := context.DBConn\n\tdbBucket := context.DBBucketApp\n\n\tvars := mux.Vars(r)\n\n\tenv := vars[\"environment\"]\n\tapp := vars[\"application\"]\n\n\tkey := []byte(env + \"_\" + app)\n\n\tif err := database.DeleteDBValue(dbConn, dbBucket, key); err != nil {\n\t\tlog.LogInfo.Printf(\"Failed to read db value: %v\", err)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(\"200 - OK: valued deleted or was not found\"))\n\n}", "func (follower *Follower) OnDelete(m *Message, w io.Writer) (err error) {\n\tfollower.mutex.Lock()\n\tdelete(follower.table, m.GetKey())\n\tfollower.mutex.Unlock()\n\n\tlog.Printf(\"Table after Delete: %v\\n\", follower.table)\n\treturn nil\n}", "func (ms *schema2ManifestHandler) verifyManifest(ctx context.Context, mnfst schema2.DeserializedManifest, skipDependencyVerification bool) error {\n\tvar errs distribution.ErrManifestVerification\n\n\tif mnfst.Manifest.SchemaVersion != 2 {\n\t\treturn fmt.Errorf(\"unrecognized manifest schema version %d\", mnfst.Manifest.SchemaVersion)\n\t}\n\n\tif skipDependencyVerification {\n\t\treturn nil\n\t}\n\n\tmanifestService, err := ms.repository.Manifests(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tblobsService := ms.repository.Blobs(ctx)\n\n\tfor _, descriptor := range mnfst.References() {\n\t\tvar err error\n\n\t\tswitch descriptor.MediaType {\n\t\tcase schema2.MediaTypeForeignLayer:\n\t\t\t// Clients download this layer from an external URL, so do not check for\n\t\t\t// its presence.\n\t\t\tif len(descriptor.URLs) == 0 {\n\t\t\t\terr = errMissingURL\n\t\t\t}\n\t\t\tallow := ms.manifestURLs.allow\n\t\t\tdeny := ms.manifestURLs.deny\n\t\t\tfor _, u := range descriptor.URLs {\n\t\t\t\tvar pu *url.URL\n\t\t\t\tpu, err = url.Parse(u)\n\t\t\t\tif err != nil || (pu.Scheme != \"http\" && pu.Scheme != \"https\") || pu.Fragment != \"\" || (allow != nil && !allow.MatchString(u)) || (deny != nil && deny.MatchString(u)) {\n\t\t\t\t\terr = errInvalidURL\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\tcase schema2.MediaTypeManifest, schema1.MediaTypeManifest:\n\t\t\tvar exists bool\n\t\t\texists, err = manifestService.Exists(ctx, descriptor.Digest)\n\t\t\tif err != nil || !exists {\n\t\t\t\terr = distribution.ErrBlobUnknown // just coerce to unknown.\n\t\t\t}\n\n\t\t\tfallthrough // double check the blob store.\n\t\tdefault:\n\t\t\t// forward all else to blob storage\n\t\t\tif len(descriptor.URLs) == 0 {\n\t\t\t\t_, err = blobsService.Stat(ctx, descriptor.Digest)\n\t\t\t}\n\t\t}\n\n\t\tif err != nil {\n\t\t\tif err != distribution.ErrBlobUnknown {\n\t\t\t\terrs = append(errs, err)\n\t\t\t}\n\n\t\t\t// On error here, we always append unknown blob errors.\n\t\t\terrs = append(errs, distribution.ErrManifestBlobUnknown{Digest: descriptor.Digest})\n\t\t}\n\t}\n\n\tif len(errs) != 0 {\n\t\treturn errs\n\t}\n\n\treturn nil\n}", "func (es *EventSyncer) 
handleDeleteEvent(obj interface{}) {\n\tevent, ok := obj.(*corev1.Event)\n\tif !ok {\n\t\ttombstone, ok := obj.(cache.DeletedFinalStateUnknown)\n\t\tif !ok {\n\t\t\tklog.Errorf(\"Couldn't get object from tombstone %v\", obj)\n\t\t\treturn\n\t\t}\n\t\tevent, ok = tombstone.Obj.(*corev1.Event)\n\t\tif !ok {\n\t\t\tklog.Errorf(\"Tombstone contained object that is not a event %v\", obj)\n\t\t\treturn\n\t\t}\n\t}\n\n\tes.addKindAndVersion(event)\n\n\tgo syncToNode(watch.Deleted, util.ResourceEvent, event)\n\n\tsyncToStorage(es.ctx, watch.Deleted, util.ResourceEvent, event)\n}", "func (o *OCIDir) referrerDelete(ctx context.Context, r ref.Ref, m manifest.Manifest) error {\n\t// get refers field\n\tmSubject, ok := m.(manifest.Subjecter)\n\tif !ok {\n\t\treturn fmt.Errorf(\"manifest does not support subject: %w\", types.ErrUnsupportedMediaType)\n\t}\n\tsubject, err := mSubject.GetSubject()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// validate/set subject descriptor\n\tif subject == nil || subject.MediaType == \"\" || subject.Digest == \"\" || subject.Size <= 0 {\n\t\treturn fmt.Errorf(\"subject is not set%.0w\", types.ErrNotFound)\n\t}\n\n\t// get descriptor for subject\n\trSubject := r\n\trSubject.Tag = \"\"\n\trSubject.Digest = subject.Digest.String()\n\n\t// pull existing referrer list\n\trl, err := o.ReferrerList(ctx, rSubject)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = rl.Delete(m)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// push updated referrer list by tag\n\trlTag, err := referrer.FallbackTag(rSubject)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif rl.IsEmpty() {\n\t\terr = o.TagDelete(ctx, rlTag)\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\t\t// if delete is not supported, fall back to pushing empty list\n\t}\n\treturn o.ManifestPut(ctx, rlTag, rl.Manifest)\n}", "func (c *Controller) onDelete(obj interface{}) {\n\t//cluster := obj.(*crv1.Pgcluster)\n\t//\tlog.Debugf(\"[Controller] ns=%s onDelete %s\", cluster.ObjectMeta.Namespace, cluster.ObjectMeta.SelfLink)\n\n\t//handle pgcluster cleanup\n\t//\tclusteroperator.DeleteClusterBase(c.PgclusterClientset, c.PgclusterClient, cluster, cluster.ObjectMeta.Namespace)\n}", "func UploadManifest(ctx context.Context, repo distribution.Repository, tag string, manifest distribution.Manifest) error {\n\tcanonical, err := CanonicalManifest(manifest)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tms, err := repo.Manifests(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get manifest service for %s: %w\", repo.Named(), err)\n\t}\n\n\tdgst, err := ms.Put(ctx, manifest, distribution.WithTag(tag))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to upload manifest to %s: %w\", repo.Named(), err)\n\t}\n\n\tif expectedDgst := digest.FromBytes(canonical); dgst != expectedDgst {\n\t\treturn fmt.Errorf(\"upload manifest to %s failed: digest mismatch: got %s, want %s\", repo.Named(), dgst, expectedDgst)\n\t}\n\n\treturn nil\n}", "func (d *DataPolicyManifest) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", d, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &d.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &d.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &d.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase 
\"type\":\n\t\t\terr = unpopulate(val, \"Type\", &d.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", d, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (manager *Manager) onDeleteEgressPolicy(policy *Policy) {\n\tconfigID := ParseCEGPConfigID(policy)\n\n\tmanager.Lock()\n\tdefer manager.Unlock()\n\n\tlogger := log.WithField(logfields.CiliumEgressGatewayPolicyName, configID.Name)\n\n\tif manager.policyConfigs[configID] == nil {\n\t\tlogger.Warn(\"Can't delete CiliumEgressGatewayPolicy: policy not found\")\n\t}\n\n\tlogger.Debug(\"Deleted CiliumEgressGatewayPolicy\")\n\n\tdelete(manager.policyConfigs, configID)\n\n\tmanager.setEventBitmap(eventDeletePolicy)\n\tmanager.reconciliationTrigger.TriggerWithReason(\"policy deleted\")\n}", "func (am *ArtifactMap) DeleteApp(AppGUID string) {\n\tvar desiredApp *ArtifactEntry\n\tindex := 0\n\tfor i, app := range am.AppList {\n\t\tif app.GUID == AppGUID {\n\t\t\tdesiredApp = app\n\t\t\tindex = i\n\t\t\tbreak\n\t\t}\n\t}\n\tif desiredApp != nil {\n\t\tam.AppList = append(am.AppList[:index], am.AppList[index+1:]...)\n\t\tam.appTitleToID.Delete(desiredApp.Title)\n\t\tam.appTitleToItemID.Delete(desiredApp.Title)\n\t}\n}", "func (sm *stateMachine) OnDelete(key string) {\n\tif !sm.running.Load() {\n\t\tsm.logger.Warn(\"state machine is stopped\",\n\t\t\tlogger.String(\"type\", sm.stateMachineType.String()))\n\t\treturn\n\t}\n\tsm.logger.Info(\"discovery state removed\",\n\t\tlogger.String(\"type\", sm.stateMachineType.String()),\n\t\tlogger.String(\"key\", key))\n\tif sm.onDeleteFn != nil {\n\t\tsm.onDeleteFn(key)\n\t}\n}", "func ConvertManifest(ctx context.Context, store content.Store, desc ocispec.Descriptor) (ocispec.Descriptor, error) {\n\tif !(desc.MediaType == images.MediaTypeDockerSchema2Manifest ||\n\t\tdesc.MediaType == ocispec.MediaTypeImageManifest) {\n\n\t\tlog.G(ctx).Warnf(\"do nothing for media type: %s\", desc.MediaType)\n\t\treturn desc, nil\n\t}\n\n\t// read manifest data\n\tmb, err := content.ReadBlob(ctx, store, desc)\n\tif err != nil {\n\t\treturn ocispec.Descriptor{}, fmt.Errorf(\"failed to read index data: %w\", err)\n\t}\n\n\tvar manifest ocispec.Manifest\n\tif err := json.Unmarshal(mb, &manifest); err != nil {\n\t\treturn ocispec.Descriptor{}, fmt.Errorf(\"failed to unmarshal data into manifest: %w\", err)\n\t}\n\n\t// check config media type\n\tif manifest.Config.MediaType != LegacyConfigMediaType {\n\t\treturn desc, nil\n\t}\n\n\tmanifest.Config.MediaType = images.MediaTypeDockerSchema2Config\n\tdata, err := json.MarshalIndent(manifest, \"\", \" \")\n\tif err != nil {\n\t\treturn ocispec.Descriptor{}, fmt.Errorf(\"failed to marshal manifest: %w\", err)\n\t}\n\n\t// update manifest with gc labels\n\tdesc.Digest = digest.Canonical.FromBytes(data)\n\tdesc.Size = int64(len(data))\n\n\tlabels := map[string]string{}\n\tfor i, c := range append([]ocispec.Descriptor{manifest.Config}, manifest.Layers...) {\n\t\tlabels[fmt.Sprintf(\"containerd.io/gc.ref.content.%d\", i)] = c.Digest.String()\n\t}\n\n\tref := remotes.MakeRefKey(ctx, desc)\n\tif err := content.WriteBlob(ctx, store, ref, bytes.NewReader(data), desc, content.WithLabels(labels)); err != nil {\n\t\treturn ocispec.Descriptor{}, fmt.Errorf(\"failed to update content: %w\", err)\n\t}\n\treturn desc, nil\n}", "func (c *Controller) syncHandler(key string) error {\n\t// If an error occurs during handling, we'll requeue the item so we can\n\t// attempt processing again later. 
This could have been caused by a\n\t// temporary network failure, or any other transient reason.\n\n\t// Convert the namespace/name string into a distinct namespace and name\n\tns, name, err := cache.SplitMetaNamespaceKey(key)\n\tif err != nil {\n\t\tutilruntime.HandleError(fmt.Errorf(\"invalid resource key: %s\", key))\n\t\treturn nil\n\t}\n\n\tklog.V(4).Infof(\"start processing Manifest %q\", key)\n\t// Get the Manifest resource with this name\n\tmanifest, err := c.manifestLister.Manifests(ns).Get(name)\n\t// The Manifest resource may no longer exist, in which case we stop processing.\n\tif errors.IsNotFound(err) {\n\t\tklog.V(2).Infof(\"Manifest %q has been deleted\", key)\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif manifest.Template.Raw == nil {\n\t\tklog.Warning(\"manifest.Template.Raw is empty, %q\", klog.KObj(manifest))\n\t\treturn nil\n\t}\n\tutd := &unstructured.Unstructured{}\n\terr = json.Unmarshal(manifest.Template.Raw, &utd.Object)\n\tif err != nil {\n\t\tklog.Errorf(\"unmarshal error, %q, err=%v\", klog.KObj(manifest), err)\n\t\treturn err\n\t}\n\n\tresourceKind := utd.GroupVersionKind().Kind\n\n\tmatchAnnotations := util.FindAnnotationsMathKeyPrefix(utd.GetAnnotations())\n\t//为空则表示在update时候进行过更新,清除过annotation\n\tdeleteSubscription := len(matchAnnotations) == 0\n\tif manifest.DeletionTimestamp != nil {\n\t\t//删除\n\t\tdeleteSubscription = true\n\t}\n\tmatchLabels := map[string]string{\n\t\t\"bkbcs.tencent.com/resource-kind\": resourceKind,\n\t\t\"bkbcs.tencent.com/resource-ns\": utd.GetNamespace(),\n\t\t\"bkbcs.tencent.com/resource-name\": utd.GetName(),\n\t}\n\tsubscriptionName := c.genAutoCreateSubscriptionName(utd.GetName())\n\n\tsubscriptionList, err := c.clusternetClient.AppsV1alpha1().Subscriptions(utd.GetNamespace()).List(context.Background(), metav1.ListOptions{\n\t\tLabelSelector: labels.Set(matchLabels).String(),\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\t//只会存在0个或1个\n\tif len(subscriptionList.Items) > 1 {\n\t\treturn fmt.Errorf(\"auto create sub matchLabels match %d\", len(subscriptionList.Items))\n\t}\n\tif deleteSubscription {\n\t\tklog.Infof(\"start delete subscription %s\", subscriptionName)\n\t\t//删除Subscription\n\t\terr = c.clusternetClient.AppsV1alpha1().Subscriptions(utd.GetNamespace()).Delete(context.Background(), subscriptionList.Items[0].Name, metav1.DeleteOptions{})\n\t\tif errors.IsNotFound(err) {\n\t\t\tklog.V(2).Infof(\"Subscription %s:%s has been deleted\", ns, name)\n\t\t\treturn nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\t//更新或创建Subscription\n\tif len(subscriptionList.Items) == 0 {\n\t\t//create\n\t\tsubscription := &appsapi.Subscription{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: subscriptionName,\n\t\t\t\tNamespace: utd.GetNamespace(),\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\"bkbcs.tencent.com/created-by\": \"bcs-clusternet-controller\",\n\t\t\t\t},\n\t\t\t\tLabels: matchLabels,\n\t\t\t},\n\t\t\tSpec: c.genSubscriptionSpec(matchAnnotations, utd.GroupVersionKind(), utd.GetNamespace(), utd.GetName()),\n\t\t}\n\t\tklog.Infof(\"start create Subscriptions %q\", klog.KObj(subscription))\n\t\t_, err = c.clusternetClient.AppsV1alpha1().Subscriptions(utd.GetNamespace()).Create(context.Background(), subscription, metav1.CreateOptions{})\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"create Subscriptions %q error, err=%+v\", klog.KObj(subscription), err)\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\t//update\n\tmatchSubscription := 
subscriptionList.Items[0]\n\tmatchSubscription.Spec = c.genSubscriptionSpec(matchAnnotations, utd.GroupVersionKind(), utd.GetNamespace(), utd.GetName())\n\tklog.Infof(\"start update Subscriptions %q\", klog.KObj(&matchSubscription))\n\t_, err = c.clusternetClient.AppsV1alpha1().Subscriptions(utd.GetNamespace()).Update(context.Background(), &matchSubscription, metav1.UpdateOptions{})\n\tif err != nil {\n\t\tklog.Errorf(\"update subscriptions %q error, err=%v\", klog.KObj(&matchSubscription), err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func (m *SGController) onSgDeleted(sg *v1alpha1.Statefulguardian) {\n\tglog.Infof(\"Cluster %s deleted\", sg.Name)\n\texecCont:= NewExecController(sg)\n execCont.ClusterQuit(m.ctx)\n\tmetrics.IncEventCounter(sgsDeletedCount)\n\tmetrics.DecEventGauge(sgsTotalCount)\n\tglog.Infof(\"Delete statefulset\")\n\tm.statefulSetControl.DeleteStatefulSet(sg)\n}", "func (m *BoltMeta) Delete(key []byte) error {\n\terr := m.DS.Update(func(tx *bolt.Tx) error {\n\t\tbucket, err := tx.CreateBucketIfNotExists(m.MetadataBucket)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = bucket.Delete(key)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n\n\treturn err\n}", "func RegisterManifestHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {\n\treturn RegisterManifestHandlerClient(ctx, mux, NewManifestClient(conn))\n}", "func UpdateManifest(m Manifests, root string, serviceID flux.ResourceID, f func(manifest []byte) ([]byte, error)) error {\n\tservices, err := m.FindDefinedServices(root)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpaths := services[serviceID]\n\tif len(paths) == 0 {\n\t\treturn ErrNoResourceFilesFoundForService\n\t}\n\tif len(paths) > 1 {\n\t\treturn ErrMultipleResourceFilesFoundForService\n\t}\n\n\tdef, err := ioutil.ReadFile(paths[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnewDef, err := f(def)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfi, err := os.Stat(paths[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(paths[0], newDef, fi.Mode())\n}", "func (s *MetalLBSpeaker) OnDeleteService(svc *slim_corev1.Service) error {\n\tif s.shutDown() {\n\t\treturn ErrShutDown\n\t}\n\tvar (\n\t\tsvcID = k8s.ParseServiceID(svc)\n\t\tl = log.WithFields(logrus.Fields{\n\t\t\t\"component\": \"MetalLBSpeaker.OnDeleteService\",\n\t\t\t\"service-id\": svcID,\n\t\t})\n\t\tmeta = fence.Meta{}\n\t)\n\n\ts.Lock()\n\tdelete(s.services, svcID)\n\ts.Unlock()\n\n\tif err := meta.FromObjectMeta(&svc.ObjectMeta); err != nil {\n\t\tl.WithError(err).Error(\"failed to parse event metadata\")\n\t}\n\n\tl.Debug(\"adding event to queue\")\n\t// Passing nil as the service will force the MetalLB speaker to withdraw\n\t// the BGP announcement.\n\ts.queue.Add(svcEvent{\n\t\tMeta: meta,\n\t\top: Delete,\n\t\tid: svcID,\n\t\tsvc: nil,\n\t\teps: nil,\n\t})\n\treturn nil\n}", "func (ctrler CtrlDefReactor) OnConfigurationSnapshotDelete(obj *ConfigurationSnapshot) error {\n\tlog.Info(\"OnConfigurationSnapshotDelete is not implemented\")\n\treturn nil\n}", "func (m *Manifest) Unarchive(id uuid.UUID, txes ...*sqlx.Tx) (*db.Manifest, error) {\n\tconn := prepConn(m.Conn, txes...)\n\n\tu, err := db.FindManifest(conn, id.String())\n\tif err != nil {\n\t\treturn nil, terror.New(err, \"\")\n\t}\n\n\tif !u.Archived {\n\t\treturn u, nil\n\t}\n\n\tu.Archived = false\n\tu.ArchivedAt = null.TimeFromPtr(nil)\n\t_, err = u.Update(conn, boil.Whitelist(db.ManifestColumns.Archived, db.ManifestColumns.ArchivedAt))\n\tif err != nil {\n\t\treturn 
nil, terror.New(err, \"\")\n\t}\n\treturn u, nil\n}", "func TestSwizzlerRemoveMetadata(t *testing.T) {\n\tf, origMeta := createNewSwizzler(t)\n\n\tf.RemoveMetadata(\"targets/a\")\n\n\tfor role, metaBytes := range origMeta {\n\t\tnewMeta, err := f.MetadataCache.GetSized(role.String(), store.NoSizeLimit)\n\t\tif role != \"targets/a\" {\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.True(t, bytes.Equal(metaBytes, newMeta), \"bytes have changed for role %s\", role)\n\t\t} else {\n\t\t\trequire.Error(t, err)\n\t\t\trequire.IsType(t, store.ErrMetaNotFound{}, err)\n\t\t}\n\t}\n}", "func jsonDeleteHandler(w http.ResponseWriter, r *http.Request) {\n\trunID := r.FormValue(\"runID\")\n\n\t// Extract the username from the runID and the cookie to make sure they match.\n\trunUser := strings.Split(runID, \"-\")[0]\n\tloggedInUser := strings.Split(login.LoggedInAs(r), \"@\")[0]\n\tif !login.IsAdmin(r) && runUser != loggedInUser {\n\t\thttputils.ReportError(w, r, nil, \"You must be logged on as an admin to delete other users' runs.\")\n\t\treturn\n\t}\n\n\t// Remove ResultStore data.\n\terr := resultStore.RemoveRun(runID)\n\tif err != nil {\n\t\thttputils.ReportError(w, r, err, fmt.Sprintf(\"Failed to remove run %s from server\", runID))\n\t\treturn\n\t}\n\n\t// TODO(lchoi): Create a storage container class that has an aggregate remove\n\t// function and call that here to simplify the handler logic. PurgeDigests in\n\t// MemDiffStore must first be refactored to also remove diff images.\n\n\t// Remove screenshots and diff images from the DiffStore.\n\timagePath := filepath.Join(*imageDir, diffstore.DEFAULT_IMG_DIR_NAME, runID)\n\tdiffPath := filepath.Join(*imageDir, diffstore.DEFAULT_DIFFIMG_DIR_NAME, runID)\n\terr = os.RemoveAll(imagePath)\n\tif err != nil {\n\t\thttputils.ReportError(w, r, err, fmt.Sprintf(\"Failed to remove screenshots for run %s from DiffStore\", runID))\n\t\treturn\n\t}\n\terr = os.RemoveAll(diffPath)\n\tif err != nil {\n\t\thttputils.ReportError(w, r, err, fmt.Sprintf(\"Failed to remove diff images for run %s from DiffStore\", runID))\n\t\treturn\n\t}\n}", "func (m *manifestService) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {\n\tcontext.GetLogger(ctx).Debugf(\"(*manifestService).Put\")\n\n\tmh, err := NewManifestHandler(m.repo, manifest)\n\tif err != nil {\n\t\treturn \"\", regapi.ErrorCodeManifestInvalid.WithDetail(err)\n\t}\n\tmediaType, payload, _, err := mh.Payload()\n\tif err != nil {\n\t\treturn \"\", regapi.ErrorCodeManifestInvalid.WithDetail(err)\n\t}\n\n\t// this is fast to check, let's do it before verification\n\tif !m.acceptschema2 && mediaType == schema2.MediaTypeManifest {\n\t\treturn \"\", regapi.ErrorCodeManifestInvalid.WithDetail(fmt.Errorf(\"manifest V2 schema 2 not allowed\"))\n\t}\n\n\t// in order to stat the referenced blobs, repository need to be set on the context\n\tif err := mh.Verify(withRepository(ctx, m.repo), false); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t_, err = m.manifests.Put(withRepository(ctx, m.repo), manifest, options...)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tconfig, err := mh.Config(ctx)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdgst, err := mh.Digest()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Upload to openshift\n\tism := imageapiv1.ImageStreamMapping{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: m.repo.namespace,\n\t\t\tName: m.repo.name,\n\t\t},\n\t\tImage: imageapiv1.Image{\n\t\t\tObjectMeta: 
metav1.ObjectMeta{\n\t\t\t\tName: dgst.String(),\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\timageapi.ManagedByOpenShiftAnnotation: \"true\",\n\t\t\t\t\timageapi.ImageManifestBlobStoredAnnotation: \"true\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tDockerImageReference: fmt.Sprintf(\"%s/%s/%s@%s\", m.repo.config.registryAddr, m.repo.namespace, m.repo.name, dgst.String()),\n\t\t\tDockerImageManifest: string(payload),\n\t\t\tDockerImageManifestMediaType: mediaType,\n\t\t\tDockerImageConfig: string(config),\n\t\t},\n\t}\n\n\tfor _, option := range options {\n\t\tif opt, ok := option.(distribution.WithTagOption); ok {\n\t\t\tism.Tag = opt.Tag\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif _, err = m.repo.registryOSClient.ImageStreamMappings(m.repo.namespace).Create(&ism); err != nil {\n\t\t// if the error was that the image stream wasn't found, try to auto provision it\n\t\tstatusErr, ok := err.(*kerrors.StatusError)\n\t\tif !ok {\n\t\t\tcontext.GetLogger(ctx).Errorf(\"error creating ImageStreamMapping: %s\", err)\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif quotautil.IsErrorQuotaExceeded(statusErr) {\n\t\t\tcontext.GetLogger(ctx).Errorf(\"denied creating ImageStreamMapping: %v\", statusErr)\n\t\t\treturn \"\", distribution.ErrAccessDenied\n\t\t}\n\n\t\tstatus := statusErr.ErrStatus\n\t\tkind := strings.ToLower(status.Details.Kind)\n\t\tisValidKind := kind == \"imagestream\" /*pre-1.2*/ || kind == \"imagestreams\" /*1.2 to 1.6*/ || kind == \"imagestreammappings\" /*1.7+*/\n\t\tif !isValidKind || status.Code != http.StatusNotFound || status.Details.Name != m.repo.name {\n\t\t\tcontext.GetLogger(ctx).Errorf(\"error creating ImageStreamMapping: %s\", err)\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif _, err := m.repo.createImageStream(ctx); err != nil {\n\t\t\tif e, ok := err.(errcode.Error); ok && e.ErrorCode() == errcode.ErrorCodeUnknown {\n\t\t\t\t// TODO: convert statusErr to distribution error\n\t\t\t\treturn \"\", statusErr\n\t\t\t}\n\t\t\treturn \"\", err\n\t\t}\n\n\t\t// try to create the ISM again\n\t\tif _, err := m.repo.registryOSClient.ImageStreamMappings(m.repo.namespace).Create(&ism); err != nil {\n\t\t\tif quotautil.IsErrorQuotaExceeded(err) {\n\t\t\t\tcontext.GetLogger(ctx).Errorf(\"denied a creation of ImageStreamMapping: %v\", err)\n\t\t\t\treturn \"\", distribution.ErrAccessDenied\n\t\t\t}\n\t\t\tcontext.GetLogger(ctx).Errorf(\"error creating ImageStreamMapping: %s\", err)\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn dgst, nil\n}", "func (bot *bot) onGuildDelete(s *discordgo.Session, msg *discordgo.GuildDelete) {\n\tbot.logger.Debugf(\"Got GuildDelete event: %s\", msg.ID)\n\tbot.removeGuildChannels(guildID(msg.ID))\n}", "func deleteAppConfigHandler(ctx *gin.Context) {\n log.Info(fmt.Sprintf(\"received request to delete config %s\", ctx.Param(\"appId\")))\n\n // get app ID from path and convert to UUID\n appId, err := uuid.Parse(ctx.Param(\"appId\"))\n if err != nil {\n log.Error(fmt.Errorf(\"unable to app ID: %+v\", err))\n ctx.JSON(http.StatusBadRequest, gin.H{\n \"status_code\": http.StatusBadRequest, \"message\": \"Invalid app ID\"})\n return\n }\n\n db, _ := ctx.MustGet(\"db\").(*Persistence)\n _, err = db.GetConfigByAppId(appId)\n if err != nil {\n switch err {\n case ErrAppNotFound:\n log.Warn(fmt.Sprintf(\"cannot find config for app %s\", appId))\n ctx.JSON(http.StatusNotFound, gin.H{\n \"http_code\": http.StatusNotFound, \"message\": \"Cannot find config for app\"})\n default:\n log.Error(fmt.Errorf(\"unable to retrieve config from database: %+v\", err))\n 
ctx.JSON(http.StatusInternalServerError, gin.H{\n \"http_code\": http.StatusInternalServerError, \"message\": \"Internal server error\"})\n }\n return\n }\n\n if err := db.DeleteConfigByAppId(appId); err != nil {\n log.Error(fmt.Errorf(\"unable to delete config: %+v\", err))\n ctx.JSON(http.StatusInternalServerError, gin.H{\n \"http_code\": http.StatusInternalServerError, \"message\": \"Internal server error\"})\n return\n }\n ctx.JSON(http.StatusOK, gin.H{\n \"http_code\": http.StatusOK, \"message\": \"Successfully delete config\"})\n}", "func (i *IpScheduler) OnDelete(del interface{}) {\n\tdelPod, ok := del.(*v1.Pod)\n\tif !ok {\n\t\treturn\n\t}\n\tif delPod.GetNamespace() == \"bcs-system\" {\n\t\treturn\n\t}\n\tblog.Infof(\"pod %s/%s is deletd\", delPod.GetName(), delPod.GetNamespace())\n\ti.CacheLock.Lock()\n\ti.NodeIPCache.DeleteResource(cache.GetMetaKey(delPod.GetName(), delPod.GetNamespace()))\n\ti.CacheLock.Unlock()\n}", "func UpdateManifest(m Manifests, root string, paths []string, id flux.ResourceID, f func(manifest []byte) ([]byte, error)) error {\n\tresources, err := m.LoadManifests(root, paths)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresource, ok := resources[id.String()]\n\tif !ok {\n\t\treturn ErrResourceNotFound(id.String())\n\t}\n\n\tpath := filepath.Join(root, resource.Source())\n\tdef, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnewDef, err := f(def)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(path, newDef, fi.Mode())\n}", "func cleanupManifest(origData, finalData []byte) ([]byte, error) {\n\tobjectMetacreationTs := []byte(\"\\n creationTimestamp: null\\n\")\n\tspecTemplatecreationTs := []byte(\"\\n creationTimestamp: null\\n\")\n\tjobSpecTemplatecreationTs := []byte(\"\\n creationTimestamp: null\\n\")\n\tnullStatus := []byte(\"\\nstatus: {}\\n\")\n\tnullReplicaStatus := []byte(\"status:\\n replicas: 0\\n\")\n\tnullLBStatus := []byte(\"status:\\n loadBalancer: {}\\n\")\n\tnullMetaStatus := []byte(\"\\n status: {}\\n\")\n\n\tvar hasObjectMetacreationTs, hasSpecTemplatecreationTs, hasJobSpecTemplatecreationTs, hasNullStatus,\n\t\thasNullReplicaStatus, hasNullLBStatus, hasNullMetaStatus bool\n\n\tif origData != nil {\n\t\thasObjectMetacreationTs = bytes.Contains(origData, objectMetacreationTs)\n\t\thasSpecTemplatecreationTs = bytes.Contains(origData, specTemplatecreationTs)\n\t\thasJobSpecTemplatecreationTs = bytes.Contains(origData, jobSpecTemplatecreationTs)\n\n\t\thasNullStatus = bytes.Contains(origData, nullStatus)\n\t\thasNullReplicaStatus = bytes.Contains(origData, nullReplicaStatus)\n\t\thasNullLBStatus = bytes.Contains(origData, nullLBStatus)\n\t\thasNullMetaStatus = bytes.Contains(origData, nullMetaStatus)\n\t} // null value is false in case of origFile\n\n\tif !hasObjectMetacreationTs {\n\t\tfinalData = bytes.Replace(finalData, objectMetacreationTs, []byte(\"\\n\"), -1)\n\t}\n\tif !hasSpecTemplatecreationTs {\n\t\tfinalData = bytes.Replace(finalData, specTemplatecreationTs, []byte(\"\\n\"), -1)\n\t}\n\tif !hasJobSpecTemplatecreationTs {\n\t\tfinalData = bytes.Replace(finalData, jobSpecTemplatecreationTs, []byte(\"\\n\"), -1)\n\t}\n\tif !hasNullStatus {\n\t\tfinalData = bytes.Replace(finalData, nullStatus, []byte(\"\\n\"), -1)\n\t}\n\tif !hasNullReplicaStatus {\n\t\tfinalData = bytes.Replace(finalData, nullReplicaStatus, []byte(\"\\n\"), -1)\n\t}\n\tif !hasNullLBStatus {\n\t\tfinalData = bytes.Replace(finalData, nullLBStatus, 
[]byte(\"\\n\"), -1)\n\t}\n\tif !hasNullMetaStatus {\n\t\tfinalData = bytes.Replace(finalData, nullMetaStatus, []byte(\"\\n\"), -1)\n\t}\n\n\treturn finalData, nil\n}", "func TriggerPostDelete(appName string) error {\n\tif err := common.PropertyDestroy(\"apps\", appName); err != nil {\n\t\tcommon.LogWarn(err.Error())\n\t}\n\n\treturn nil\n}", "func Delete(config Config, plugin plugins.PluginSpec) error {\n\treturn handleYaml(config, delete, plugin)\n}", "func (c *Config) handleActionDeleteMessage(client MQTT.Client, message MQTT.Message) {\n\tactionsToBeDeleted := []actionmanager.Action{}\n\terr := json.Unmarshal(message.Payload(), &actionsToBeDeleted)\n\tif err != nil {\n\t\tklog.Errorf(\"Error in unmarshalling: %s\", err)\n\t}\n\tfor _, actionToBeDeleted := range actionsToBeDeleted {\n\t\tactionExists := false\n\t\tfor index, action := range c.ActionManager.Actions {\n\t\t\tif strings.EqualFold(action.Name, actionToBeDeleted.Name) {\n\t\t\t\tactionExists = true\n\t\t\t\tcopy(c.ActionManager.Actions[index:], c.ActionManager.Actions[index+1:])\n\t\t\t\tc.ActionManager.Actions = c.ActionManager.Actions[:len(c.ActionManager.Actions)-1]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tconfiguration.Config.ActionManager = c.ActionManager\n\t\tif !actionExists {\n\t\t\tklog.Errorf(\"Action: %s did not exist\", actionToBeDeleted.Name)\n\t\t} else {\n\t\t\tklog.Infof(\"Action: %s has been deleted \", actionToBeDeleted.Name)\n\t\t}\n\t}\n}", "func (p *PCache) Delete(kind string, in interface{}) error {\n\t// Deletes from cache and statemgr\n\tobj, err := runtime.GetObjectMeta(in)\n\tkey := obj.GetKey()\n\tif err != nil {\n\t\treturn fmt.Errorf((\"Object is not an apiserver object\"))\n\t}\n\tp.Log.Debugf(\"delete for %s %s\", kind, key)\n\n\terr = p.DeletePcache(kind, in)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp.RLock()\n\tkindInfo := p.kindOptsMap[kind]\n\tp.RUnlock()\n\n\tif kindInfo.WriteToApiserver {\n\t\tp.Log.Debugf(\"%s %s attempting to delete from statemgr\", kind, key)\n\t\treturn p.deleteStatemgr(in, kindInfo)\n\t}\n\treturn nil\n}", "func TriggerPostDelete(appName string) error {\n\treturn common.PropertyDestroy(\"registry\", appName)\n}", "func ExtractKindFromManifest(manifest string) string {\n\tre := regexp.MustCompile(kindRegex)\n\tmatches := re.FindStringSubmatch(manifest)\n\n\tif len(matches) > 0 {\n\t\treturn matches[1]\n\t}\n\n\treturn \"\"\n}", "func (t *targetrunner) receiveBucketMD(newbucketmd *bucketMD, msg *ActionMsg) (errstr string) {\n\tif msg.Action == \"\" {\n\t\tglog.Infof(\"receive bucket-metadata: version %d\", newbucketmd.version())\n\t} else {\n\t\tglog.Infof(\"receive bucket-metadata: version %d, message %+v\", newbucketmd.version(), msg)\n\t}\n\tt.bmdowner.Lock()\n\tbucketmd := t.bmdowner.get()\n\tmyver := bucketmd.version()\n\tif newbucketmd.version() <= myver {\n\t\tt.bmdowner.Unlock()\n\t\tif newbucketmd.version() < myver {\n\t\t\terrstr = fmt.Sprintf(\"Attempt to downgrade bucket-metadata version %d to %d\", myver, newbucketmd.version())\n\t\t}\n\t\treturn\n\t}\n\tt.bmdowner.put(newbucketmd)\n\tt.bmdowner.Unlock()\n\n\tfor bucket := range bucketmd.LBmap {\n\t\t_, ok := newbucketmd.LBmap[bucket]\n\t\tif !ok {\n\t\t\tglog.Infof(\"Destroy local bucket %s\", bucket)\n\t\t\tfor mpath := range ctx.mountpaths.Available {\n\t\t\t\tlocalbucketfqn := filepath.Join(makePathLocal(mpath), bucket)\n\t\t\t\tif err := os.RemoveAll(localbucketfqn); err != nil {\n\t\t\t\t\tglog.Errorf(\"Failed to destroy local bucket dir %q, err: %v\", localbucketfqn, 
err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tfor mpath := range ctx.mountpaths.Available {\n\t\tfor bucket := range bucketmd.LBmap {\n\t\t\tlocalbucketfqn := filepath.Join(makePathLocal(mpath), bucket)\n\t\t\tif err := CreateDir(localbucketfqn); err != nil {\n\t\t\t\tglog.Errorf(\"Failed to create local bucket dir %q, err: %v\", localbucketfqn, err)\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func (ctrler CtrlDefReactor) OnBucketDelete(obj *Bucket) error {\n\tlog.Info(\"OnBucketDelete is not implemented\")\n\treturn nil\n}", "func ComputeManifest(refToImage map[name.Reference]v1.Image) (Manifest, error) {\n\timageToTags := dedupRefToImage(refToImage)\n\treturn calculateManifest(imageToTags)\n}", "func (d *InboundCacheDriver) StoreManifest(location models.ImageReference, contents []byte, mediaType string, now time.Time) error {\n\td.Entries[location] = inboundCacheEntry{contents, mediaType, now}\n\treturn nil\n}", "func deleteTaskMeta(db *leveldb.DB, name string) error {\n\tif db == nil {\n\t\treturn terror.ErrWorkerLogInvalidHandler.Generate()\n\t}\n\n\terr := db.Delete(encodeTaskMetaKey(name), nil)\n\tif err != nil {\n\t\treturn terror.ErrWorkerLogDeleteTaskMeta.Delegate(err, name)\n\t}\n\n\treturn nil\n}", "func DeleteMatterHandler(c *gin.Context) {\n\tvar p form.BaseQueryParam\n\tif err := c.ShouldBind(&p); err != nil {\n\t\tutils.Error(c, err)\n\t\treturn\n\t}\n\n\tif err := models.DeleteMatterByUUID(p.UUID); err != nil {\n\t\tutils.Error(c, err)\n\t\treturn\n\t}\n\n\tutils.Ok(c, p.UUID)\n}", "func afterObjectDelete(ctx context.Context, object *storage.ObjectHandle, err error) error {\n\treturn err\n}", "func (p *Processor) ReplicateManifest(ctx context.Context, account keppel.Account, repo keppel.Repository, reference models.ManifestReference, actx keppel.AuditContext) (*keppel.Manifest, []byte, error) {\n\tmanifestBytes, manifestMediaType, err := p.downloadManifestViaInboundCache(ctx, account, repo, reference)\n\tif err != nil {\n\t\tif errorIsManifestNotFound(err) {\n\t\t\treturn nil, nil, UpstreamManifestMissingError{reference, err}\n\t\t}\n\t\treturn nil, nil, err\n\t}\n\n\t//parse the manifest to discover references to other manifests and blobs\n\tmanifestParsed, _, err := keppel.ParseManifest(manifestMediaType, manifestBytes)\n\tif err != nil {\n\t\treturn nil, nil, keppel.ErrManifestInvalid.With(err.Error())\n\t}\n\n\t//replicate referenced manifests recursively if required\n\tfor _, desc := range manifestParsed.ManifestReferences(account.PlatformFilter) {\n\t\t_, err := keppel.FindManifest(p.db, repo, desc.Digest)\n\t\tif errors.Is(err, sql.ErrNoRows) {\n\t\t\t_, _, err = p.ReplicateManifest(ctx, account, repo, models.ManifestReference{Digest: desc.Digest}, actx)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\n\t//mark all missing blobs as pending replication\n\tfor _, desc := range manifestParsed.BlobReferences() {\n\t\t//mark referenced blobs as pending replication if not replicated yet\n\t\tblob, err := p.FindBlobOrInsertUnbackedBlob(desc, account)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\t//also ensure that the blob is mounted in this repo (this is also\n\t\t//important if the blob exists; it may only have been replicated in a\n\t\t//different repo)\n\t\terr = keppel.MountBlobIntoRepo(p.db, *blob, repo)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\n\t//if the manifest is an image, we need to replicate the image configuration\n\t//blob immediately because ValidateAndStoreManifest() uses it for 
validation\n\t//purposes\n\tconfigBlobDesc := manifestParsed.FindImageConfigBlob()\n\tif configBlobDesc != nil {\n\t\tconfigBlob, err := keppel.FindBlobByAccountName(p.db, configBlobDesc.Digest, account)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tif configBlob.StorageID == \"\" {\n\t\t\t_, err = p.ReplicateBlob(ctx, *configBlob, account, repo, nil)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\tmanifest, err := p.ValidateAndStoreManifest(account, repo, IncomingManifest{\n\t\tReference: reference,\n\t\tMediaType: manifestMediaType,\n\t\tContents: manifestBytes,\n\t\tPushedAt: p.timeNow(),\n\t}, actx)\n\treturn manifest, manifestBytes, err\n}", "func delImage(w http.ResponseWriter, req *http.Request) {\n\n\t// Manage Cors\n\tsetCors(&w)\n\tif req.Method == \"OPTIONS\" {\n\t\treturn\n\t}\n\n\t// Authenticate user\n\tclaims, err := authRequest(req)\n\tif err != nil {\n\t\tlogger.Error(\"Unauthorized request to upload sending 401: %v\", err)\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\tw.Write([]byte(\"401 - Unauthorized request, ensure you sign in and obtain the jwt auth token\"))\n\t\treturn\n\t}\n\n\tvars := mux.Vars(req)\n\t// validate url parameters and retrieve imageMeta\n\timageMeta, err := validateVars(vars)\n\tif err != nil {\n\t\tlogger.Error(\"Failed to validate vars sending 400: %v\", err)\n\t\tif strings.Contains(err.Error(), \"404 - Not found\") {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\tw.Write([]byte(\"404 - Not found, no image with that information available\"))\n\t\t\treturn\n\t\t}\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(\"400 - Bad request unable to parse url parameters\"))\n\t\treturn\n\t}\n\n\t// Ensure there is no uid miss match\n\tuidVal, err := strconv.Atoi(vars[\"uid\"])\n\tif uidVal != int(imageMeta.Uid) {\n\t\tlogger.Error(\"uid miss match when attempting to delete image sending 400\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(\"400 - Uid mismatch ensure you are using the correct image reference\"))\n\t\treturn\n\t}\n\n\t// Ensure user has access permissions\n\tif claims.Uid != int(imageMeta.Uid) {\n\t\tlogger.Error(\"unauthorized user attempting to delete image\")\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\tw.Write([]byte(\"401 - Unauthorized, you do not have permissions to modify this image\"))\n\t\treturn\n\t}\n\n\t// Delete meta from database\n\terr = DeleteImageData(imageMeta)\n\tif err != nil {\n\t\tlogger.Error(\"failed to delete image from database sending 500: %v\", err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(\"500 - Unable to delete image from database, try again later\"))\n\t\treturn\n\t}\n\n\t// Delete file from storage\n\tfileRef := fmt.Sprintf(\"./%s/%s/%s\", IMAGE_DIR, vars[\"uid\"], vars[\"fileId\"])\n\terr = os.Remove(fileRef)\n\t// Orphaned file is ok to leave as database entry is already deleted\n\t// Automated data integrity checks or manual removal is recommended\n\t// This will look like a successfull deletion from the users perspective\n\tif err != nil {\n\t\tlogger.Error(\"failed to delete image data, clean orphaned files via automated data integrity check: %v\", err)\n\t} else {\n\t\tlogger.Info(\"Successfully deleted image: %v\", imageMeta.Id)\n\t}\n\n\treturn\n}", "func (a *ACBuild) ReplaceManifest(manifestPath string) (err error) {\n\tif err = a.lock(); err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif err1 := a.unlock(); err == nil {\n\t\t\terr = err1\n\t\t}\n\t}()\n\n\tfinfo, err := 
os.Stat(manifestPath)\n\tswitch {\n\tcase os.IsNotExist(err):\n\t\treturn fmt.Errorf(\"no such file or directory: %s\", manifestPath)\n\tcase err != nil:\n\t\treturn err\n\tcase finfo.IsDir():\n\t\treturn fmt.Errorf(\"%s is a directory\", manifestPath)\n\tdefault:\n\t\tbreak\n\t}\n\n\tmanblob, err := ioutil.ReadFile(manifestPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Marshal and Unmarshal the manifest to assert that it's valid and to\n\t// strip any whitespace\n\n\tvar man schema.ImageManifest\n\terr = man.UnmarshalJSON(manblob)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmanblob, err = man.MarshalJSON()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn ioutil.WriteFile(path.Join(a.CurrentACIPath, aci.ManifestFile), manblob, 0755)\n}", "func (m *MinishiftRunner) EnsureDeleted() error {\n\tm.RunCommandAndPrintError(\"delete --force\")\n\n\tdeleted := m.CheckStatus(\"Does Not Exist\")\n\tif deleted == false {\n\t\treturn errors.New(\"Deletion of minishift instance was not successful!\")\n\t}\n\n\treturn nil\n}", "func newManifestDeleteCmd(manifestParams *manifestParameters) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"delete\",\n\t\tShort: \"Delete manifest from a repository\",\n\t\tLong: newManifestDeleteCmdLongMessage,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tregistryName, err := manifestParams.GetRegistryName()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tloginURL := api.LoginURL(registryName)\n\t\t\tacrClient, err := api.GetAcrCLIClientWithAuth(loginURL, manifestParams.username, manifestParams.password, manifestParams.configs)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tctx := context.Background()\n\t\t\terr = deleteManifests(ctx, acrClient, loginURL, manifestParams.repoName, args)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t}\n\n\treturn cmd\n}", "func (service *ProjectService) RemoveManifestEntry(at int) error {\n\tmanifest := service.mod.World()\n\tentry, err := manifest.Entry(at)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn service.commander.Register(\n\t\tcmd.Named(\"RemoveManifestEntry\"),\n\t\tcmd.Forward(func(modder world.Modder) error {\n\t\t\treturn manifest.RemoveEntry(at)\n\t\t}),\n\t\tcmd.Reverse(func(modder world.Modder) error {\n\t\t\treturn manifest.InsertEntry(at, entry)\n\t\t}),\n\t)\n}" ]
[ "0.7518358", "0.72563094", "0.6750114", "0.6628918", "0.6548979", "0.65356153", "0.6269419", "0.61748946", "0.6108742", "0.5964179", "0.5844784", "0.5762061", "0.57612425", "0.5721427", "0.5636067", "0.55578166", "0.55270004", "0.54325235", "0.5378246", "0.5231348", "0.51393896", "0.5107284", "0.5041435", "0.50396556", "0.49881947", "0.49754554", "0.4965625", "0.4962901", "0.49559453", "0.4948405", "0.4942128", "0.49223283", "0.4896784", "0.489515", "0.4891379", "0.48751673", "0.48710933", "0.48521778", "0.48502922", "0.48497415", "0.48493102", "0.48293012", "0.48275676", "0.4806508", "0.48009673", "0.4799116", "0.47955748", "0.47675937", "0.47594693", "0.47561574", "0.4744037", "0.47410232", "0.47368357", "0.4736163", "0.4733307", "0.4722616", "0.46885628", "0.46885228", "0.46831283", "0.4680995", "0.4677794", "0.46759054", "0.46686164", "0.4663949", "0.4661308", "0.4659165", "0.46454698", "0.46424967", "0.45962298", "0.459237", "0.45847657", "0.45823514", "0.4580604", "0.4574006", "0.45738888", "0.45723897", "0.4571063", "0.4569047", "0.45666575", "0.4561441", "0.45595175", "0.4535443", "0.45316228", "0.45232156", "0.4521395", "0.45146322", "0.45118627", "0.45056283", "0.45012575", "0.44972104", "0.44962814", "0.44780597", "0.44779342", "0.44755685", "0.44728416", "0.44707215", "0.4462026", "0.4454086", "0.44480476", "0.44438004" ]
0.86380994
0
OnDeleteManifest is called when a manifest is downloaded. It increments the download counter on that manifest.
func OnGetManifest(name, reference string, body []byte, storeController storage.StoreController, metaDB mTypes.MetaDB, log log.Logger, ) error { // check if image is a signature isSignature, _, _, err := storage.CheckIsImageSignature(name, body, reference) if err != nil { log.Error().Err(err).Msg("can't check if manifest is a signature or not") return err } if !isSignature { err := metaDB.IncrementImageDownloads(name, reference) if err != nil { log.Error().Err(err).Str("repository", name).Str("reference", reference). Msg("unexpected error for image") return err } } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func OnDeleteManifest(repo, reference, mediaType string, digest godigest.Digest, manifestBlob []byte,\n\tstoreController storage.StoreController, metaDB mTypes.MetaDB, log log.Logger,\n) error {\n\timgStore := storeController.GetImageStore(repo)\n\n\tisSignature, signatureType, signedManifestDigest, err := storage.CheckIsImageSignature(repo, manifestBlob,\n\t\treference)\n\tif err != nil {\n\t\tlog.Error().Err(err).Msg(\"can't check if image is a signature or not\")\n\n\t\treturn err\n\t}\n\n\tmanageRepoMetaSuccessfully := true\n\n\tif isSignature {\n\t\terr = metaDB.DeleteSignature(repo, signedManifestDigest, mTypes.SignatureMetadata{\n\t\t\tSignatureDigest: digest.String(),\n\t\t\tSignatureType: signatureType,\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Error().Err(err).Msg(\"metadb: can't check if image is a signature or not\")\n\t\t\tmanageRepoMetaSuccessfully = false\n\t\t}\n\t} else {\n\t\terr = metaDB.DeleteRepoTag(repo, reference)\n\t\tif err != nil {\n\t\t\tlog.Info().Msg(\"metadb: restoring image store\")\n\n\t\t\t// restore image store\n\t\t\t_, _, err := imgStore.PutImageManifest(repo, reference, mediaType, manifestBlob)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error().Err(err).Msg(\"metadb: error while restoring image store, database is not consistent\")\n\t\t\t}\n\n\t\t\tmanageRepoMetaSuccessfully = false\n\t\t}\n\n\t\tif referredDigest, hasSubject := common.GetReferredSubject(manifestBlob); hasSubject {\n\t\t\terr := metaDB.DeleteReferrer(repo, referredDigest, digest)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error().Err(err).Msg(\"metadb: error while deleting referrer\")\n\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif !manageRepoMetaSuccessfully {\n\t\tlog.Info().Str(\"tag\", reference).Str(\"repository\", repo).\n\t\t\tMsg(\"metadb: deleting image meta was unsuccessful for tag in repo\")\n\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (imh *manifestHandler) DeleteManifest(w http.ResponseWriter, r *http.Request) {\n\tdcontext.GetLogger(imh).Debug(\"DeleteImageManifest\")\n\n\tif imh.App.isCache {\n\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported)\n\t\treturn\n\t}\n\n\tif imh.Tag != \"\" {\n\t\tdcontext.GetLogger(imh).Debug(\"DeleteImageTag\")\n\t\ttagService := imh.Repository.Tags(imh.Context)\n\t\tif err := tagService.Untag(imh.Context, imh.Tag); err != nil {\n\t\t\tswitch err.(type) {\n\t\t\tcase distribution.ErrTagUnknown, driver.PathNotFoundError:\n\t\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err))\n\t\t\tdefault:\n\t\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tw.WriteHeader(http.StatusAccepted)\n\t\treturn\n\t}\n\n\tmanifests, err := imh.Repository.Manifests(imh)\n\tif err != nil {\n\t\timh.Errors = append(imh.Errors, err)\n\t\treturn\n\t}\n\n\terr = manifests.Delete(imh, imh.Digest)\n\tif err != nil {\n\t\tswitch err {\n\t\tcase digest.ErrDigestUnsupported:\n\t\tcase digest.ErrDigestInvalidFormat:\n\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid)\n\t\t\treturn\n\t\tcase distribution.ErrBlobUnknown:\n\t\t\timh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown)\n\t\t\treturn\n\t\tcase distribution.ErrUnsupported:\n\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported)\n\t\t\treturn\n\t\tdefault:\n\t\t\timh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown)\n\t\t\treturn\n\t\t}\n\t}\n\n\ttagService := imh.Repository.Tags(imh)\n\treferencedTags, err := tagService.Lookup(imh, distribution.Descriptor{Digest: 
imh.Digest})\n\tif err != nil {\n\t\timh.Errors = append(imh.Errors, err)\n\t\treturn\n\t}\n\n\tfor _, tag := range referencedTags {\n\t\tif err := tagService.Untag(imh, tag); err != nil {\n\t\t\timh.Errors = append(imh.Errors, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tw.WriteHeader(http.StatusAccepted)\n}", "func (p *Processor) DeleteManifest(account keppel.Account, repo keppel.Repository, manifestDigest digest.Digest, actx keppel.AuditContext) error {\n\tvar (\n\t\ttagResults []keppel.Tag\n\t\ttags []string\n\t)\n\n\t_, err := p.db.Select(&tagResults,\n\t\t`SELECT * FROM tags WHERE repo_id = $1 AND digest = $2`,\n\t\trepo.ID, manifestDigest)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, tagResult := range tagResults {\n\t\ttags = append(tags, tagResult.Name)\n\t}\n\n\tresult, err := p.db.Exec(\n\t\t//this also deletes tags referencing this manifest because of \"ON DELETE CASCADE\"\n\t\t`DELETE FROM manifests WHERE repo_id = $1 AND digest = $2`,\n\t\trepo.ID, manifestDigest)\n\tif err != nil {\n\t\totherDigest, err2 := p.db.SelectStr(\n\t\t\t`SELECT parent_digest FROM manifest_manifest_refs WHERE repo_id = $1 AND child_digest = $2`,\n\t\t\trepo.ID, manifestDigest)\n\t\t// more than one manifest is referenced by another manifest\n\t\tif otherDigest != \"\" && err2 == nil {\n\t\t\treturn fmt.Errorf(\"cannot delete a manifest which is referenced by the manifest %s\", otherDigest)\n\t\t}\n\t\t// if the SELECT failed return the previous error to not shadow it\n\t\treturn err\n\t}\n\trowsDeleted, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif rowsDeleted == 0 {\n\t\treturn sql.ErrNoRows\n\t}\n\n\t//We delete in the storage *after* the deletion is durable in the DB to be\n\t//extra sure that we did not break any constraints (esp. manifest-manifest\n\t//refs and manifest-blob refs) that the DB enforces. Doing things in this\n\t//order might mean that, if DeleteManifest fails, we're left with a manifest\n\t//in the backing storage that is not referenced in the DB anymore, but this\n\t//is not a huge problem since the janitor can clean those up after the fact.\n\t//What's most important is that we don't lose any data in the backing storage\n\t//while it is still referenced in the DB.\n\t//\n\t//Also, the DELETE statement could fail if some concurrent process created a\n\t//manifest reference in the meantime. 
If that happens, and we have already\n\t//deleted the manifest in the backing storage, we've caused an inconsistency\n\t//that we cannot recover from.\n\terr = p.sd.DeleteManifest(account, repo.Name, manifestDigest)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif userInfo := actx.UserIdentity.UserInfo(); userInfo != nil {\n\t\tp.auditor.Record(audittools.EventParameters{\n\t\t\tTime: p.timeNow(),\n\t\t\tRequest: actx.Request,\n\t\t\tUser: userInfo,\n\t\t\tReasonCode: http.StatusOK,\n\t\t\tAction: cadf.DeleteAction,\n\t\t\tTarget: auditManifest{\n\t\t\t\tAccount: account,\n\t\t\t\tRepository: repo,\n\t\t\t\tDigest: manifestDigest,\n\t\t\t\tTags: tags,\n\t\t\t},\n\t\t})\n\t}\n\n\treturn nil\n}", "func (mr *ManifestResource) Delete(_ http.ResponseWriter, req *http.Request, _ httprouter.Params) restful.Exchanger {\n\treturn &DELETEManifestHandler{\n\t\tState: mr.context.liveState(),\n\t\tQueryValues: mr.ParseQuery(req),\n\t\tStateWriter: sous.StateWriter(mr.context.StateManager),\n\t}\n}", "func (d *swiftDriver) DeleteManifest(account keppel.Account, repoName string, manifestDigest digest.Digest) error {\n\tc, _, err := d.getBackendConnection(account)\n\tif err != nil {\n\t\treturn err\n\t}\n\to := manifestObject(c, repoName, manifestDigest)\n\treturn o.Delete(nil, nil)\n}", "func DeleteManifest(registry, repository string, manifest manifest.Data) error {\n\t// Will perform an actual delete\n\tdeleteCmd := newDeleteManifestsCommand(registry, repository, manifest.Digest)\n\n\tvar outb bytes.Buffer\n\tdeleteCmd.Stdout = &outb\n\n\treturn deleteCmd.Run()\n}", "func (m *manifestService) Delete(ctx context.Context, dgst digest.Digest) error {\n\tcontext.GetLogger(ctx).Debugf(\"(*manifestService).Delete\")\n\treturn m.manifests.Delete(withRepository(ctx, m.repo), dgst)\n}", "func (rl *ReferrerList) Delete(m manifest.Manifest) error {\n\trlM, ok := rl.Manifest.GetOrig().(v1.Index)\n\tif !ok {\n\t\treturn fmt.Errorf(\"referrer list manifest is not an OCI index for %s\", rl.Subject.CommonName())\n\t}\n\t// delete matching entries from the list\n\tmDesc := m.GetDescriptor()\n\tfound := false\n\tfor i := len(rlM.Manifests) - 1; i >= 0; i-- {\n\t\tif rlM.Manifests[i].Digest == mDesc.Digest {\n\t\t\tif i < len(rlM.Manifests)-1 {\n\t\t\t\trlM.Manifests = append(rlM.Manifests[:i], rlM.Manifests[i+1:]...)\n\t\t\t} else {\n\t\t\t\trlM.Manifests = rlM.Manifests[:i]\n\t\t\t}\n\t\t\tfound = true\n\t\t}\n\t}\n\tif !found {\n\t\treturn fmt.Errorf(\"subject not found in referrer list%.0w\", types.ErrNotFound)\n\t}\n\terr := rl.Manifest.SetOrig(rlM)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (is *ObjectStorage) DeleteImageManifest(repo, reference string, detectCollisions bool) error {\n\tdir := path.Join(is.rootDir, repo)\n\tif fi, err := is.store.Stat(context.Background(), dir); err != nil || !fi.IsDir() {\n\t\treturn zerr.ErrRepoNotFound\n\t}\n\n\tvar lockLatency time.Time\n\n\tvar err error\n\n\tis.Lock(&lockLatency)\n\tdefer func() {\n\t\tis.Unlock(&lockLatency)\n\n\t\tif err == nil {\n\t\t\tmonitoring.SetStorageUsage(is.metrics, is.rootDir, repo)\n\t\t}\n\t}()\n\n\tindex, err := common.GetIndex(is, repo, is.log)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmanifestDesc, err := common.RemoveManifestDescByReference(&index, reference, detectCollisions)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = common.UpdateIndexWithPrunedImageManifests(is, &index, repo, manifestDesc, manifestDesc.Digest, is.log)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// now update \"index.json\"\n\tdir = 
path.Join(is.rootDir, repo)\n\tfile := path.Join(dir, \"index.json\")\n\n\tbuf, err := json.Marshal(index)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := writeFile(is.store, file, buf); err != nil {\n\t\tis.log.Debug().Str(\"deleting reference\", reference).Msg(\"\")\n\n\t\treturn err\n\t}\n\n\t// Delete blob only when blob digest not present in manifest entry.\n\t// e.g. 1.0.1 & 1.0.2 have same blob digest so if we delete 1.0.1, blob should not be removed.\n\ttoDelete := true\n\n\tfor _, manifest := range index.Manifests {\n\t\tif manifestDesc.Digest.String() == manifest.Digest.String() {\n\t\t\ttoDelete = false\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif toDelete {\n\t\tp := path.Join(dir, \"blobs\", manifestDesc.Digest.Algorithm().String(), manifestDesc.Digest.Encoded())\n\n\t\terr = is.store.Delete(context.Background(), p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (s *s3ManifestService) Delete(ctx context.Context, dgst godigest.Digest) error {\n\treturn fmt.Errorf(\"unimplemented\")\n}", "func (o *OCIDir) referrerDelete(ctx context.Context, r ref.Ref, m manifest.Manifest) error {\n\t// get refers field\n\tmSubject, ok := m.(manifest.Subjecter)\n\tif !ok {\n\t\treturn fmt.Errorf(\"manifest does not support subject: %w\", types.ErrUnsupportedMediaType)\n\t}\n\tsubject, err := mSubject.GetSubject()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// validate/set subject descriptor\n\tif subject == nil || subject.MediaType == \"\" || subject.Digest == \"\" || subject.Size <= 0 {\n\t\treturn fmt.Errorf(\"subject is not set%.0w\", types.ErrNotFound)\n\t}\n\n\t// get descriptor for subject\n\trSubject := r\n\trSubject.Tag = \"\"\n\trSubject.Digest = subject.Digest.String()\n\n\t// pull existing referrer list\n\trl, err := o.ReferrerList(ctx, rSubject)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = rl.Delete(m)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// push updated referrer list by tag\n\trlTag, err := referrer.FallbackTag(rSubject)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif rl.IsEmpty() {\n\t\terr = o.TagDelete(ctx, rlTag)\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\t\t// if delete is not supported, fall back to pushing empty list\n\t}\n\treturn o.ManifestPut(ctx, rlTag, rl.Manifest)\n}", "func deleteManifests(ctx context.Context, acrClient api.AcrCLIClientInterface, loginURL string, repoName string, args []string) error {\n\tfor i := 0; i < len(args); i++ {\n\t\t_, err := acrClient.DeleteManifest(ctx, repoName, args[i])\n\t\tif err != nil {\n\t\t\t// If there is an error (this includes not found and not allowed operations) the deletion of the images is stopped and an error is returned.\n\t\t\treturn errors.Wrap(err, \"failed to delete manifests\")\n\t\t}\n\t\tfmt.Printf(\"%s/%s@%s\\n\", loginURL, repoName, args[i])\n\t}\n\treturn nil\n}", "func (is *ImageStoreLocal) DeleteImageManifest(repo, reference string, detectCollision bool) error {\n\tdir := path.Join(is.rootDir, repo)\n\tif !is.DirExists(dir) {\n\t\treturn zerr.ErrRepoNotFound\n\t}\n\n\tvar lockLatency time.Time\n\n\tvar err error\n\n\tis.Lock(&lockLatency)\n\tdefer func() {\n\t\tis.Unlock(&lockLatency)\n\n\t\tif err == nil {\n\t\t\tmonitoring.SetStorageUsage(is.metrics, is.rootDir, repo)\n\t\t}\n\t}()\n\n\tindex, err := common.GetIndex(is, repo, is.log)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmanifestDesc, err := common.RemoveManifestDescByReference(&index, reference, detectCollision)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = common.UpdateIndexWithPrunedImageManifests(is, 
&index, repo, manifestDesc, manifestDesc.Digest, is.log)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// now update \"index.json\"\n\tdir = path.Join(is.rootDir, repo)\n\tfile := path.Join(dir, \"index.json\")\n\n\tbuf, err := json.Marshal(index)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := is.writeFile(file, buf); err != nil {\n\t\treturn err\n\t}\n\n\t// Delete blob only when blob digest not present in manifest entry.\n\t// e.g. 1.0.1 & 1.0.2 have same blob digest so if we delete 1.0.1, blob should not be removed.\n\ttoDelete := true\n\n\tfor _, manifest := range index.Manifests {\n\t\tif manifestDesc.Digest.String() == manifest.Digest.String() {\n\t\t\ttoDelete = false\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif toDelete {\n\t\tp := path.Join(dir, \"blobs\", manifestDesc.Digest.Algorithm().String(), manifestDesc.Digest.Encoded())\n\n\t\t_ = os.Remove(p)\n\t}\n\n\treturn nil\n}", "func (mc *manifestCache) Delete(db string) {\n\tmc.mu.Lock()\n\tdefer mc.mu.Unlock()\n\n\tif entry, ok := mc.entry(db); ok {\n\t\tmc.totalSize -= entry.contents.size()\n\t\tmc.lru.Remove(entry.lruEntry)\n\t\tdelete(mc.cache, db)\n\t}\n\n\treturn\n}", "func downloadDeleteHandler(w http.ResponseWriter, r *http.Request) {\n\tupDownDelete(\"download\", theCfg.downloadDir, false, w, r)\n\n}", "func (bm *BlobsManifest) Delete() error {\n\n\tfor _, chunk := range bm.Chunks {\n\t\t// for Huge Blob mode, no need remove blobs\n\t\t_, _, length := utils.ParseBlobDigest(chunk)\n\t\tif length != 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tb := blobs.GetBlobPartial(\"\", chunk)\n\t\tif b != nil {\n\t\t\tb.Delete()\n\t\t}\n\t}\n\n\t// to remove Huge Blob Image\n\timageDir := configuration.RootDirectory() + manifest.ManifestDir + \"/\" + bm.BlobSum\n\tutils.RemoveDir(imageDir)\n\n\tutils.Remove(blobsManifestPath(bm.BlobSum))\n\n\treturn nil\n}", "func (job *purgeManifestJob) process(ctx context.Context, acrClient api.AcrCLIClientInterface) error {\n\tresp, err := acrClient.DeleteManifest(ctx, job.repoName, job.digest)\n\tif err == nil {\n\t\tfmt.Printf(\"Deleted %s/%s@%s\\n\", job.loginURL, job.repoName, job.digest)\n\t\treturn nil\n\t}\n\n\tif resp != nil && resp.Response != nil && resp.StatusCode == http.StatusNotFound {\n\t\t// If the manifest is not found it can be assumed to have been deleted.\n\t\tfmt.Printf(\"Skipped %s/%s@%s, HTTP status: %d\\n\", job.loginURL, job.repoName, job.digest, resp.StatusCode)\n\t\treturn nil\n\t}\n\n\treturn err\n}", "func (reg *Reg) referrerDelete(ctx context.Context, r ref.Ref, m manifest.Manifest) error {\n\t// get subject field\n\tmSubject, ok := m.(manifest.Subjecter)\n\tif !ok {\n\t\treturn fmt.Errorf(\"manifest does not support the subject field: %w\", types.ErrUnsupportedMediaType)\n\t}\n\tsubject, err := mSubject.GetSubject()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// validate/set subject descriptor\n\tif subject == nil || subject.MediaType == \"\" || subject.Digest == \"\" || subject.Size <= 0 {\n\t\treturn fmt.Errorf(\"refers is not set%.0w\", types.ErrNotFound)\n\t}\n\n\trSubject := r\n\trSubject.Tag = \"\"\n\trSubject.Digest = subject.Digest.String()\n\n\t// if referrer API is available, nothing to do, return\n\tif reg.referrerPing(ctx, rSubject) {\n\t\treturn nil\n\t}\n\n\t// fallback to using tag schema for refers\n\trl, err := reg.referrerListTag(ctx, rSubject)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = rl.Delete(m)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// push updated referrer list by tag\n\trlTag, err := referrer.FallbackTag(rSubject)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\tif rl.IsEmpty() {\n\t\terr = reg.TagDelete(ctx, rlTag)\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\t\t// if delete is not supported, fall back to pushing empty list\n\t}\n\treturn reg.ManifestPut(ctx, rlTag, rl.Manifest)\n}", "func (sma *SmIPAM) OnIPAMPolicyDelete(obj *ctkit.IPAMPolicy) error {\n\tlog.Info(\"OnIPAMPolicyDelete: received: \", obj.Spec)\n\n\tpolicy, err := sma.FindIPAMPolicy(obj.Tenant, obj.Namespace, obj.Name)\n\n\tif err != nil {\n\t\tlog.Error(\"FindIPAMPolicy returned an error: \", err, \"for: \", obj.Tenant, obj.Namespace, obj.Name)\n\t\treturn errors.New(\"Object doesn't exist\")\n\t}\n\n\t// delete it from the DB\n\treturn sma.sm.DeleteObjectToMbus(\"\", policy, nil)\n}", "func downloadAsyncDeleteHandler(w http.ResponseWriter, r *http.Request) {\n\tupDownDelete(\"download\", theCfg.downloadDir, true, w, r)\n}", "func (_m *DirectRepositoryWriter) DeleteManifest(ctx context.Context, id manifest.ID) error {\n\tret := _m.Called(ctx, id)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, manifest.ID) error); ok {\n\t\tr0 = rf(ctx, id)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (_m *RepositoryWriter) DeleteManifest(ctx context.Context, id manifest.ID) error {\n\tret := _m.Called(ctx, id)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, manifest.ID) error); ok {\n\t\tr0 = rf(ctx, id)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (i *IpScheduler) OnDelete(del interface{}) {\n\tdelPod, ok := del.(*v1.Pod)\n\tif !ok {\n\t\treturn\n\t}\n\tif delPod.GetNamespace() == \"bcs-system\" {\n\t\treturn\n\t}\n\tblog.Infof(\"pod %s/%s is deletd\", delPod.GetName(), delPod.GetNamespace())\n\ti.CacheLock.Lock()\n\ti.NodeIPCache.DeleteResource(cache.GetMetaKey(delPod.GetName(), delPod.GetNamespace()))\n\ti.CacheLock.Unlock()\n}", "func revertToManifest(kv *DB, mf *Manifest, idMap map[uint64]struct{}) error {\n\t// 1. Check all files in manifest exist.\n\tfor id := range mf.Tables {\n\t\tif _, ok := idMap[id]; !ok {\n\t\t\treturn fmt.Errorf(\"file does not exist for table %d\", id)\n\t\t}\n\t}\n\n\t// 2. 
Delete files that shouldn't exist.\n\tfor id := range idMap {\n\t\tif _, ok := mf.Tables[id]; !ok {\n\t\t\tkv.elog.Printf(\"Table file %d not referenced in MANIFEST\\n\", id)\n\t\t\tfilename := table.NewFilename(id, kv.opt.Dir)\n\t\t\tif err := os.Remove(filename); err != nil {\n\t\t\t\treturn y.Wrapf(err, \"While removing table %d\", id)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (follower *Follower) OnDelete(m *Message, w io.Writer) (err error) {\n\tfollower.mutex.Lock()\n\tdelete(follower.table, m.GetKey())\n\tfollower.mutex.Unlock()\n\n\tlog.Printf(\"Table after Delete: %v\\n\", follower.table)\n\treturn nil\n}", "func (ir *ImageEngine) ManifestRemoveDigest(ctx context.Context, name string, image string) (string, error) {\n\tupdatedListID, err := manifests.Remove(ir.ClientCtx, name, image, nil)\n\tif err != nil {\n\t\treturn updatedListID, fmt.Errorf(\"removing from manifest %s: %w\", name, err)\n\t}\n\treturn fmt.Sprintf(\"%s :%s\\n\", updatedListID, image), nil\n}", "func (c *Controller) OnDelete(del common.Cluster) {\n\tblog.Infof(\"cluster %+v delete\", del)\n\tif _, ok := c.reconcilerMap[del.ClusterID]; ok {\n\t\tblog.Infof(\"delete del reconciler for %+v\", del)\n\t\t// call cancel function\n\t\tc.cancelFuncMap[del.ClusterID]()\n\t\tdelete(c.cancelFuncMap, del.ClusterID)\n\t\tdelete(c.reconcilerMap, del.ClusterID)\n\t} else {\n\t\tblog.Infof(\"no reconciler for cluster %+v, need to delete\", del)\n\t}\n}", "func (c *Controller) onDelete(obj interface{}) {\n\t//cluster := obj.(*crv1.Pgcluster)\n\t//\tlog.Debugf(\"[Controller] ns=%s onDelete %s\", cluster.ObjectMeta.Namespace, cluster.ObjectMeta.SelfLink)\n\n\t//handle pgcluster cleanup\n\t//\tclusteroperator.DeleteClusterBase(c.PgclusterClientset, c.PgclusterClient, cluster, cluster.ObjectMeta.Namespace)\n}", "func OnUpdateManifest(repo, reference, mediaType string, digest godigest.Digest, body []byte,\n\tstoreController storage.StoreController, metaDB mTypes.MetaDB, log log.Logger,\n) error {\n\timgStore := storeController.GetImageStore(repo)\n\n\t// check if image is a signature\n\tisSignature, signatureType, signedManifestDigest, err := storage.CheckIsImageSignature(repo, body, reference)\n\tif err != nil {\n\t\tlog.Error().Err(err).Msg(\"can't check if image is a signature or not\")\n\n\t\tif err := imgStore.DeleteImageManifest(repo, reference, false); err != nil {\n\t\t\tlog.Error().Err(err).Str(\"manifest\", reference).Str(\"repository\", repo).Msg(\"couldn't remove image manifest in repo\")\n\n\t\t\treturn err\n\t\t}\n\n\t\treturn err\n\t}\n\n\tmetadataSuccessfullySet := true\n\n\tif isSignature {\n\t\tlayersInfo, errGetLayers := GetSignatureLayersInfo(repo, reference, digest.String(), signatureType, body,\n\t\t\timgStore, log)\n\t\tif errGetLayers != nil {\n\t\t\tmetadataSuccessfullySet = false\n\t\t\terr = errGetLayers\n\t\t} else {\n\t\t\terr = metaDB.AddManifestSignature(repo, signedManifestDigest, mTypes.SignatureMetadata{\n\t\t\t\tSignatureType: signatureType,\n\t\t\t\tSignatureDigest: digest.String(),\n\t\t\t\tLayersInfo: layersInfo,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tlog.Error().Err(err).Msg(\"metadb: error while putting repo meta\")\n\t\t\t\tmetadataSuccessfullySet = false\n\t\t\t} else {\n\t\t\t\terr = metaDB.UpdateSignaturesValidity(repo, signedManifestDigest)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error().Err(err).Str(\"repository\", repo).Str(\"reference\", reference).Str(\"digest\",\n\t\t\t\t\t\tsignedManifestDigest.String()).Msg(\"metadb: failed verify signatures validity for signed 
image\")\n\t\t\t\t\tmetadataSuccessfullySet = false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\terr = SetImageMetaFromInput(repo, reference, mediaType, digest, body,\n\t\t\timgStore, metaDB, log)\n\t\tif err != nil {\n\t\t\tmetadataSuccessfullySet = false\n\t\t}\n\t}\n\n\tif !metadataSuccessfullySet {\n\t\tlog.Info().Str(\"tag\", reference).Str(\"repository\", repo).Msg(\"uploading image meta was unsuccessful for tag in repo\")\n\n\t\tif err := imgStore.DeleteImageManifest(repo, reference, false); err != nil {\n\t\t\tlog.Error().Err(err).Str(\"reference\", reference).Str(\"repository\", repo).\n\t\t\t\tMsg(\"couldn't remove image manifest in repo\")\n\n\t\t\treturn err\n\t\t}\n\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (app *App) Delete(url string, handler handlerFunc) {\n\trequestRegexp := app.craterRouter.normalizeRoute(url)\n\tapp.craterRequestHandler.handleDelete(requestRegexp, func(w http.ResponseWriter, r *http.Request) {\n\t\tapp.serveRequest(w, r, handler, requestRegexp)\n\t})\n}", "func handleDelete(ctx *verifierContext, status *types.VerifyImageStatus) {\n\n\tlog.Functionf(\"handleDelete(%s) refcount %d\",\n\t\tstatus.ImageSha256, status.RefCount)\n\n\tif _, err := os.Stat(status.FileLocation); err == nil {\n\t\tlog.Functionf(\"handleDelete removing %s\",\n\t\t\tstatus.FileLocation)\n\t\tif err := os.RemoveAll(status.FileLocation); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t} else {\n\t\tlog.Warnf(\"handleDelete: Unable to delete %s: %s\",\n\t\t\tstatus.FileLocation, err)\n\t}\n\n\tunpublishVerifyImageStatus(ctx, status)\n\tlog.Functionf(\"handleDelete done for %s\", status.ImageSha256)\n}", "func (rc *RequiredCapability) Delete() (error, error, int) {\n\tauthorized, err := rc.isTenantAuthorized()\n\tif !authorized {\n\t\treturn errors.New(\"not authorized on this tenant\"), nil, http.StatusForbidden\n\t} else if err != nil {\n\t\treturn nil, fmt.Errorf(\"checking authorization for existing DS ID: %s\" + err.Error()), http.StatusInternalServerError\n\t}\n\t_, cdnName, _, err := dbhelpers.GetDSNameAndCDNFromID(rc.ReqInfo.Tx.Tx, *rc.DeliveryServiceID)\n\tif err != nil {\n\t\treturn nil, err, http.StatusInternalServerError\n\t}\n\tuserErr, sysErr, errCode := dbhelpers.CheckIfCurrentUserCanModifyCDN(rc.ReqInfo.Tx.Tx, string(cdnName), rc.ReqInfo.User.UserName)\n\tif userErr != nil || sysErr != nil {\n\t\treturn userErr, sysErr, errCode\n\t}\n\treturn api.GenericDelete(rc)\n}", "func (ctrler CtrlDefReactor) OnVersionDelete(obj *Version) error {\n\tlog.Info(\"OnVersionDelete is not implemented\")\n\treturn nil\n}", "func (api *versionAPI) SyncDelete(obj *cluster.Version) error {\n\tvar writeErr error\n\tif api.ct.resolver != nil {\n\t\tapicl, err := api.ct.apiClient()\n\t\tif err != nil {\n\t\t\tapi.ct.logger.Errorf(\"Error creating API server clent. 
Err: %v\", err)\n\t\t\treturn err\n\t\t}\n\n\t\t_, writeErr = apicl.ClusterV1().Version().Delete(context.Background(), &obj.ObjectMeta)\n\t}\n\n\tif writeErr == nil {\n\t\tapi.ct.handleVersionEvent(&kvstore.WatchEvent{Object: obj, Type: kvstore.Deleted})\n\t}\n\n\treturn writeErr\n}", "func (c *ResourcesHandler) Delete(event.DeleteEvent, workqueue.RateLimitingInterface) {}", "func (c *kubernetesDeploymentManifest) Destroy(ctx context.Context, profile ServiceRequest) error {\n\tspan, _ := apm.StartSpanOptions(ctx, \"Destroying kubernetes deployment\", \"kubernetes.manifest.destroy\", apm.SpanOptions{\n\t\tParent: apm.SpanFromContext(ctx).TraceContext(),\n\t})\n\tdefer span.End()\n\n\tkubectl = cluster.Kubectl().WithNamespace(ctx, getNamespaceFromProfile(profile))\n\tcluster.Cleanup(c.Context)\n\treturn nil\n}", "func (*MemcacheDeleteResponse) Descriptor() ([]byte, []int) {\n\treturn file_memcache_service_proto_rawDescGZIP(), []int{8}\n}", "func (ctrler CtrlDefReactor) OnClusterDelete(obj *Cluster) error {\n\tlog.Info(\"OnClusterDelete is not implemented\")\n\treturn nil\n}", "func (c *Controller) OnDelete(obj interface{}) {\n\tdoc, ok := obj.(*v1alpha1.IndicatorDocument)\n\tif !ok {\n\t\tlog.Printf(\"OnDelete received a non-indicatordocument: %T\", obj)\n\t\treturn\n\t}\n\tconfigMap, err := ConfigMap(doc, nil)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to generate ConfigMap: %s\", err)\n\t\treturn\n\t}\n\terr = c.cmEditor.Delete(configMap.Name, nil)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to delete ConfigMap: %s\", err)\n\t\treturn\n\t}\n}", "func UpdateManifest(m Manifests, root string, serviceID flux.ResourceID, f func(manifest []byte) ([]byte, error)) error {\n\tservices, err := m.FindDefinedServices(root)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpaths := services[serviceID]\n\tif len(paths) == 0 {\n\t\treturn ErrNoResourceFilesFoundForService\n\t}\n\tif len(paths) > 1 {\n\t\treturn ErrMultipleResourceFilesFoundForService\n\t}\n\n\tdef, err := ioutil.ReadFile(paths[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnewDef, err := f(def)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfi, err := os.Stat(paths[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(paths[0], newDef, fi.Mode())\n}", "func newManifestDeleteCmd(manifestParams *manifestParameters) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"delete\",\n\t\tShort: \"Delete manifest from a repository\",\n\t\tLong: newManifestDeleteCmdLongMessage,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tregistryName, err := manifestParams.GetRegistryName()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tloginURL := api.LoginURL(registryName)\n\t\t\tacrClient, err := api.GetAcrCLIClientWithAuth(loginURL, manifestParams.username, manifestParams.password, manifestParams.configs)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tctx := context.Background()\n\t\t\terr = deleteManifests(ctx, acrClient, loginURL, manifestParams.repoName, args)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t}\n\n\treturn cmd\n}", "func (ctrler CtrlDefReactor) OnBucketDelete(obj *Bucket) error {\n\tlog.Info(\"OnBucketDelete is not implemented\")\n\treturn nil\n}", "func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) {\n\tif r.DeleteFunc != nil {\n\t\tr.DeleteFunc(obj)\n\t}\n}", "func (mh *MetadataHandler) HandleDeleteMetadata(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\n\tvars := mux.Vars(r)\n\tvar (\n\t\tappID string\n\t\tok 
bool\n\t)\n\tif appID, ok = vars[\"appID\"]; !ok {\n\t\tw.WriteHeader(http.StatusBadRequest) // 400\n\t\treturn\n\t}\n\n\terr := mh.Repository.Delete(appID)\n\tif err != nil {\n\t\tif err == repository.ErrIDNotFound {\n\t\t\tw.WriteHeader(http.StatusConflict) // 409\n\t\t} else {\n\t\t\tw.WriteHeader(http.StatusInternalServerError) // 500\n\t\t}\n\t\tyaml.NewEncoder(w).Encode(err.Error())\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusNoContent) // 204\n}", "func handleAppInstanceStatusDelete(ctxArg interface{}, key string,\n\tstatusArg interface{}) {\n\tctx := ctxArg.(*zedmanagerContext)\n\tpublishAppInstanceSummary(ctx)\n}", "func (r *ClusterServiceVersionReconciler) deleteExternalResources(CSV *olmv1alpha1.ClusterServiceVersion) error {\n\treqLogger := r.Log.WithValues(\"Request.Name\", CSV.GetName(), \"Request.Namespace\", CSV.GetNamespace())\n\treqLogger.Info(\"deleting csv\")\n\n\tannotations := CSV.GetAnnotations()\n\tif annotations == nil {\n\t\treqLogger.Info(\"No annotations for this CSV\")\n\t\treturn nil\n\t}\n\n\tmeterDefinitionString, ok := annotations[utils.CSV_METERDEFINITION_ANNOTATION]\n\tif !ok {\n\t\treqLogger.Info(\"No value for \", \"key: \", utils.CSV_METERDEFINITION_ANNOTATION)\n\t\treturn nil\n\t}\n\n\tvar errAlpha, errBeta error\n\tmeterDefinitionBeta := &marketplacev1beta1.MeterDefinition{}\n\tmeterDefinitionAlpha := &marketplacev1alpha1.MeterDefinition{}\n\n\terrBeta = meterDefinitionBeta.BuildMeterDefinitionFromString(meterDefinitionString, CSV.GetName(), CSV.GetNamespace(), utils.CSV_ANNOTATION_NAME, utils.CSV_ANNOTATION_NAMESPACE)\n\n\tif errBeta != nil {\n\t\terrAlpha = meterDefinitionAlpha.BuildMeterDefinitionFromString(meterDefinitionString, CSV.GetName(), CSV.GetNamespace(), utils.CSV_ANNOTATION_NAME, utils.CSV_ANNOTATION_NAMESPACE)\n\t}\n\n\tswitch {\n\tcase errBeta == nil:\n\t\terr := r.Client.Delete(context.TODO(), meterDefinitionBeta, client.PropagationPolicy(metav1.DeletePropagationForeground))\n\t\tif err != nil && errors.IsNotFound(err) {\n\t\t\treturn err\n\t\t}\n\tcase errAlpha == nil:\n\t\terr := r.Client.Delete(context.TODO(), meterDefinitionAlpha, client.PropagationPolicy(metav1.DeletePropagationForeground))\n\t\tif err != nil && errors.IsNotFound(err) {\n\t\t\treturn err\n\t\t}\n\tdefault:\n\t\terr := emperrors.Combine(errBeta, errAlpha)\n\t\treqLogger.Error(err, \"Could not build a local copy of the MeterDefinition\")\n\t\treturn err\n\t}\n\n\treqLogger.Info(\"found and deleted MeterDefinition\")\n\treturn nil\n\n}", "func (service *ProjectService) RemoveManifestEntry(at int) error {\n\tmanifest := service.mod.World()\n\tentry, err := manifest.Entry(at)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn service.commander.Register(\n\t\tcmd.Named(\"RemoveManifestEntry\"),\n\t\tcmd.Forward(func(modder world.Modder) error {\n\t\t\treturn manifest.RemoveEntry(at)\n\t\t}),\n\t\tcmd.Reverse(func(modder world.Modder) error {\n\t\t\treturn manifest.InsertEntry(at, entry)\n\t\t}),\n\t)\n}", "func UpdateManifest(m Manifests, root string, paths []string, id flux.ResourceID, f func(manifest []byte) ([]byte, error)) error {\n\tresources, err := m.LoadManifests(root, paths)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresource, ok := resources[id.String()]\n\tif !ok {\n\t\treturn ErrResourceNotFound(id.String())\n\t}\n\n\tpath := filepath.Join(root, resource.Source())\n\tdef, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnewDef, err := f(def)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfi, err := os.Stat(path)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(path, newDef, fi.Mode())\n}", "func RegisterManifestHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {\n\treturn RegisterManifestHandlerClient(ctx, mux, NewManifestClient(conn))\n}", "func (c *AddonGenericController) OnRemove(ctx context.Context, name string, sync AddonHandler) {\n\tc.ControllerInterface.OnRemove(ctx, name, generic.ObjectHandler[*v1.Addon](sync))\n}", "func (o *RSSAnnouncement) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) {\n\tif o == nil {\n\t\treturn 0, errors.New(\"models: no RSSAnnouncement provided for delete\")\n\t}\n\n\targs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), rssAnnouncementPrimaryKeyMapping)\n\tsql := \"DELETE FROM \\\"rss_announcements\\\" WHERE \\\"guild_id\\\"=$1\"\n\n\tif boil.IsDebug(ctx) {\n\t\twriter := boil.DebugWriterFrom(ctx)\n\t\tfmt.Fprintln(writer, sql)\n\t\tfmt.Fprintln(writer, args...)\n\t}\n\tresult, err := exec.ExecContext(ctx, sql, args...)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to delete from rss_announcements\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to get rows affected by delete for rss_announcements\")\n\t}\n\n\treturn rowsAff, nil\n}", "func DeleteAssets(registry sources.Registry, graphUpdater *knowledge.GraphUpdater, sem *semaphore.Weighted) http.HandlerFunc {\n\treturn handleUpdate(registry, func(ctx context.Context, source string, body io.Reader) error {\n\t\trequestBody := client.DeleteGraphAssetRequestBody{}\n\t\tif err := json.NewDecoder(body).Decode(&requestBody); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// TODO(c.michaud): verify compatibility of the schema with graph updates\n\t\terr := graphUpdater.RemoveAssets(ctx, source, requestBody.Assets)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Unable to remove assets: %v\", err)\n\t\t}\n\n\t\tlabels := prometheus.Labels{\"source\": source}\n\t\tmetrics.GraphUpdateAssetsDeletedCounter.\n\t\t\tWith(labels).\n\t\t\tAdd(float64(len(requestBody.Assets)))\n\t\treturn nil\n\t}, sem, \"delete_assets\")\n}", "func (manager *Manager) onDeleteEgressPolicy(policy *Policy) {\n\tconfigID := ParseCEGPConfigID(policy)\n\n\tmanager.Lock()\n\tdefer manager.Unlock()\n\n\tlogger := log.WithField(logfields.CiliumEgressGatewayPolicyName, configID.Name)\n\n\tif manager.policyConfigs[configID] == nil {\n\t\tlogger.Warn(\"Can't delete CiliumEgressGatewayPolicy: policy not found\")\n\t}\n\n\tlogger.Debug(\"Deleted CiliumEgressGatewayPolicy\")\n\n\tdelete(manager.policyConfigs, configID)\n\n\tmanager.setEventBitmap(eventDeletePolicy)\n\tmanager.reconciliationTrigger.TriggerWithReason(\"policy deleted\")\n}", "func Delete(rest interfaces.Rest) func(*fiber.Ctx) error {\n\treturn func(ctx *fiber.Ctx) error {\n\t\tappController := rest.GetAppController()\n\n\t\treturn appController.Author.GetAuthors(ctx)\n\t}\n}", "func (m *Manifest) Size() int64 {\n\tif m == nil {\n\t\treturn 0\n\t}\n\n\tsize := m.Meta.Size\n\n\tfor _, f := range m.Files {\n\t\tsize += f.Size\n\t}\n\treturn size\n}", "func (s *store) OnDelete(obj interface{}) {\n\tpod, ok := obj.(*api.Pod)\n\tif !ok {\n\t\tdeletedObj, dok := obj.(kcache.DeletedFinalStateUnknown)\n\t\tif dok {\n\t\t\tpod, ok = deletedObj.Obj.(*api.Pod)\n\t\t}\n\t}\n\n\tif !ok {\n\t\tlog.Errorf(\"Expected Pod but OnDelete handler received %+v\", obj)\n\t\treturn\n\t}\n\n\tif pod.Status.PodIP != \"\" 
{\n\t\ts.mutex.Lock()\n\t\tdelete(s.rolesByIP, pod.Status.PodIP)\n\t\ts.mutex.Unlock()\n\t}\n}", "func (*MemcacheDeleteRequest) Descriptor() ([]byte, []int) {\n\treturn file_memcache_service_proto_rawDescGZIP(), []int{7}\n}", "func (proxier *Proxier) OnServiceDelete(service *api.Service) {\n\tnamespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}\n\tif proxier.serviceChanges.Update(&namespacedName, service, nil) && proxier.isInitialized() {\n//\t\tproxier.syncRunner.Run()\n\t\tproxier.syncProxyRules()\n\t}\n}", "func (signup *EventSignup) OnDeleted(container *ioccontainer.Container) error {\n\terr := signup.Validate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar eventRepository EventRepository\n\tcontainer.Make(&eventRepository)\n\n\tevent, err := eventRepository.GetEventByID(signup.EventID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn signup.sendNotification(event, \"member_signed_out\", container)\n}", "func (c *Controller) delete(d *appsv1.Deployment) error {\n\tep := RegisteelEndpoint + \"/\" + string(d.UID)\n\tclient := &http.Client{}\n\treq, err := http.NewRequest(http.MethodDelete, ep, nil)\n\treq.Header.Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.logger.Infof(\"removed deployment from api: %v\", d.Name)\n\n\treturn nil\n}", "func manifestDispatcher(ctx *Context, r *http.Request) http.Handler {\n\tmanifestHandler := &manifestHandler{\n\t\tContext: ctx,\n\t}\n\tref := getReference(ctx)\n\tdgst, err := digest.Parse(ref)\n\tif err != nil {\n\t\t// We just have a tag\n\t\tmanifestHandler.Tag = ref\n\t} else {\n\t\tmanifestHandler.Digest = dgst\n\t}\n\n\tmhandler := handlers.MethodHandler{\n\t\thttp.MethodGet: http.HandlerFunc(manifestHandler.GetManifest),\n\t\thttp.MethodHead: http.HandlerFunc(manifestHandler.GetManifest),\n\t}\n\n\tif !ctx.readOnly {\n\t\tmhandler[http.MethodPut] = http.HandlerFunc(manifestHandler.PutManifest)\n\t\tmhandler[http.MethodDelete] = http.HandlerFunc(manifestHandler.DeleteManifest)\n\t}\n\n\treturn mhandler\n}", "func deleteHookByPolicy(cfg *Configuration, h *release.Hook, policy string) error {\n\tif hookHasDeletePolicy(h, policy) {\n\t\tb := bytes.NewBufferString(h.Manifest)\n\t\treturn cfg.KubeClient.Delete(b)\n\t}\n\treturn nil\n}", "func (c *MakeRequestController) deleteC(callback func()) {\n\t// reload the data from file to remove only the current request\n\toutput := c.AppDataService.Load()\n\n\toutput.Remove(c.AppCtx.GetMDR())\n\tc.AppCtx.UpdateMDR(models.EmptyMakeRequestData())\n\n\tc.AppDataService.Save(output)\n\tc.AppCtx.RefreshViews(\"all\")\n\n\tcallback()\n}", "func (l *Logger) OnMessageDelete(ds *discordgo.Session, md *discordgo.MessageDelete) {\n\tif l.LogDeletes == false {\n\t\treturn\n\t}\n\n\t// TODO: Create separate log for bots.\n\tif md.Author.ID == ds.State.User.ID {\n\t\treturn\n\t}\n\n\tlog.Printf(\"%+v\", md)\n\tlog.Printf(\"%+v\", md.Message)\n\n\t// TODO: Implement delete logging\n\tembed := message.GetDefaultEmbed()\n\tembed.Title = \"Message Deleted\"\n\tembed.Description = fmt.Sprintf(\"Message Deleted: %s\", md.Content)\n\n\tmessage.SendEmbed(ds, l.ChannelID, embed)\n}", "func DeleteManifestWork(name, namespace string, client client.Client, removeFinalizers bool) error {\n\tmanifestWork := &manifestworkv1.ManifestWork{}\n\tvar retErr error\n\tif err := client.Get(\n\t\tcontext.TODO(),\n\t\ttypes.NamespacedName{Name: name, Namespace: 
namespace},\n\t\tmanifestWork,\n\t); err != nil {\n\t\treturn err\n\t}\n\n\tif removeFinalizers && len(manifestWork.GetFinalizers()) > 0 {\n\t\tmanifestWork.SetFinalizers([]string{})\n\t\tif err := client.Update(context.TODO(), manifestWork); err != nil {\n\t\t\tlog.Error(err, fmt.Sprintf(\"Failed to remove finalizers of Manifestwork %s in %s namespace\", name, namespace))\n\t\t\tretErr = err\n\t\t}\n\t}\n\n\tif manifestWork.DeletionTimestamp == nil {\n\t\terr := client.Delete(context.TODO(), manifestWork)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn retErr\n}", "func (client ModelClient) DeleteIntentResponder(resp *http.Response) (result OperationStatus, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (*ReleaseDeleteResponse) Descriptor() ([]byte, []int) {\n\treturn file_release_proto_rawDescGZIP(), []int{12}\n}", "func (*DeleteFeedRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_asset_v1p2beta1_asset_service_proto_rawDescGZIP(), []int{5}\n}", "func (hh *HealthCheckHandler) Delete(w http.ResponseWriter, r *http.Request) {\n\tuuid := utils.ExtractUUID(r.URL.String())\n\thh.db.Delete(uuid)\n}", "func (*DeleteResponse) Descriptor() ([]byte, []int) {\n\treturn file_versions_v1_versions_proto_rawDescGZIP(), []int{11}\n}", "func (r *app) Delete(appGUID string, opts ...bool) error {\n\tasync := true\n\trecursive := false\n\tif len(opts) > 0 {\n\t\tasync = opts[0]\n\t}\n\tif len(opts) > 1 {\n\t\trecursive = opts[1]\n\t}\n\trawURL := fmt.Sprintf(\"/v2/apps/%s?async=%t&recursive=%t\", appGUID, async, recursive)\n\t_, err := r.client.Delete(rawURL)\n\treturn err\n}", "func Delete(url string, authHeader string) (int, []byte) {\n\tcode, _, response := DeleteWithHeaderInResult(url, authHeader)\n\treturn code, response\n}", "func (c *Controller) onDelete(obj interface{}) {\n\tfmt.Println(\"onDelete called\")\n\tc.dequeue(obj)\n}", "func (api *bucketAPI) SyncDelete(obj *objstore.Bucket) error {\n\tvar writeErr error\n\tif api.ct.resolver != nil {\n\t\tapicl, err := api.ct.apiClient()\n\t\tif err != nil {\n\t\t\tapi.ct.logger.Errorf(\"Error creating API server clent. 
Err: %v\", err)\n\t\t\treturn err\n\t\t}\n\n\t\t_, writeErr = apicl.ObjstoreV1().Bucket().Delete(context.Background(), &obj.ObjectMeta)\n\t}\n\n\tif writeErr == nil {\n\t\tapi.ct.handleBucketEvent(&kvstore.WatchEvent{Object: obj, Type: kvstore.Deleted})\n\t}\n\n\treturn writeErr\n}", "func (*DeleteApplicationResponse) Descriptor() ([]byte, []int) {\n\treturn file_application_proto_rawDescGZIP(), []int{11}\n}", "func (*GrowthChangeHistoryDeleteResp) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{26}\n}", "func (*DeleteFeedRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_asset_v1_asset_service_proto_rawDescGZIP(), []int{12}\n}", "func (client PublishedBlueprintsClient) DeleteResponder(resp *http.Response) (result PublishedBlueprint, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (d *DaemonSetPrepuller) DeleteFunc(component string) error {\n\tdsName := addPrepullPrefix(component)\n\tif err := apiclient.DeleteDaemonSetForeground(d.client, metav1.NamespaceSystem, dsName); err != nil {\n\t\treturn fmt.Errorf(\"unable to cleanup the DaemonSet used for prepulling %s: %v\", component, err)\n\t}\n\tfmt.Printf(\"[upgrade/prepull] Prepulled image for component %s.\\n\", component)\n\treturn nil\n}", "func (api *versionAPI) Delete(obj *cluster.Version) error {\n\tif api.ct.resolver != nil {\n\t\tapicl, err := api.ct.apiClient()\n\t\tif err != nil {\n\t\t\tapi.ct.logger.Errorf(\"Error creating API server clent. Err: %v\", err)\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = apicl.ClusterV1().Version().Delete(context.Background(), &obj.ObjectMeta)\n\t\treturn err\n\t}\n\n\tapi.ct.handleVersionEvent(&kvstore.WatchEvent{Object: obj, Type: kvstore.Deleted})\n\treturn nil\n}", "func (*DeleteResponse) Descriptor() ([]byte, []int) {\n\treturn file_order_proto_rawDescGZIP(), []int{7}\n}", "func (r *remoteServiceObserver) OnDelete(key store.NamedKey) {\n\tif svc, ok := key.(*serviceStore.ClusterService); ok {\n\t\tscopedLog := log.WithFields(logrus.Fields{logfields.ServiceName: svc.String()})\n\t\tscopedLog.Debugf(\"Delete event of remote service %#v\", svc)\n\n\t\tmesh := r.remoteCluster.mesh\n\t\t// Short-circuit the deletion logic if the service was not present (i.e., not shared)\n\t\tif !mesh.globalServices.onDelete(svc) {\n\t\t\tscopedLog.Debugf(\"Ignoring remote service delete. Service was not shared\")\n\t\t\treturn\n\t\t}\n\n\t\tif merger := mesh.conf.ServiceMerger; merger != nil {\n\t\t\tmerger.MergeExternalServiceDelete(svc, r.swg)\n\t\t} else {\n\t\t\tscopedLog.Debugf(\"Ignoring remote service delete. 
Missing merger function\")\n\t\t}\n\t} else {\n\t\tlog.Warningf(\"Received unexpected remote service delete object %+v\", key)\n\t}\n}", "func (MemcacheDeleteResponse_DeleteStatusCode) EnumDescriptor() ([]byte, []int) {\n\treturn file_memcache_service_proto_rawDescGZIP(), []int{8, 0}\n}", "func Delete(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) {\n\t// Add your code here:\n\t// * Make API calls (use req.Session)\n\t// * Mutate the model\n\t// * Check/set any callback context (req.CallbackContext / response.CallbackContext)\n\n\t// Construct a new handler.ProgressEvent and return it\n\tresponse := handler.ProgressEvent{\n\t\tOperationStatus: handler.Success,\n\t\tMessage: \"Delete complete\",\n\t\tResourceModel: currentModel,\n\t}\n\n\treturn response, nil\n\n\t// Not implemented, return an empty handler.ProgressEvent\n\t// and an error\n\treturn handler.ProgressEvent{}, errors.New(\"Not implemented: Delete\")\n}", "func DeleteURL(w http.ResponseWriter, r *http.Request) {\n\tlog.Println(\"DELETE URL\")\n\n\turlKey := mux.Vars(r)[urlKeyPattern]\n\n\tshortURLDao := NewDao(shortURLDbPath)\n\n\tstatus := shortURLDao.RemoveByKey(urlKey)\n\tif status == NotFound {\n\t\tsendJSON(w, http.StatusNotFound, genericResponse{\"No URL found for key \" + urlKey})\n\t\treturn\n\t}\n\n\tif status == Error {\n\t\tsendJSON(w, http.StatusInternalServerError, genericResponse{\"Internal Server Error\"})\n\t\treturn\n\t}\n\n\t// Once the url key has been removed, remove its count as well\n\tNewDao(shortURLCountDbPath).RemoveByKey(urlKey)\n\n\tsendJSON(w, http.StatusOK, genericResponse{\"URL successfully deleted for key \" + urlKey})\n}", "func deleteAppVersionHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tappid := vars[\"appid\"]\n\tversionid := vars[\"versionid\"]\n\n\tvar app App\n\terr := getApp(appid, &app)\n\tcheck(err)\n\n\tif deleteAppVersion(app, versionid) {\n\t\tresult := map[string]interface{}{\n\t\t\t\"message\": \"OK\",\n\t\t}\n\t\trespondWithResult(w, result)\n\t} else {\n\t\tresult := map[string]interface{}{\n\t\t\t\"message\": \"error: unable to delete version\",\n\t\t}\n\t\trespondWithResult(w, result)\n\t}\n}", "func (bot Bot) onDelete(s *discordgo.Session, msg *discordgo.MessageCreate) {\n\t//checks if user exists- last message contains a value\n\tif _, exists := bot.allVars.dm[bot.allVars.m[msg.Author.Username]]; exists {\n\t\ts.ChannelMessageDelete(bot.allVars.m[msg.Author.Username], bot.allVars.dm[bot.allVars.m[msg.Author.Username]])\n\t\ts.ChannelMessageSend(msg.ChannelID, \"The message has been deleted\")\n\t} else {\n\t\t//if user doesnt exist return\n\t\ts.ChannelMessageSend(msg.ChannelID, \"There is no prior message available to be deleted\")\n\t}\n}", "func (*MemberRuleSettingDeleteResp) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{98}\n}", "func (client MSIXPackagesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),\n\t\tautorest.ByClosing())\n\tresult.Response = resp\n\treturn\n}", "func (*DeleteMetadataRequest) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_index_proto_rawDescGZIP(), []int{19}\n}", "func (w *wireMap) AtomicDelete(wire *GRPCWire) error {\n\tw.mu.Lock()\n\tdefer w.mu.Unlock()\n\tdelete(w.wires, linkKey{\n\t\tnamespace: wire.LocalPodNetNS,\n\t\tlinkUID: wire.UID,\n\t})\n\n\tdelete(w.handles, 
wire.LocalNodeIfaceID)\n\n\treturn nil\n}", "func (ctrler CtrlDefReactor) OnDSCProfileDelete(obj *DSCProfile) error {\n\tlog.Info(\"OnDSCProfileDelete is not implemented\")\n\treturn nil\n}", "func (m *Manifest) FetchManifest(c config.Config) (string, error) {\n\treturn fetch(c, m.GetPlaybackURL())\n}", "func (h Handler) Delete(ctx context.Context, request *proto.Identifier) (*proto.Message, error) {\n\terr := h.meta.SetStatus(ctx, request.UserID, deleted)\n\terr = errors.Wrap(err, \"Error while changing status\")\n\treturn &proto.Message{}, err\n}", "func (*DeleteResponse) Descriptor() ([]byte, []int) {\n\treturn file_mods_v1_mods_proto_rawDescGZIP(), []int{11}\n}", "func (*DeleteArtifactRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_aiplatform_v1_metadata_service_proto_rawDescGZIP(), []int{12}\n}", "func Delete(instructionData reflect.Value, finished chan bool) int {\n\tfmt.Println(\"FIBER INFO: Deleting File ...\")\n\n\tpath, err := variable.GetValue(instructionData, \"PathVarName\", \"PathIsVar\", \"Path\")\n\tif err != nil {\n\t\tfinished <- true\n\t\treturn -1\n\t}\n\n\tos.Remove(path.(string))\n\tfinished <- true\n\treturn -1\n}", "func (ctrler CtrlDefReactor) OnDistributedServiceCardDelete(obj *DistributedServiceCard) error {\n\tlog.Info(\"OnDistributedServiceCardDelete is not implemented\")\n\treturn nil\n}", "func (proxier *Proxier) OnEndpointsDelete(endpoints *api.Endpoints) {\n\tnamespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}\n\tif proxier.endpointsChanges.Update(&namespacedName, endpoints, nil) && proxier.isInitialized() {\n//\t\tproxier.syncRunner.Run()\n\t\tproxier.syncProxyRules()\n\t}\n}", "func (api *snapshotrestoreAPI) SyncDelete(obj *cluster.SnapshotRestore) error {\n\tvar writeErr error\n\tif api.ct.resolver != nil {\n\t\tapicl, err := api.ct.apiClient()\n\t\tif err != nil {\n\t\t\tapi.ct.logger.Errorf(\"Error creating API server clent. Err: %v\", err)\n\t\t\treturn err\n\t\t}\n\n\t\t_, writeErr = apicl.ClusterV1().SnapshotRestore().Delete(context.Background(), &obj.ObjectMeta)\n\t}\n\n\tif writeErr == nil {\n\t\tapi.ct.handleSnapshotRestoreEvent(&kvstore.WatchEvent{Object: obj, Type: kvstore.Deleted})\n\t}\n\n\treturn writeErr\n}" ]
[ "0.6862908", "0.65429384", "0.6432694", "0.6220848", "0.6173658", "0.58912337", "0.58835965", "0.58701575", "0.56467897", "0.56270117", "0.5577935", "0.5576489", "0.54664457", "0.54631424", "0.54426086", "0.5428752", "0.5319357", "0.5285182", "0.5155966", "0.5142746", "0.5140906", "0.50603706", "0.50146157", "0.4921993", "0.48950985", "0.48631954", "0.48616287", "0.48591006", "0.48068458", "0.47919852", "0.47837806", "0.47079244", "0.47040725", "0.46972805", "0.4605758", "0.4588802", "0.45790252", "0.45664632", "0.45644855", "0.4548501", "0.45449343", "0.453074", "0.45300567", "0.4527873", "0.4521918", "0.45212355", "0.4516822", "0.45147654", "0.45084625", "0.45029306", "0.45014173", "0.44994026", "0.44907528", "0.4488584", "0.44842845", "0.44839716", "0.44688374", "0.44664168", "0.44616598", "0.44600824", "0.4456013", "0.44547552", "0.44425032", "0.4423732", "0.44231135", "0.4420778", "0.44175854", "0.44132167", "0.44108817", "0.44058707", "0.439986", "0.43965876", "0.4394691", "0.43900394", "0.43835446", "0.438235", "0.43777174", "0.43770635", "0.43759772", "0.43732923", "0.43722388", "0.43673012", "0.43649334", "0.43639496", "0.43565685", "0.43552166", "0.4354625", "0.43505812", "0.43475202", "0.43429697", "0.4339558", "0.4339022", "0.43375394", "0.4336913", "0.4336511", "0.43266648", "0.4326237", "0.4325278", "0.43196714", "0.4316269" ]
0.5517863
12
committee_api GetCommitteeRequest api request get_committee_request
func (api *API) GetCommitteeRequest(id uint32, count int32) (*CommitteeObject, error) {\n\tvar resp CommitteeObject\n\terr := api.call(\"committee_api\", \"get_committee_request\", []interface{}{id, count}, &resp)\n\treturn &resp, err\n}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (_IOrakuruCore *IOrakuruCoreCallerSession) GetRequest(_requestId [32]byte) (struct {\n\tId [32]byte\n\tDataSource string\n\tSelector string\n\tCallbackAddr common.Address\n\tExecutionTimestamp *big.Int\n\tIsFulfilled bool\n\tAggrType uint8\n\tPrecision uint8\n}, error) {\n\treturn _IOrakuruCore.Contract.GetRequest(&_IOrakuruCore.CallOpts, _requestId)\n}", "func (_IOrakuruCore *IOrakuruCoreSession) GetRequest(_requestId [32]byte) (struct {\n\tId [32]byte\n\tDataSource string\n\tSelector string\n\tCallbackAddr common.Address\n\tExecutionTimestamp *big.Int\n\tIsFulfilled bool\n\tAggrType uint8\n\tPrecision uint8\n}, error) {\n\treturn _IOrakuruCore.Contract.GetRequest(&_IOrakuruCore.CallOpts, _requestId)\n}", "func (api *API) GetCommitteeRequestsList(status uint16) ([]*uint16, error) {\n\tvar resp []*uint16\n\terr := api.call(\"committee_api\", \"get_committee_requests_list\", []interface{}{status}, &resp)\n\treturn resp, err\n}", "func (s *PublicBlockChainAPI) GetCommittee(ctx context.Context, epoch int64) (map[string]interface{}, error) {\n\tcommittee, err := s.b.GetCommittee(big.NewInt(epoch))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvalidators := make([]map[string]interface{}, 0)\n\tfor _, validator := range committee.NodeList {\n\t\tvalidatorBalance := new(hexutil.Big)\n\t\tvalidatorBalance, err = s.b.GetBalance(validator.EcdsaAddress)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\toneAddress, err := internal_common.AddressToBech32(validator.EcdsaAddress)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvalidatorsFields := map[string]interface{}{\n\t\t\t\"address\": oneAddress,\n\t\t\t\"balance\": validatorBalance,\n\t\t}\n\t\tvalidators = append(validators, validatorsFields)\n\t}\n\tresult := map[string]interface{}{\n\t\t\"shardID\": committee.ShardID,\n\t\t\"validators\": validators,\n\t}\n\treturn result, nil\n}", "func (s *Service) GetRequest(identifiable DocIdentifiable) (*json.RawMessage, error) {\n\t// TODO: accept ctx as param\n\tget, err := s.Client.Get().\n\t\tIndex(s.RequestsIndex).\n\t\tType(\"_doc\").\n\t\tId(identifiable.DocID()).\n\t\tDo(context.TODO())\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn get.Source, nil\n}", "func (_CommitteeManager *CommitteeManagerCallerSession) Committee() (common.Address, error) {\n\treturn _CommitteeManager.Contract.Committee(&_CommitteeManager.CallOpts)\n}", "func (api *API) GetCommitteeRequestVotes(id uint32) ([]*CommitteeVoteState, error) {\n\tvar resp []*CommitteeVoteState\n\terr := api.call(\"committee_api\", \"get_committee_request_votes\", []interface{}{id}, &resp)\n\treturn resp, err\n}", "func (c *TogglHttpClient) GetRequest(endpoint string) (*json.RawMessage, error) {\n\treturn request(c, \"GET\", endpoint, nil)\n}", "func GetCmdQueryCommittee(queryRoute string, cdc *codec.Codec) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"committee [committee-id]\",\n\t\tArgs: cobra.ExactArgs(1),\n\t\tShort: \"Query details of a single committee\",\n\t\tExample: fmt.Sprintf(\"%s query %s committee 1\", version.ClientName, types.ModuleName),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tcliCtx := context.NewCLIContext().WithCodec(cdc)\n\n\t\t\t// Prepare params for querier\n\t\t\tcommitteeID, err := strconv.ParseUint(args[0], 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"committee-id %s not a valid uint\", args[0])\n\t\t\t}\n\t\t\tbz, err := cdc.MarshalJSON(types.NewQueryCommitteeParams(committeeID))\n\t\t\tif err != nil {\n\t\t\t\treturn 
err\n\t\t\t}\n\n\t\t\t// Query\n\t\t\tres, _, err := cliCtx.QueryWithData(fmt.Sprintf(\"custom/%s/%s\", queryRoute, types.QueryCommittee), bz)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// Decode and print result\n\t\t\tcommittee := types.Committee{}\n\t\t\tif err = cdc.UnmarshalJSON(res, &committee); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn cliCtx.PrintOutput(committee)\n\t\t},\n\t}\n\treturn cmd\n}", "func (_CommitteeManager *CommitteeManagerSession) Committee() (common.Address, error) {\n\treturn _CommitteeManager.Contract.Committee(&_CommitteeManager.CallOpts)\n}", "func (_IOrakuruCore *IOrakuruCoreCaller) GetRequest(opts *bind.CallOpts, _requestId [32]byte) (struct {\n\tId [32]byte\n\tDataSource string\n\tSelector string\n\tCallbackAddr common.Address\n\tExecutionTimestamp *big.Int\n\tIsFulfilled bool\n\tAggrType uint8\n\tPrecision uint8\n}, error) {\n\tvar out []interface{}\n\terr := _IOrakuruCore.contract.Call(opts, &out, \"getRequest\", _requestId)\n\n\toutstruct := new(struct {\n\t\tId [32]byte\n\t\tDataSource string\n\t\tSelector string\n\t\tCallbackAddr common.Address\n\t\tExecutionTimestamp *big.Int\n\t\tIsFulfilled bool\n\t\tAggrType uint8\n\t\tPrecision uint8\n\t})\n\tif err != nil {\n\t\treturn *outstruct, err\n\t}\n\n\toutstruct.Id = *abi.ConvertType(out[0], new([32]byte)).(*[32]byte)\n\toutstruct.DataSource = *abi.ConvertType(out[1], new(string)).(*string)\n\toutstruct.Selector = *abi.ConvertType(out[2], new(string)).(*string)\n\toutstruct.CallbackAddr = *abi.ConvertType(out[3], new(common.Address)).(*common.Address)\n\toutstruct.ExecutionTimestamp = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int)\n\toutstruct.IsFulfilled = *abi.ConvertType(out[5], new(bool)).(*bool)\n\toutstruct.AggrType = *abi.ConvertType(out[6], new(uint8)).(*uint8)\n\toutstruct.Precision = *abi.ConvertType(out[7], new(uint8)).(*uint8)\n\n\treturn *outstruct, err\n\n}", "func (client IdentityClient) getWorkRequest(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/workRequests/{workRequestId}\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response GetWorkRequestResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (_CommitteeManager *CommitteeManagerCaller) Committee(opts *bind.CallOpts) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _CommitteeManager.contract.Call(opts, out, \"_committee\")\n\treturn *ret0, err\n}", "func GetRequest(ctx context.Context, r *http.Request) (*skillserver.EchoRequest, error) {\n\tvar echoReq *skillserver.EchoRequest\n\terr := json.NewDecoder(r.Body).Decode(&echoReq)\n\treturn echoReq, err\n}", "func (c *Client) NewGetDraftRequest(ctx context.Context, path string) (*http.Request, error) {\n\tscheme := c.Scheme\n\tif scheme == \"\" {\n\t\tscheme = \"http\"\n\t}\n\tu := url.URL{Host: c.Host, Scheme: scheme, Path: path}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func GetActionCommitment(ctx iris.Context) {\n\ty1, err := 
ctx.URLParamInt64(\"FirstYear\")\n\tif err != nil {\n\t\ty1 = int64(time.Now().Year()) + 1\n\t}\n\tvar resp models.ActionCommitments\n\tdb := ctx.Values().Get(\"db\").(*sql.DB)\n\tif err = resp.GetAll(y1, db); err != nil {\n\t\tctx.StatusCode(http.StatusInternalServerError)\n\t\tctx.JSON(jsonError{\"Prévisions AP par actions budgétaires, requête : \" + err.Error()})\n\t}\n\tctx.StatusCode(http.StatusOK)\n\tctx.JSON(resp)\n}", "func (client *CertificateOrdersClient) retrieveCertificateEmailHistoryCreateRequest(ctx context.Context, resourceGroupName string, name string, options *CertificateOrdersClientRetrieveCertificateEmailHistoryOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}/retrieveEmailHistory\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (a *RepoAPI) getCommit(params interface{}) (resp *rpc.Response) {\n\tm := objx.New(cast.ToStringMap(params))\n\treturn rpc.Success(util.Map{\n\t\t\"commit\": a.mods.Repo.GetCommit(m.Get(\"name\").Str(), m.Get(\"hash\").Str()),\n\t})\n}", "func decodeGetRequest(_ context.Context, r *http1.Request) (interface{}, error) {\n\treq := endpoint.GetRequest{}\n\treturn req, nil\n}", "func decodeGetRequest(_ context.Context, r *http1.Request) (interface{}, error) {\n\treq := endpoint.GetRequest{}\n\treturn req, nil\n}", "func (c *ThreeScaleClient) buildGetReq(ep string) (*http.Request, error) {\n\treq, err := http.NewRequest(\"GET\", c.adminPortal.rawURL+ep, nil)\n\treq.Header.Set(\"Accept\", \"application/xml\")\n\treq.Header.Set(\"Authorization\", \"Basic \"+basicAuth(\"\", c.credential))\n\treturn req, err\n}", "func (client BastionClient) getWorkRequest(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/workRequests/{workRequestId}\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response GetWorkRequestResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (client *RoleAssignmentsClient) getCreateRequest(ctx context.Context, vaultBaseURL string, scope string, roleAssignmentName string, options *RoleAssignmentsGetOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", 
vaultBaseURL)\n\turlPath := \"/{scope}/providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}\"\n\tif scope == \"\" {\n\t\treturn nil, errors.New(\"parameter scope cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\tif roleAssignmentName == \"\" {\n\t\treturn nil, errors.New(\"parameter roleAssignmentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{roleAssignmentName}\", url.PathEscape(roleAssignmentName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.3-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *AlertOperationClient) getCreateRequest(ctx context.Context, scope string, operationID string, options *AlertOperationClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.Authorization/roleManagementAlertOperations/{operationId}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\turlPath = strings.ReplaceAll(urlPath, \"{operationId}\", operationID)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ConsumerInvitationsClient) getCreateRequest(ctx context.Context, location string, invitationID string, options *ConsumerInvitationsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.DataShare/locations/{location}/consumerInvitations/{invitationId}\"\n\tif location == \"\" {\n\t\treturn nil, errors.New(\"parameter location cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{location}\", url.PathEscape(location))\n\tif invitationID == \"\" {\n\t\treturn nil, errors.New(\"parameter invitationID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{invitationId}\", url.PathEscape(invitationID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-09-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func getRecipientTx(r *http.Request, pb transactionPb.TransactionService) (proto.Message, error) {\n\tr.ParseForm()\n\trecipientCryptoID := r.FormValue(\"recipientCryptoId\")\n\t// Create protobuf request\n\tpbRequest := &transactionPb.TxByRecipientReq{\n\t\tRecipientCryptoId: recipientCryptoID,\n\t}\n\t// Call RPC function and get protobuf response\n\tpbResponse, err := pb.GetRecipientTx(context.Background(), pbRequest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Send back to Client\n\treturn pbResponse, nil\n}", "func GetDetailedActionCommitment(ctx iris.Context) {\n\ty1, err := ctx.URLParamInt64(\"FirstYear\")\n\tif err != nil {\n\t\ty1 = int64(time.Now().Year()) + 1\n\t}\n\tvar resp models.DetailedActionCommitments\n\tdb := ctx.Values().Get(\"db\").(*sql.DB)\n\tif err = resp.GetAll(y1, db); err != nil {\n\t\tctx.StatusCode(http.StatusInternalServerError)\n\t\tctx.JSON(jsonError{\"Prévisions 
AP détaillées par actions budgétaires, requête : \" + err.Error()})\n\t\treturn\n\t}\n\tctx.StatusCode(http.StatusOK)\n\tctx.JSON(resp)\n}", "func getServiceCmdRequest(cmd cmdType, cred credential, body []byte) (*http.Request, error) {\n\treq, err := newTestRequest(cmd.apiMethod(), \"/?service\", 0, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Set body\n\treq.Body = ioutil.NopCloser(bytes.NewReader(body))\n\n\t// minioAdminOpHeader is to identify the request as a\n\t// management REST API request.\n\treq.Header.Set(minioAdminOpHeader, cmd.String())\n\treq.Header.Set(\"X-Amz-Content-Sha256\", getSHA256Hash(body))\n\n\t// management REST API uses signature V4 for authentication.\n\terr = signRequestV4(req, cred.AccessKey, cred.SecretKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (e *Endpoints) getOrgByRequest(r *http.Request) (*orgpb.Org, error) {\n\torgIDStr := r.Header.Get(httputil.OrgHeader)\n\tif orgIDStr == \"\" {\n\t\treturn nil, errors.Errorf(\"missing org id header\")\n\t}\n\n\torgResp, err := e.org.GetOrg(apis.WithInternalClientContext(context.Background(), discover.SvcCMP),\n\t\t&orgpb.GetOrgRequest{IdOrName: orgIDStr})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn orgResp.Data, nil\n}", "func (db *DB) GetRequest(id *record.ID) (record.Request, error) {\n\ttx := db.BeginTransaction(false)\n\tdefer tx.Discard()\n\treturn tx.GetRequest(id)\n}", "func getRequest(url string) ([]byte, error) {\n\treq, err := http.NewRequest(http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := makeRequest(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp == nil {\n\t\treturn nil, ErrResponseNil\n\t}\n\tdefer resp.Body.Close()\n\n\tbodyBytes, err := getBody(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, types.NewErrServiceClient(resp.StatusCode, bodyBytes)\n\t}\n\n\treturn bodyBytes, nil\n}", "func (method *Method) GetRequest() specs.Message {\n\tif method.Request == nil {\n\t\treturn make(specs.Message, 0)\n\t}\n\n\treturn method.Request.Definition.Property.Message\n}", "func NewGetRequest(payload *todo.GetPayload) *todopb.GetRequest {\n\tmessage := &todopb.GetRequest{\n\t\tId: payload.ID,\n\t}\n\treturn message\n}", "func (c *NATSTestClient) GetRequest(t *testing.T) *Request {\n\tselect {\n\tcase r := <-c.reqs:\n\t\treturn r\n\tcase <-time.After(timeoutSeconds * time.Second):\n\t\tif t == nil {\n\t\t\tpprof.Lookup(\"goroutine\").WriteTo(os.Stdout, 1)\n\t\t\tpanic(\"expected a request but found none\")\n\t\t} else {\n\t\t\tt.Fatal(\"expected a request but found none\")\n\t\t}\n\t}\n\treturn nil\n}", "func (m *ThreatAssessmentRequest) GetRequestSource()(*ThreatAssessmentRequestSource) {\n return m.requestSource\n}", "func (b *Handler) Committee(round uint64, step uint8) user.VotingCommittee {\n\treturn b.Handler.Committee(round, step, config.ConsensusSelectionCommitteeSize)\n}", "func (client *AlertsClient) getCreateRequest(ctx context.Context, scope string, alertID string, options *AlertsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.Authorization/roleManagementAlerts/{alertId}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\turlPath = strings.ReplaceAll(urlPath, \"{alertId}\", alertID)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := 
req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ContainerAppsDiagnosticsClient) getRevisionCreateRequest(ctx context.Context, resourceGroupName string, containerAppName string, revisionName string, options *ContainerAppsDiagnosticsClientGetRevisionOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}/detectorProperties/revisionsApi/revisions/{revisionName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif containerAppName == \"\" {\n\t\treturn nil, errors.New(\"parameter containerAppName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{containerAppName}\", url.PathEscape(containerAppName))\n\tif revisionName == \"\" {\n\t\treturn nil, errors.New(\"parameter revisionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{revisionName}\", url.PathEscape(revisionName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *Client) getCreateRequest(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, resourceType string, resourceName string, changeResourceID string, options *ClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}/providers/Microsoft.Resources/changes/{changeResourceId}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif resourceProviderNamespace == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceProviderNamespace cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceProviderNamespace}\", url.PathEscape(resourceProviderNamespace))\n\tif resourceType == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceType}\", url.PathEscape(resourceType))\n\tif resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(resourceName))\n\tif changeResourceID == \"\" {\n\t\treturn nil, errors.New(\"parameter changeResourceID cannot be empty\")\n\t}\n\turlPath = 
strings.ReplaceAll(urlPath, \"{changeResourceId}\", url.PathEscape(changeResourceID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (k Keeper) GetRequest(ctx sdk.Context, id uint64) (val types.Request, found bool) {\n\tstores := k.GetStoreRequestMap(ctx)\n\tfor _, store := range stores {\n\t\tb := store.Get(GetRequestIDBytes(id))\n\t\tif b == nil {\n\t\t\tcontinue\n\t\t}\n\t\tk.cdc.MustUnmarshal(b, &val)\n\t\tfound = true\n\t\tbreak\n\t}\n\treturn\n}", "func makeAuthRequest(apiKey string) string {\n\n\tclient := &http.Client{}\n\treq, _ := http.NewRequest(\"GET\", \"https://rep.checkpoint.com/rep-auth/service/v1.0/request\", nil)\n\treq.Header.Set(\"Client-Key\", apiKey)\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\t// log.Println(string(body))\n\treturn string(body)\n}", "func (r *ApprovalWorkflowProviderRequestsCollectionRequest) Get(ctx context.Context) ([]RequestObject, error) {\n\treturn r.GetN(ctx, 0)\n}", "func (client *CassandraClustersClient) getCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, options *CassandraClustersClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-03-15-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (a *RequestServiceApiService) GetRequest(ctx _context.Context, uuid string) ApiGetRequestRequest {\n\treturn ApiGetRequestRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tuuid: uuid,\n\t}\n}", "func (_BaseLibrary *BaseLibraryCaller) GetPendingApprovalRequest(opts *bind.CallOpts, index *big.Int) (common.Address, error) {\n\tvar out []interface{}\n\terr := _BaseLibrary.contract.Call(opts, &out, \"getPendingApprovalRequest\", index)\n\n\tif err != nil {\n\t\treturn *new(common.Address), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)\n\n\treturn out0, err\n\n}", "func (v GithubVCS) getPullRequest(ctx context.Context, runinfo RunInfo, prNumber int) (RunInfo, error) {\n\tpr, _, err := v.Client.PullRequests.Get(ctx, runinfo.Owner, runinfo.Repository, prNumber)\n\tif 
err != nil {\n\t\treturn runinfo, err\n\t}\n\t// Make sure to use the Base for Default BaseBranch or there would be a potential hijack\n\truninfo.DefaultBranch = pr.GetBase().GetRepo().GetDefaultBranch()\n\truninfo.URL = pr.GetBase().GetRepo().GetHTMLURL()\n\truninfo.SHA = pr.GetHead().GetSHA()\n\truninfo.SHAURL = fmt.Sprintf(\"%s/commit/%s\", pr.GetHTMLURL(), pr.GetHead().GetSHA())\n\t// TODO: Maybe if we wanted to allow rerequest from non approved user we\n\t// would use the CheckRun Sender instead of the rerequest sender, could it\n\t// be a room for abuse? 🤔\n\truninfo.Sender = pr.GetUser().GetLogin()\n\truninfo.HeadBranch = pr.GetHead().GetRef()\n\truninfo.BaseBranch = pr.GetBase().GetRef()\n\truninfo.EventType = \"pull_request\"\n\treturn runinfo, nil\n}", "func (c *CoinbaseAPIKeyAuth) makeRequest(req *http.Request) ([]byte, error) {\n\treq.Header.Set(\"CB-ACCESS-KEY\", c.APIKey)\n\treq.Header.Set(\"CB-VERSION\", time.Now().Format(\"20060102\"))\n\treq.Header.Set(\"CB-ACCESS-TIMESTAMP\", \"2017-03-27\")\n\tbv := []byte(time.Now().Format(\"20060102\") + \"GET\" + req.URL.String())\n\thasher := sha256.New()\n\thasher.Write(bv)\n\tsha := base64.URLEncoding.EncodeToString(hasher.Sum(nil))\n\treq.Header.Set(\"CB-ACCESS-SIGN\", sha)\n\tresp, err := c.httpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfmt.Println(req)\n\n\t// Make sure we close the body stream no matter what\n\tdefer resp.Body.Close()\n\n\t// Read body\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t//fmt.Println(string(body))\n\n\t// Check status code\n\tif resp.StatusCode != 200 {\n\t\tfmt.Println(string(body))\n\t\treturn nil, fmt.Errorf(\"Invalid HTTP response code: %d\", resp.StatusCode)\n\t}\n\n\t// Return\n\treturn body, nil\n}", "func (r *PullsListCommitsReq) HTTPRequest(ctx context.Context, opt ...RequestOption) (*http.Request, error) {\n\treturn buildHTTPRequest(ctx, r, opt)\n}", "func (*GetTransactionByEventIdRequest) Descriptor() ([]byte, []int) {\n\treturn file_com_daml_ledger_api_v1_transaction_service_proto_rawDescGZIP(), []int{3}\n}", "func CreateGetKeywordChEcomRequest() (request *GetKeywordChEcomRequest) {\n\trequest = &GetKeywordChEcomRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetKeywordChEcom\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func GetTracerRequest(tracerID uint) ([]byte, error) {\n\tlog.Trace.Printf(\"Getting request for the given tracer ID.\")\n\tvar ret []byte\n\tvar err error\n\n\tvar request types.Request\n\tif err = store.DB.First(&request).Error; err == nil {\n\t\tlog.Trace.Printf(\"Successfully got the request: %+v\", request)\n\t\tret, err = json.Marshal(request)\n\t}\n\n\tif err != nil {\n\t\tlog.Warning.Printf(err.Error())\n\t}\n\n\treturn ret, err\n}", "func (client *PipelinesClient) getCreateRequest(ctx context.Context, resourceGroupName string, factoryName string, pipelineName string, options *PipelinesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/pipelines/{pipelineName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter 
resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif factoryName == \"\" {\n\t\treturn nil, errors.New(\"parameter factoryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{factoryName}\", url.PathEscape(factoryName))\n\tif pipelineName == \"\" {\n\t\treturn nil, errors.New(\"parameter pipelineName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{pipelineName}\", url.PathEscape(pipelineName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfNoneMatch != nil {\n\t\treq.Raw().Header[\"If-None-Match\"] = []string{*options.IfNoneMatch}\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (pr ParallelRequests) GetRequest(t *testing.T, subject string) *Request {\n\tfor _, r := range pr {\n\t\tif r.Subject == subject {\n\t\t\treturn r\n\t\t}\n\t}\n\n\tt.Fatalf(\"expected parallel requests to contain subject %#v, but found none\", subject)\n\treturn nil\n}", "func (client *LocalRulestacksClient) getChangeLogCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientGetChangeLogOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/getChangeLog\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif localRulestackName == \"\" {\n\t\treturn nil, errors.New(\"parameter localRulestackName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{localRulestackName}\", url.PathEscape(localRulestackName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-29\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func getCrtcInfoRequest(c *xgb.Conn, Crtc Crtc, ConfigTimestamp xproto.Timestamp) []byte {\n\tsize := 12\n\tb := 0\n\tbuf := make([]byte, size)\n\n\tc.ExtLock.RLock()\n\tbuf[b] = c.Extensions[\"RANDR\"]\n\tc.ExtLock.RUnlock()\n\tb += 1\n\n\tbuf[b] = 20 // request opcode\n\tb += 1\n\n\txgb.Put16(buf[b:], uint16(size/4)) // write request size in 4-byte units\n\tb += 2\n\n\txgb.Put32(buf[b:], uint32(Crtc))\n\tb += 4\n\n\txgb.Put32(buf[b:], uint32(ConfigTimestamp))\n\tb += 4\n\n\treturn buf\n}", "func (oc *OpenshiftClient) GetRequest(path string) (*http.Response, error) {\n\treq, err := http.NewRequest(\"GET\", fmt.Sprintf(\"https://%s%s\", oc.MasterUrl, path), nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error occurred while creating new http request : %w\", err)\n\t}\n\tresp, err := 
oc.HTTPClient.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error occurred while performing http request : %w\", err)\n\t}\n\treturn resp, nil\n}", "func (*GetCommitRequest) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{17}\n}", "func (t *SimpleChaincode) getRequest(stub shim.ChaincodeStubInterface, args []string) ([]byte, error) {\n\n\tif len(args) != 2 {\n\t\treturn nil, errors.New(\"Incorrect number of arguments. Expecting 2, \\\"RequestID\\\" : \\\"xxxxxx\\\"\")\n\t}\n\n\tif args[0] != \"RequestID\" {\n\t\treturn nil, errors.New(\"Unsupoprted query arguments [\" + args[0] + \"]\")\n\t}\n\n\tfmt.Println(\"Start to query a Request ...\")\n\n\trequestID := args[1]\n\n\tvar columns []shim.Column\n\tkeyCol1 := shim.Column{Value: &shim.Column_String_{String_: requestID}}\n\n\tcolumns = append(columns, keyCol1)\n\n\trow, err := stub.GetRow(\"Request\", columns)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Query one request (RequestID = %s) in the table Request failed\", requestID)\n\t}\n\n\tif len(row.Columns) == 0 {\n\t\treturn nil, errors.New(\"Request was NOT found\")\n\t}\n\n\tfmt.Println(\"Query one request (RequestID = \" + requestID + \") in the table Request successfully...\")\n\n\t// Convert to the structure Request, the returns would be key1:value1,key2:value2,key3:value3, ...\n\trequest := &Request{\n\t\trow.Columns[0].GetString_(),\n\t\trow.Columns[1].GetString_(),\n\t\trow.Columns[2].GetString_(),\n\t\trow.Columns[3].GetString_(),\n\t\trow.Columns[4].GetString_(),\n\t\trow.Columns[5].GetString_(),\n\t\trow.Columns[6].GetString_(),\n\t\trow.Columns[7].GetString_(),\n\t\trow.Columns[8].GetString_(),\n\t\trow.Columns[9].GetString_(),\n\t\trow.Columns[10].GetString_(),\n\t\trow.Columns[11].GetString_(),\n\t\trow.Columns[12].GetString_(),\n\t}\n\n\treturnRequest, err := json.Marshal(request)\n\n\tif err != nil {\n\t\treturn nil, errors.New(\"getRequest() json marshal error\")\n\t}\n\n\tfmt.Println(\"End to query a Request ...\")\n\n\treturn returnRequest, nil\n}", "func (*GetEmailRequest) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_request_proto_rawDescGZIP(), []int{1}\n}", "func makeRequest(changes []store.Change) *store.ChangeRequest {\n\treq := new(store.ChangeRequest)\n\treq.RequestEntity = uint64(config.Id())\n\treq.RequestNode = config.Id()\n\treq.RequestId = store.AllocateRequestId()\n\treq.Changeset = changes\n\n\treturn req\n}", "func (store *Engine) Request(requestID string) (request *Request, err error) {\n\trequest = new(Request)\n\n\t_, err = store.api.\n\t\tURL(\"/workflow-engine/api/v1/requests/%s\", url.PathEscape(requestID)).\n\t\tGet(request)\n\n\treturn request, err\n}", "func (*GetUTXORequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{10}\n}", "func (_IOrakuruCore *IOrakuruCoreTransactor) MakeRequest(opts *bind.TransactOpts, _dataSource string, _selector string, _calldataAddr common.Address, _aggrType uint8, _precision uint8, _executionTimestamp *big.Int) (*types.Transaction, error) {\n\treturn _IOrakuruCore.contract.Transact(opts, \"makeRequest\", _dataSource, _selector, _calldataAddr, _aggrType, _precision, _executionTimestamp)\n}", "func (c *Client) NewShowContractRequest(ctx context.Context, path string) (*http.Request, error) {\n\tscheme := c.Scheme\n\tif scheme == \"\" {\n\t\tscheme = \"http\"\n\t}\n\tu := url.URL{Host: c.Host, Scheme: scheme, Path: path}\n\treq, err := 
http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (c Client) request(method string, u *url.URL, body io.Reader) (io.ReadCloser, error) {\n\treq, err := http.NewRequest(method, u.String(), body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating request failed: %s\", err.Error())\n\t}\n\treq.Header.Add(tokenHeader, c.token)\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\n\tresp, err := c.httpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"request failed: %s\", err.Error())\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"status code: %d, message: %s\", resp.StatusCode, decodeError(resp.Body))\n\t}\n\n\treturn resp.Body, nil\n}", "func (client *VirtualMachineScaleSetRollingUpgradesClient) getLatestCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetRollingUpgradesGetLatestOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/latest\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{vmScaleSetName}\", url.PathEscape(vmScaleSetName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client IdentityClient) getTaggingWorkRequest(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/taggingWorkRequests/{workRequestId}\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response GetTaggingWorkRequestResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func GetRequest(url string, endpoint string) (int, error) {\n\taddr := fmt.Sprintf(\"%s/%s/\", url, endpoint)\n\tr, err := http.NewRequest(\"GET\", addr, bytes.NewBuffer([]byte{}))\n\n\tif err != nil {\n\t\tlog.Println(\"Error creating collection request, error:\", err)\n\t\treturn 0, err\n\t}\n\n\tclient := &http.Client{\n\t\tTimeout: 5 * time.Second,\n\t}\n\tresp, err := client.Do(r)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn resp.StatusCode, err\n\n}", "func CreateGetIndustryCommerceInfoRequest() (request *GetIndustryCommerceInfoRequest) {\n\trequest = &GetIndustryCommerceInfoRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"companyreg\", \"2020-10-22\", \"GetIndustryCommerceInfo\", \"companyreg\", \"openAPI\")\n\trequest.Method = requests.GET\n\treturn\n}", "func (client *Client) getChatCompletionsCreateRequest(ctx context.Context, body ChatCompletionsOptions, options 
*GetChatCompletionsOptions) (*policy.Request, error) {\n\turlPath := \"chat/completions\"\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, client.formatURL(urlPath, getDeploymentID(body)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-07-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, body); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func SendRequest(workflowID string) (string, error) {\n\taccessKey, clusterID, serverAddr, err := getAgentConfigMapData()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tpayload := `{\"query\": \"mutation { gitopsNotifer(clusterInfo: { cluster_id: \\\"` + clusterID + `\\\", access_key: \\\"` + accessKey + `\\\"}, workflow_id: \\\"` + workflowID + `\\\")\\n}\"}`\n\treq, err := http.NewRequest(\"POST\", serverAddr, bytes.NewBuffer([]byte(payload)))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 200 {\n\t\treturn \"URL is not reachable or Bad request\", nil\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(body), nil\n}", "func (*GetRequest) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_request_proto_rawDescGZIP(), []int{0}\n}", "func handleGetRequest(key string, s *Sailor, st *storage.State) (string, error) {\n\tgt := storage.GenerateTransaction(storage.GetOp, key, \"\")\n\treturn st.ApplyTransaction(gt)\n}", "func getProviderInfoRequest(c *xgb.Conn, Provider Provider, ConfigTimestamp xproto.Timestamp) []byte {\n\tsize := 12\n\tb := 0\n\tbuf := make([]byte, size)\n\n\tc.ExtLock.RLock()\n\tbuf[b] = c.Extensions[\"RANDR\"]\n\tc.ExtLock.RUnlock()\n\tb += 1\n\n\tbuf[b] = 33 // request opcode\n\tb += 1\n\n\txgb.Put16(buf[b:], uint16(size/4)) // write request size in 4-byte units\n\tb += 2\n\n\txgb.Put32(buf[b:], uint32(Provider))\n\tb += 4\n\n\txgb.Put32(buf[b:], uint32(ConfigTimestamp))\n\tb += 4\n\n\treturn buf\n}", "func (session *BobSession) GetRequest(requestFile string) error {\n\tif err := utils.CheckDirOfPathExistence(requestFile); err != nil {\n\t\treturn err\n\t}\n\n\thandle := C.handle_t(session.handle)\n\n\trequestFileCStr := C.CString(requestFile)\n\tdefer C.free(unsafe.Pointer(requestFileCStr))\n\n\tif ret := bool(\n\t\tC.E_TableOtComplaintBobGetRequest(handle, requestFileCStr)); !ret {\n\t\treturn fmt.Errorf(\"E_TableOtComplaintBobGetRequest(%v, %s) failed\",\n\t\t\thandle, requestFile)\n\t}\n\n\treturn nil\n}", "func CreateDescribeExplorerRequest() (request *DescribeExplorerRequest) {\n\trequest = &DescribeExplorerRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Baas\", \"2018-07-31\", \"DescribeExplorer\", \"\", \"\")\n\treturn\n}", "func getCommitID(r *http.Request) (vcs.CommitID, bool, error) {\n\treturn checkCommitID(mux.Vars(r)[\"CommitID\"])\n}", "func CreateGetOpenNLURequest() (request *GetOpenNLURequest) {\n\trequest = &GetOpenNLURequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetOpenNLU\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (*GetRepositoryCommitByReferenceRequest) Descriptor() ([]byte, 
[]int) {\n\treturn file_buf_alpha_registry_v1alpha1_repository_commit_proto_rawDescGZIP(), []int{5}\n}", "func (client *Client) getCompletionsCreateRequest(ctx context.Context, body CompletionsOptions, options *GetCompletionsOptions) (*policy.Request, error) {\n\turlPath := \"completions\"\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, client.formatURL(urlPath, getDeploymentID(body)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-07-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, body); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func revisionFromRequest(recent types.FileContractRevision, pbcr modules.PayByContractRequest) types.FileContractRevision {\n\trev := recent\n\n\trev.NewRevisionNumber = pbcr.NewRevisionNumber\n\trev.NewValidProofOutputs = make([]types.SiacoinOutput, len(pbcr.NewValidProofValues))\n\tfor i, v := range pbcr.NewValidProofValues {\n\t\tif i >= len(recent.NewValidProofOutputs) {\n\t\t\tbreak\n\t\t}\n\t\trev.NewValidProofOutputs[i] = types.SiacoinOutput{\n\t\t\tValue: v,\n\t\t\tUnlockHash: recent.NewValidProofOutputs[i].UnlockHash,\n\t\t}\n\t}\n\n\trev.NewMissedProofOutputs = make([]types.SiacoinOutput, len(pbcr.NewMissedProofValues))\n\tfor i, v := range pbcr.NewMissedProofValues {\n\t\tif i >= len(recent.NewMissedProofOutputs) {\n\t\t\tbreak\n\t\t}\n\t\trev.NewMissedProofOutputs[i] = types.SiacoinOutput{\n\t\t\tValue: v,\n\t\t\tUnlockHash: recent.NewMissedProofOutputs[i].UnlockHash,\n\t\t}\n\t}\n\n\treturn rev\n}", "func (_BaseLibrary *BaseLibraryTransactor) AccessRequest(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _BaseLibrary.contract.Transact(opts, \"accessRequest\")\n}", "func (*ERC20WithdrawalApprovalRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_trading_proto_rawDescGZIP(), []int{134}\n}", "func (rt *rtuTransport) ReadRequest() (req *pdu, err error) {\n\t// reading requests from RTU links is currently unsupported\n\terr\t= fmt.Errorf(\"unimplemented\")\n\n\treturn\n}", "func ValidateGetRequest(request *GetHistoryRequest) error {\n\tif request.NamespaceID == \"\" {\n\t\treturn errEmptyNamespaceID\n\t}\n\tif request.WorkflowID == \"\" {\n\t\treturn errEmptyWorkflowID\n\t}\n\tif request.RunID == \"\" {\n\t\treturn errEmptyRunID\n\t}\n\tif request.PageSize == 0 {\n\t\treturn errInvalidPageSize\n\t}\n\treturn nil\n}", "func (r *tee) Request(fc filters.FilterContext) {\n\treq := fc.Request()\n\tcopyOfRequest, tr, err := cloneRequest(r, req)\n\tif err != nil {\n\t\tfc.Logger().Warnf(\"tee: error while cloning the tee request %v\", err)\n\t\treturn\n\t}\n\n\treq.Body = tr\n\n\tgo func() {\n\t\tdefer func() {\n\t\t\tif r.shadowRequestDone != nil {\n\t\t\t\tr.shadowRequestDone()\n\t\t\t}\n\t\t}()\n\n\t\trsp, err := r.client.Do(copyOfRequest)\n\t\tif err != nil {\n\t\t\tfc.Logger().Warnf(\"tee: error while tee request %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\trsp.Body.Close()\n\t}()\n}", "func (store *Engine) CreateRequest(request *Request) (string, error) {\n\tvar id struct {\n\t\tID string `json:\"id\"`\n\t}\n\n\t_, err := store.api.\n\t\tURL(\"/workflow-engine/api/v1/requests\").\n\t\tPost(&request, &id)\n\n\treturn id.ID, err\n}", "func (client *RoleDefinitionsClient) getByIDCreateRequest(ctx context.Context, roleID string, options *RoleDefinitionsGetByIDOptions) (*policy.Request, error) {\n\turlPath := \"/{roleId}\"\n\turlPath = 
strings.ReplaceAll(urlPath, \"{roleId}\", roleID)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (_e *MockPlcWriteResponse_Expecter) GetRequest() *MockPlcWriteResponse_GetRequest_Call {\n\treturn &MockPlcWriteResponse_GetRequest_Call{Call: _e.mock.On(\"GetRequest\")}\n}", "func (p *Printer) ReadRequest(ctx context.Context) ([]byte, error) {\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn nil, errors.Wrap(ctx.Err(), \"ReadRequest timed out\")\n\tcase v, ok := <-p.ch:\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"p.ch is unexpectedly closed\")\n\t\t}\n\t\treturn v, nil\n\t}\n}", "func (dc DefaultContainer) GetRequest() *http.Request { return dc.Request }", "func (c *APIGateway) GetStagesRequest(input *GetStagesInput) (req *request.Request, output *GetStagesOutput) {\n\top := &request.Operation{\n\t\tName: opGetStages,\n\t\tHTTPMethod: \"GET\",\n\t\tHTTPPath: \"/restapis/{restapi_id}/stages\",\n\t}\n\n\tif input == nil {\n\t\tinput = &GetStagesInput{}\n\t}\n\n\treq = c.newRequest(op, input, output)\n\toutput = &GetStagesOutput{}\n\treq.Data = output\n\treturn\n}", "func (r *ApprovalWorkflowProviderRequestsAwaitingMyDecisionCollectionRequest) Get(ctx context.Context) ([]RequestObject, error) {\n\treturn r.GetN(ctx, 0)\n}", "func (*GetRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_exercicio_proto_rawDescGZIP(), []int{3}\n}", "func (client *ClustersClient) getCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, options *ClustersGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/clusters/{clusterName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (_IOrakuruCore *IOrakuruCoreTransactorSession) MakeRequest(_dataSource string, _selector string, _calldataAddr common.Address, _aggrType uint8, _precision uint8, _executionTimestamp *big.Int) (*types.Transaction, error) {\n\treturn _IOrakuruCore.Contract.MakeRequest(&_IOrakuruCore.TransactOpts, _dataSource, _selector, _calldataAddr, _aggrType, _precision, _executionTimestamp)\n}", "func (client *IncidentsClient) getCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, incidentID string, options *IncidentsClientGetOptions) (*policy.Request, error) 
{\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/incidents/{incidentId}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\tif incidentID == \"\" {\n\t\treturn nil, errors.New(\"parameter incidentID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{incidentId}\", url.PathEscape(incidentID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-05-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (*GetRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_task_v1_task_proto_rawDescGZIP(), []int{3}\n}", "func (*GetProvisioningApprovalRequestRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_devices_proto_v1alpha_provisioning_approval_request_service_proto_rawDescGZIP(), []int{0}\n}", "func (r *vtmClient) buildAPIRequest(method, uri string, reader io.Reader) (request *http.Request, err error) {\n\t// Create the endpoint URL\n\turl := fmt.Sprintf(\"%s/%s\", r.config.URL, uri)\n\n\t// Make the http request to VTM\n\trequest, err = http.NewRequest(method, url, reader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Add any basic auth and the content headers\n\tif r.config.HTTPBasicAuthUser != \"\" && r.config.HTTPBasicPassword != \"\" {\n\t\trequest.SetBasicAuth(r.config.HTTPBasicAuthUser, r.config.HTTPBasicPassword)\n\t}\n\n\trequest.Header.Add(\"Content-Type\", \"application/json\")\n\trequest.Header.Add(\"Accept\", \"application/json\")\n\n\treturn request, nil\n}" ]
[ "0.6219865", "0.6180947", "0.60726804", "0.57100993", "0.56391525", "0.55845666", "0.55674565", "0.55537844", "0.5511908", "0.5506123", "0.542247", "0.54088473", "0.5395237", "0.5330503", "0.53254104", "0.5315587", "0.52814925", "0.52538687", "0.524927", "0.524927", "0.521511", "0.5209663", "0.5195773", "0.518548", "0.5167153", "0.51520675", "0.514349", "0.51173383", "0.5104785", "0.5100844", "0.50975066", "0.50900525", "0.50721115", "0.50705534", "0.5060392", "0.504271", "0.50316775", "0.50188774", "0.5017002", "0.5013452", "0.5002642", "0.49971446", "0.49958268", "0.49917328", "0.49859953", "0.4982854", "0.49782956", "0.49763", "0.49692646", "0.49610773", "0.495777", "0.4957689", "0.49342757", "0.49311796", "0.49296695", "0.4922861", "0.492223", "0.49128973", "0.49116707", "0.49086472", "0.49038845", "0.4902123", "0.4898198", "0.4855922", "0.48557776", "0.48551196", "0.48514584", "0.48511663", "0.48506176", "0.48493704", "0.4846399", "0.48432586", "0.48403645", "0.48401505", "0.48357776", "0.48319831", "0.48259532", "0.4823683", "0.48197883", "0.48179218", "0.48143408", "0.4813552", "0.48045933", "0.4804029", "0.4801078", "0.47995347", "0.4795707", "0.4795607", "0.4794961", "0.47939512", "0.47927257", "0.47924608", "0.47890735", "0.4787683", "0.47876093", "0.47867587", "0.478554", "0.47833878", "0.47809857", "0.4780299" ]
0.8301529
0
GetCommitteeRequestVotes api request get_committee_request_votes
func (api *API) GetCommitteeRequestVotes(id uint32) ([]*CommitteeVoteState, error) { var resp []*CommitteeVoteState err := api.call("committee_api", "get_committee_request_votes", []interface{}{id}, &resp) return resp, err }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (n *Node) requestVotes(currTerm uint64) (fallback, electionResult bool) {\n\t// TODO: Students should implement this method\n\treturn\n}", "func (api *API) GetCommitteeRequest(id uint32, count int32) (*CommitteeObject, error) {\n\tvar resp CommitteeObject\n\terr := api.call(\"committee_api\", \"get_committee_request\", []interface{}{id, count}, &resp)\n\treturn &resp, err\n}", "func (r *Node) requestVotes(electionResults chan bool, fallbackChan chan bool, currTerm uint64) {\n\t// Votes received\n\tremaining := 0\n\tresultChan := make(chan RequestVoteResult)\n\tfor _, peer := range r.Peers {\n\t\tif r.Self.GetId() == peer.GetId() {\n\t\t\tcontinue\n\t\t}\n\t\tmsg := rpc.RequestVoteRequest{\n\t\t\tTerm: currTerm,\n\t\t\tCandidate: r.Self,\n\t\t\tLastLogIndex: r.LastLogIndex(),\n\t\t\tLastLogTerm: r.GetLog(r.LastLogIndex()).GetTermId(),\n\t\t}\n\t\tremaining++\n\t\tgo r.requestPeerVote(peer, &msg, resultChan)\n\t}\n\n\tvote := 1\n\treject := 0\n\tmajority := r.config.ClusterSize/2 + 1\n\tif vote >= majority {\n\t\telectionResults <- true\n\t\treturn\n\t}\n\tfor remaining > 0 {\n\t\trequestVoteResult := <-resultChan\n\t\tremaining--\n\t\tif requestVoteResult == RequestVoteFallback {\n\t\t\tfallbackChan <- true\n\t\t\treturn\n\t\t}\n\t\tif requestVoteResult == RequestVoteSuccess {\n\t\t\tvote++\n\t\t\tif vote >= majority {\n\t\t\t\telectionResults <- true\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\treject++\n\t\t\tif reject >= majority {\n\t\t\t\telectionResults <- false\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func (s *RaftServer) RequestVote(_ context.Context, request *raftapi.RequestVoteMessage) (*raftapi.RequestVoteResponse, error) {\n\tlog.WithFields(s.LogFields()).Debugln(\"Received RequestVote\")\n\ts.lastHeartbeat = time.Now()\n\tterm := s.getTerm()\n\tif votedFor, has := s.votedOn[term]; !has || votedFor == request.Candidate {\n\t\tlogSize, _ := s.logRepo.LogSize()\n\t\tif term < request.Term || (term == request.Term && logSize < request.LogSize) {\n\t\t\ts.votedOn[term] = request.Candidate\n\t\t\treturn &raftapi.RequestVoteResponse{\n\t\t\t\tTerm: term,\n\t\t\t\tApproved: true,\n\t\t\t}, nil\n\t\t}\n\t}\n\treturn &raftapi.RequestVoteResponse{Term: term}, nil\n}", "func (r *Raft) serviceRequestVote(request RequestVote, state int) {\n\t//fmt.Println(\"In service RV method of \", r.Myconfig.Id)\n\tresponse := RequestVoteResponse{}\n\tcandidateId := request.CandidateId\n\tresponse.Id = r.Myconfig.Id\n\tif r.isDeservingCandidate(request) {\n\t\tresponse.VoteGranted = true\n\t\tr.myCV.VotedFor = candidateId\n\t\tr.myCV.CurrentTerm = request.Term\n\t} else {\n\t\tif request.Term > r.myCV.CurrentTerm {\n\t\t\tr.myCV.CurrentTerm = request.Term\n\t\t\tr.myCV.VotedFor = -1\n\t\t}\n\t\tresponse.VoteGranted = false\n\t}\n\tif request.Term > r.myCV.CurrentTerm {\n\t\tr.WriteCVToDisk()\n\t}\n\tresponse.Term = r.myCV.CurrentTerm\n\tr.send(candidateId, response) //send to sender using send(sender,response)\n}", "func (node *Node) ReceiveRequestVote(ctx context.Context, buffer []byte) (candidateAddress string, request RequestVote, err error) {\n candidateAddress, err = node.receive(ctx, buffer, &request)\n return\n}", "func (m *Member) RequestVote(ctx context.Context, leader string, term uint64, logSize uint64) (*raftapi.RequestVoteResponse, error) {\n\tlog.WithFields(log.Fields{\"member_name\": m.Name}).Debugln(\"Requesting vote from\")\n\tvar conn *grpc.ClientConn\n\tconn, err := grpc.Dial(m.Address(), grpc.WithInsecure())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer 
conn.Close()\n\tapi := raftapi.NewRaftServiceClient(conn)\n\tresponse, err := api.RequestVote(ctx, &raftapi.RequestVoteMessage{\n\t\tTerm: term,\n\t\tCandidate: leader,\n\t\tLogSize: logSize,\n\t\tLastLogTerm: 0,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn response, nil\n}", "func (r *Raft) serviceRequestVote(request RequestVote) {\n\t//fmt.Println(\"In service RV method of \", r.Myconfig.Id)\n\tresponse := RequestVoteResponse{} //prep response object,for responding back to requester\n\tcandidateId := request.candidateId\n\tresponse.id = r.Myconfig.Id\n\t//fmt.Println(\"Follower\", r.Myconfig.Id, \"log as complete?\", r.logAsGoodAsMine(request))\n\tif r.isDeservingCandidate(request) {\n\t\tresponse.voteGranted = true\n\t\tr.votedFor = candidateId\n\t\tr.currentTerm = request.term\n\n\t\t//Writing current term and voteFor to disk\n\t\tr.WriteCVToDisk()\n\n\t} else {\n\t\tresponse.voteGranted = false\n\t}\n\tresponse.term = r.currentTerm //to return self's term too\n\n\t//fmt.Println(\"Follower\", r.Myconfig.Id, \"voting\", response.voteGranted) //\"because votefor is\", r.votedFor, \"my and request terms are:\", r.currentTerm, request.term)\n\t//fmt.Println(\"Follower\", r.Myconfig.Id, \"Current term,request.term is\", r.currentTerm, request.term, \"Self lastLogIndex is\", r.myMetaData.lastLogIndex, \"VotedFor,request.lastLogTerm\", r.votedFor, request.lastLogTerm)\n\t//fmt.Println(\"VotedFor,request.lastLogTerm\", r.votedFor, request.lastLogTerm)\n\n\t//fmt.Printf(\"In serviceRV of %v, obj prep is %v \\n\", r.Myconfig.Id, response)\n\tsend(candidateId, response) //send to sender using send(sender,response)\n}", "func (q queryServer) Votes(ctx context.Context, req *v1.QueryVotesRequest) (*v1.QueryVotesResponse, error) {\n\tif req == nil {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"invalid request\")\n\t}\n\n\tif req.ProposalId == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"proposal id can not be 0\")\n\t}\n\n\tvotes, pageRes, err := query.CollectionPaginate(ctx, q.k.Votes, req.Pagination, func(_ collections.Pair[uint64, sdk.AccAddress], value v1.Vote) (vote *v1.Vote, err error) {\n\t\treturn &value, nil\n\t}, query.WithCollectionPaginationPairPrefix[uint64, sdk.AccAddress](req.ProposalId))\n\tif err != nil {\n\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t}\n\n\treturn &v1.QueryVotesResponse{Votes: votes, Pagination: pageRes}, nil\n}", "func (api *API) GetCommitteeRequestsList(status uint16) ([]*uint16, error) {\n\tvar resp []*uint16\n\terr := api.call(\"committee_api\", \"get_committee_requests_list\", []interface{}{status}, &resp)\n\treturn resp, err\n}", "func parseVoteRequest(r *http.Request) (electionID string, ballotID string, err error) {\n\t// Parse URL and route\n\turlparts := strings.Split(r.RequestURI, \"/\")\n\n\t// Check for the correct number of request parts\n\tif len(urlparts) < 3 || len(urlparts) > 4 {\n\t\terr = parseError{\"Invalid number of url parts. 404 Not Found.\", http.StatusNotFound}\n\t\treturn\n\t}\n\n\t// Get the electionID\n\telectionID = urlparts[2]\n\tif len(electionID) > MaxElectionIDSize || !ValidElectionID.MatchString(electionID) {\n\t\terr = parseError{\"Invalid Election ID. 
404 Not Found.\", http.StatusNotFound}\n\t\treturn\n\t}\n\n\t// If we are only length 3, that's it, we are asking for a full report / ballot roll for an election\n\tif len(urlparts) == 3 || urlparts[3] == \"\" {\n\t\treturn\n\t}\n\n\t// Get the ballotID (hex encoded SHA512 of base64 encoded public-key)\n\tballotID = urlparts[3]\n\tif len(ballotID) > MaxBallotIDSize || !ValidBallotID.MatchString(ballotID) {\n\t\terr = parseError{\"Invalid Ballot ID. 404 Not Found.\", http.StatusNotFound}\n\t}\n\n\t// If the user has provided a signature of the request in the headers, verify it\n\tif r.Header.Get(\"X-Voteflow-Signature\") != \"\" {\n\t\t// Verify the signature headers, do a cryptographic check to make sure the header and Method / URL request is signed\n\t\tif suberr := verifySignatureHeaders(r); suberr != nil {\n\t\t\terr = parseError{suberr.Error(), http.StatusBadRequest}\n\t\t\treturn\n\t\t}\n\t}\n\n\t// All checks pass\n\treturn\n}", "func (*ObserveProposalVotesRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_trading_proto_rawDescGZIP(), []int{43}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\n\trf.mu.Lock()\n\trf.debug(\"***************Inside the RPC handler for sendRequestVote *********************\")\n\tdefer rf.mu.Unlock()\n\tvar lastIndex int\n\t//var lastTerm int\n\tif len(rf.log) > 0 {\n\t\tlastLogEntry := rf.log[len(rf.log)-1]\n\t\tlastIndex = lastLogEntry.LastLogIndex\n\t\t//lastTerm = lastLogEntry.lastLogTerm\n\t}else{\n\t\tlastIndex = 0\n\t\t//lastTerm = 0\n\t}\n\treply.Term = rf.currentTerm\n\t//rf.debug()\n\tif args.Term < rf.currentTerm {\n\t\treply.VoteGranted = false\n\t\trf.debug(\"My term is higher than candidate's term, myTerm = %d, candidate's term = %d\", rf.currentTerm,args.Term )\n\t} else if (rf.votedFor == -1 || rf.votedFor == args.CandidateId) && args.LastLogIndex >= lastIndex {\n\t\trf.votedFor = args.CandidateId\n\t\treply.VoteGranted = true\n\t\trf.currentTerm = args.Term\n\t\trf.resetElectionTimer()\n\t\t//rf.debug(\"I am setting my currentTerm to -->\",args.Term,\"I am \",rf.me)\n\t}\n}", "func queryVotesOnProposalHandlerFn(cdc *wire.Codec) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tvars := mux.Vars(r)\n\t\tstrProposalID := vars[RestProposalID]\n\n\t\tif len(strProposalID) == 0 {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\terr := errors.New(\"proposalId required but not specified\")\n\t\t\tw.Write([]byte(err.Error()))\n\t\t\treturn\n\t\t}\n\n\t\tproposalID, err := strconv.ParseInt(strProposalID, 10, 64)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\terr := errors.Errorf(\"proposalID [%s] is not positive\", proposalID)\n\t\t\tw.Write([]byte(err.Error()))\n\t\t\treturn\n\t\t}\n\n\t\tctx := context.NewCoreContextFromViper()\n\n\t\tres, err := ctx.QueryStore(gov.KeyProposal(proposalID), storeName)\n\t\tif err != nil || len(res) == 0 {\n\t\t\terr := errors.Errorf(\"proposalID [%d] does not exist\", proposalID)\n\t\t\tw.Write([]byte(err.Error()))\n\t\t\treturn\n\t\t}\n\n\t\tvar proposal gov.Proposal\n\t\tcdc.MustUnmarshalBinary(res, &proposal)\n\n\t\tif proposal.GetStatus() != gov.StatusVotingPeriod {\n\t\t\terr := errors.Errorf(\"proposal is not in Voting Period\", proposalID)\n\t\t\tw.Write([]byte(err.Error()))\n\t\t\treturn\n\t\t}\n\n\t\tres2, err := ctx.QuerySubspace(cdc, gov.KeyVotesSubspace(proposalID), storeName)\n\t\tif err != nil {\n\t\t\terr = errors.New(\"ProposalID doesn't 
exist\")\n\t\t\tw.Write([]byte(err.Error()))\n\t\t\treturn\n\t\t}\n\n\t\tvar votes []gov.Vote\n\n\t\tfor i := 0; i < len(res2); i++ {\n\t\t\tvar vote gov.Vote\n\t\t\tcdc.MustUnmarshalBinary(res2[i].Value, &vote)\n\t\t\tvotes = append(votes, vote)\n\t\t}\n\n\t\toutput, err := wire.MarshalJSONIndent(cdc, votes)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tw.Write([]byte(err.Error()))\n\t\t\treturn\n\t\t}\n\t\tw.Write(output)\n\t}\n}", "func (t transporter) SendVoteRequest(server *raft.Server, peer *raft.Peer, req *raft.RequestVoteRequest) *raft.RequestVoteResponse {\n\tvar rvrsp *raft.RequestVoteResponse\n\tvar b bytes.Buffer\n\tjson.NewEncoder(&b).Encode(req)\n\n\tdebug(\"Send Vote to %s\", peer.Name())\n\n\tresp, err := t.Post(fmt.Sprintf(\"%s/vote\", peer.Name()), &b)\n\n\tif err != nil {\n\t\tdebug(\"Cannot send VoteRequest to %s : %s\", peer.Name(), err)\n\t}\n\n\tif resp != nil {\n\t\tdefer resp.Body.Close()\n\t\trvrsp := &raft.RequestVoteResponse{}\n\t\tif err := json.NewDecoder(resp.Body).Decode(&rvrsp); err == nil || err == io.EOF {\n\t\t\treturn rvrsp\n\t\t}\n\n\t}\n\treturn rvrsp\n}", "func (*GetVotesByPartyRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_trading_proto_rawDescGZIP(), []int{25}\n}", "func (rf *Raft) requestVote(req *opRequest) {\n\targs := req.args.(*pb.RequestVoteRequest)\n\treply := req.reply.(*pb.RequestVoteReply)\n\n\tif args.Term < rf.term {\n\t\treply.Term = rf.term\n\t\treply.Reject = true\n\t\treq.errorCh <- nil\n\t\treturn\n\t}\n\n\tif args.Term > rf.term {\n\t\trf.term = args.Term\n\t\trf.voteFor = 0\n\t}\n\tcanVote := rf.voteFor == args.Id ||\n\t\t(rf.voteFor == 0 && rf.raftLog.IsUptoDate(args.LastIndex, args.LastTerm))\n\tif canVote {\n\t\trf.voteFor = args.Id\n\t\trf.electionTimeoutCounter = 0\n\t\trf.becomeFollower(args.Term, 0)\n\t} else {\n\t\treply.Reject = true\n\t}\n\treply.Term = rf.term\n\treq.errorCh <- nil\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.Lock()\n\tdefer rf.Unlock()\n\tRaftInfo(\"Get Request from %s\", rf, args.CandidateId)\n\tlastIndex, lastTerm := rf.getLastEntryInfo()\n\tisLogUpToDate := func() bool {\n\t\tif lastTerm == args.LastLogTerm {\n\t\t\treturn lastIndex <= args.LastLogIndex\n\t\t} else {\n\t\t\treturn lastTerm < args.LastLogTerm\n\t\t}\n\t}()\n\n\treply.Term = rf.currentTerm\n\treply.Id = rf.id\n\n\tif args.Term < rf.currentTerm {\n\t\treply.VoteGranted = false\n\t} else if args.Term >= rf.currentTerm && isLogUpToDate {\n\t\trf.transitionToFollower(args.Term)\n\t\trf.voteForID = args.CandidateId\n\t\treply.VoteGranted = true\n\t} else if (rf.voteForID == \"\" || args.CandidateId == rf.voteForID) && isLogUpToDate {\n\t\trf.voteForID = args.CandidateId\n\t\treply.VoteGranted = true\n\t}\n\n\trf.persist()\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tcurrentTerm := rf.currentTerm\n\tif args == nil {\n\t\tDPrintf(\"Peer-%d received a null vote request.\", rf.me)\n\t\treturn\n\t}\n\tcandidateTerm := args.Term\n\tcandidateId := args.Candidate\n\tDPrintf(\"Peer-%d received a vote request %v from peer-%d.\", rf.me, *args, candidateId)\n\tif candidateTerm < currentTerm {\n\t\tDPrintf(\"Peer-%d's term=%d > candidate's term=%d.\\n\", rf.me, currentTerm, candidateTerm)\n\t\treply.Term = currentTerm\n\t\treply.VoteGrant = false\n\t\treturn\n\t} else if candidateTerm == currentTerm {\n\t\tif rf.voteFor != -1 && 
rf.voteFor != candidateId {\n\t\t\tDPrintf(\"Peer-%d has grant to peer-%d before this request from peer-%d.\", rf.me, rf.voteFor, candidateId)\n\t\t\treply.Term = currentTerm\n\t\t\treply.VoteGrant = false\n\t\t\treturn\n\t\t}\n\t\tDPrintf(\"Peer-%d's term=%d == candidate's term=%d, to check index.\\n\", rf.me, currentTerm, candidateTerm)\n\t} else {\n\t\tDPrintf(\"Peer-%d's term=%d < candidate's term=%d.\\n\", rf.me, currentTerm, candidateTerm)\n\t\t// begin to update status\n\t\trf.currentTerm = candidateTerm // find larger term, up to date\n\t\trf.transitionState(NewTerm) // transition to Follower.\n\t\tgo func() {\n\t\t\trf.eventChan <- NewTerm // tell the electionService to change state.\n\t\t}()\n\t}\n\t// check whose log is up-to-date\n\tcandiLastLogIndex := args.LastLogIndex\n\tcandiLastLogTerm := args.LastLogTerm\n\tlocalLastLogIndex := len(rf.log) - 1\n\tlocalLastLogTerm := -1\n\tif localLastLogIndex >= 0 {\n\t\tlocalLastLogTerm = rf.log[localLastLogIndex].Term\n\t}\n\t// check term first, if term is the same, then check the index.\n\tDPrintf(\"Peer-%d try to check last entry, loacl: index=%d;term=%d, candi: index=%d,term=%d.\", rf.me, localLastLogIndex, localLastLogTerm, candiLastLogIndex, candiLastLogTerm)\n\tif localLastLogTerm > candiLastLogTerm {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGrant = false\n\t\treturn\n\t} else if localLastLogTerm == candiLastLogTerm {\n\t\tif localLastLogIndex > candiLastLogIndex {\n\t\t\treply.Term = rf.currentTerm\n\t\t\treply.VoteGrant = false\n\t\t\treturn\n\t\t}\n\t} else {\n\t}\n\t// heartbeat.\n\tgo func() {\n\t\trf.eventChan <- HeartBeat\n\t}()\n\t// local log are up-to-date, grant\n\t// before grant to candidate, we should reset ourselves state.\n\trf.transitionState(NewLeader)\n\trf.voteFor = candidateId\n\treply.Term = rf.currentTerm\n\treply.VoteGrant = true\n\tDPrintf(\"Peer-%d grant to peer-%d.\", rf.me, candidateId)\n\trf.persist()\n\treturn\n}", "func (q queryServer) Vote(ctx context.Context, req *v1.QueryVoteRequest) (*v1.QueryVoteResponse, error) {\n\tif req == nil {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"invalid request\")\n\t}\n\n\tif req.ProposalId == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"proposal id can not be 0\")\n\t}\n\n\tif req.Voter == \"\" {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"empty voter address\")\n\t}\n\n\tvoter, err := q.k.authKeeper.AddressCodec().StringToBytes(req.Voter)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvote, err := q.k.Votes.Get(ctx, collections.Join(req.ProposalId, sdk.AccAddress(voter)))\n\tif err != nil {\n\t\tif errors.IsOf(err, collections.ErrNotFound) {\n\t\t\treturn nil, status.Errorf(codes.InvalidArgument,\n\t\t\t\t\"voter: %v not found for proposal: %v\", req.Voter, req.ProposalId)\n\t\t}\n\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t}\n\n\treturn &v1.QueryVoteResponse{Vote: &vote}, nil\n}", "func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here.\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tdefer rf.persist()\n\trf.updateTerm(args.Term)\n\treply.Term = rf.currentTerm\n\tlastLogIndex := rf.lastIncludedIndex + len(rf.log) - 1\n\tlastLogTerm := rf.log[len(rf.log)-1].Term\n\treply.VoteGranted = (rf.votedFor == -1 || rf.votedFor == args.CandidateId) && (lastLogTerm < args.LastLogTerm || lastLogTerm == args.LastLogTerm && lastLogIndex <= args.LastLogIndex)\n\tif reply.VoteGranted {\n\t\trf.votedFor = args.CandidateId\n\t}\n}", "func (_Bep20 *Bep20Caller) GetPriorVotes(opts 
*bind.CallOpts, account common.Address, blockNumber *big.Int) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _Bep20.contract.Call(opts, &out, \"getPriorVotes\", account, blockNumber)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (r *Raft) requestVote(rpc RPC, req *RequestVoteRequest) (transition bool) {\n\tr.peerLock.Lock()\n\tdefer r.peerLock.Unlock()\n\t// Setup a response\n\tpeers := make([][]byte, 0, len(r.peers))\n\tfor _, p := range r.peers {\n\t\tpeers = append(peers, []byte(p.String()))\n\t}\n\tresp := &RequestVoteResponse{\n\t\tTerm: r.getCurrentTerm(),\n\t\tGranted: false,\n\t\tPeers: peers,\n\t}\n\tvar err error\n\tdefer rpc.Respond(resp, err)\n\n\t// Ignore an older term\n\tif req.Term < r.getCurrentTerm() {\n\t\terr = errors.New(\"obsolete term\")\n\t\treturn\n\t}\n\n\t// Increase the term if we see a newer one\n\tif req.Term > r.getCurrentTerm() {\n\t\tif err := r.setCurrentTerm(req.Term); err != nil {\n\t\t\tr.logE.Printf(\"Failed to update current term: %w\", err)\n\t\t\treturn\n\t\t}\n\t\tresp.Term = req.Term\n\n\t\t// Ensure transition to follower\n\t\ttransition = true\n\t\tr.setState(Follower)\n\t}\n\n\t// Check if we have voted yet\n\tlastVoteTerm, err := r.stable.GetUint64(keyLastVoteTerm)\n\tif err != nil && err.Error() != \"not found\" {\n\t\tr.logE.Printf(\"raft: Failed to get last vote term: %w\", err)\n\t\treturn\n\t}\n\tlastVoteCandyBytes, err := r.stable.Get(keyLastVoteCand)\n\tif err != nil && err.Error() != \"not found\" {\n\t\tr.logE.Printf(\"raft: Failed to get last vote candidate: %w\", err)\n\t\treturn\n\t}\n\n\t// Check if we've voted in this election before\n\tif lastVoteTerm == req.Term && lastVoteCandyBytes != nil {\n\t\tr.logW.Printf(\"raft: Duplicate RequestVote for same term: %d\", req.Term)\n\t\tif bytes.Compare(lastVoteCandyBytes, req.Candidate) == 0 {\n\t\t\tr.logW.Printf(\"raft: Duplicate RequestVote from candidate: %s\", req.Candidate)\n\t\t\tresp.Granted = true\n\t\t}\n\t\treturn\n\t}\n\n\t// Reject if their term is older\n\tif r.getLastLogIndex() > 0 {\n\t\tvar lastLog Log\n\t\tif err := r.logs.GetLog(r.getLastLogIndex(), &lastLog); err != nil {\n\t\t\tr.logE.Printf(\"Failed to get last log: %d %v\",\n\t\t\t\tr.getLastLogIndex(), err)\n\t\t\treturn\n\t\t}\n\t\tif lastLog.Term > req.LastLogTerm {\n\t\t\tr.logW.Printf(\"Rejecting vote since our last term is greater\")\n\t\t\treturn\n\t\t}\n\n\t\tif lastLog.Index > req.LastLogIndex {\n\t\t\tr.logW.Printf(\"Rejecting vote since our last index is greater\")\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Persist a vote for safety\n\tif err := r.persistVote(req.Term, req.Candidate); err != nil {\n\t\tr.logE.Printf(\"raft: Failed to persist vote: %w\", err)\n\t\treturn\n\t}\n\n\tresp.Granted = true\n\treturn\n}", "func GetUserVotes(c *gin.Context) {\n\tuuid := c.Param(\"uuid\")\n\tvar user models.User\n\tvar votes []models.Vote\n\n\t// check if vote exists throw an not found error if not\n\tdb := db.GetDB()\n\tif err := db.Where(\"uuid = ?\", uuid).First(&user).Error; err != nil {\n\t\tc.AbortWithStatus(http.StatusNotFound)\n\t\treturn\n\t}\n\n\t// // Find vote and associated voter\n\t// db.Model(&vote).Association(\"UUIDVote\").Find(&user.UUIDVote)\n\n\t// // Get only all uuid from voter list\n\t// voteUUIDs := []string{}\n\t// for _, v := range user.UUIDVote {\n\t// \tvoteUUIDs = append(voteUUIDs, v.UUID.String())\n\t// }\n\n\tdb.Model(&user).Related(&votes, \"Votes\")\n\n\t// return json 
data\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"user\": user,\n\t\t\"votes\": votes,\n\t})\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n fmt.Printf(\"\\n -> I the Peer %d in got Vote Request from cadidate %d!\\n\",rf.me, args.CandidateId)\n \n rf.mu.Lock()\n defer rf.mu.Unlock() // TODO: ask professor/TA about this atomisitc and if mutex is needed.\n \n reply.FollowerTerm = rf.currentTerm\n \n rf.CheckTerm(args.CandidateTerm) \n \n // 2B code - fix if needed\n logUpToDate := false\n if len(rf.log) == 0 {\n logUpToDate = true\n } else if rf.log[len(rf.log)-1].Term < args.LastLogTerm {\n logUpToDate = true\n } else if rf.log[len(rf.log)-1].Term == args.LastLogTerm && \n len(rf.log) <= (args.LastLogIndex+1) {\n logUpToDate = true\n }\n // 2B code end\n \n reply.VoteGranted = (rf.currentTerm <= args.CandidateTerm && \n (rf.votedFor == -1 || rf.votedFor == args.CandidateId) &&\n logUpToDate) \n\n if reply.VoteGranted {\n rf.votedFor = args.CandidateId\n fmt.Printf(\"-> I the Peer %d say: Vote for cadidate %d Granted!\\n\",rf.me, args.CandidateId)\n } else {\n fmt.Printf(\"-> I the Peer %d say: Vote for cadidate %d Denied :/\\n\",rf.me, args.CandidateId)\n }\n}", "func FetchVotes(c *gin.Context) {\n\tid, err := utils.GetSessionID(c)\n\tif err != nil || id != \"CEO\" {\n\t\tc.String(http.StatusForbidden, \"Only the CEO can access this.\")\n\t\treturn\n\t}\n\n\tvotes, err := ElectionDb.GetVotes()\n\tif err != nil {\n\t\tc.String(http.StatusInternalServerError, \"Error while fetching votes.\")\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, &votes)\n}", "func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) {\n\tresp := make(chan interface{})\n\trf.rpcCh <- rpcCall{command: args, reply: resp}\n\n\t*reply = (<-resp).(RequestVoteReply)\n}", "func (*CommissionVotesRequest) Descriptor() ([]byte, []int) {\n\treturn file_resources_proto_rawDescGZIP(), []int{5}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tlastLogIndex, lastLogTerm := len(rf.log) + rf.compactIndex , 0\n\tif lastLogIndex > rf.compactIndex {\n\t\tlastLogTerm = rf.log[lastLogIndex - rf.compactIndex -1].Term\n\t} else if lastLogIndex == rf.compactIndex {\n\t\tlastLogTerm = rf.compactTerm\n\t}\n\n\tif args.Term < rf.currentTerm || (args.Term == rf.currentTerm && args.CandidateID != rf.votedFor) || args.LastLogTerm < lastLogTerm || (args.LastLogTerm == lastLogTerm && lastLogIndex > args.LastLogIndex) {\n\t\t// 1. The Term of RequestVote is out of date.\n\t\t// 2. The instance vote for other peer in this term.\n\t\t// 3. 
The log of Candidate is not the most update.\n\t\treply.VoteGranted = false\n\t\treply.Term = rf.currentTerm\n\t} else {\n\t\t// DPrintf(\"instance %d vote for %d, Term is %d, lastLogTerm is %d, args.LastLogTerm is %d, lastLogIndex is %d, args.LastLogIndex is %d, original votedFor is %d\", rf.me, args.CandidateID, args.Term, lastLogTerm, args.LastLogTerm, lastLogIndex, args.LastLogIndex, rf.votedFor)\n\t\trf.votedFor = args.CandidateID\n\t\trf.currentTerm = args.Term\n\n\t\treply.VoteGranted = true\n\t\treply.Term = rf.currentTerm\n\n\t\tif rf.role == Follower {\n\t\t\trf.validRpcTimestamp = time.Now()\n\t\t} else {\n\t\t\t// Notify the change of the role of instance.\n\t\t\tclose(rf.rollback)\n\t\t\trf.role = Follower\n\t\t}\n\t}\n\n\treturn\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\tgrantVote := false\n\trf.updateTerm(args.Term) // All servers: if args.Term > rf.currentTerm, set currentTerm, convert to follower\n\n\tswitch rf.state {\n\tcase Follower:\n\t\tif args.Term < rf.currentTerm {\n\t\t\tgrantVote = false\n\t\t} else if rf.votedFor == -1 || rf.votedFor == args.CandidateId {\n\t\t\tif len(rf.logs) == 0 {\n\t\t\t\tgrantVote = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlastLogTerm := rf.logs[len(rf.logs) - 1].Term\n\t\t\tif (lastLogTerm == args.LastLogTerm && len(rf.logs) <= args.LastLogIndex) || lastLogTerm < args.LastLogTerm {\n\t\t\t\tgrantVote = true\n\t\t\t}\n\t\t}\n\tcase Leader:\n\t\t// may need extra operation since the sender might be out-dated\n\tcase Candidate:\n\t\t// reject because rf has already voted for itself since it's in\n\t\t// Candidate state\n\t}\n\n\tif grantVote {\n\t\t// DPrintf(\"Peer %d: Granted RequestVote RPC from %d.(@%s state)\\n\", rf.me, args.CandidateId, rf.state)\n\t\treply.VoteGranted = true\n\t\trf.votedFor = args.CandidateId\n\t\t// reset election timeout\n\t\trf.hasHeartbeat = true\n\t} else {\n\t\t// DPrintf(\"Peer %d: Rejected RequestVote RPC from %d.(@%s state)\\n\", rf.me, args.CandidateId, rf.state)\n\t\treply.VoteGranted = false\n\t}\n\treply.VotersTerm = rf.currentTerm\n\n\t// when deal with cluster member changes, may also need to reject Request\n\t// within MINIMUM ELECTION TIMEOUT\n}", "func (r *Raft) broadcastRequestVote(ctx context.Context) {\n\tr.stateMutex.RLock()\n\trequest := &pb.RequestVoteRequest{\n\t\tTerm: r.stateManager.GetCurrentTerm(),\n\t\tCandidateId: r.settings.ID,\n\t\tLastLogIndex: r.logManager.GetLastLogIndex(),\n\t\tLastLogTerm: r.logManager.GetLastLogTerm(),\n\t}\n\tr.stateMutex.RUnlock()\n\n\tresponses := r.cluster.BroadcastRequestVoteRPCs(ctx, request)\n\n\t// Count of votes starts at one because the server always votes for itself\n\tcountOfVotes := 1\n\n\tr.stateMutex.Lock()\n\tdefer r.stateMutex.Unlock()\n\n\t// Check if the state hasn't been changed in the meantime\n\tif r.stateManager.GetRole() != CANDIDATE {\n\t\treturn\n\t}\n\n\t// Count the number of votes\n\tfor _, response := range responses {\n\t\t// Server was unable to respond\n\t\tif response == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If the response contains higher term, we convert to follower\n\t\tif response.GetTerm() > r.stateManager.GetCurrentTerm() {\n\t\t\tr.stateManager.SwitchPersistentState(response.GetTerm(), nil, FOLLOWER)\n\t\t\treturn\n\t\t}\n\n\t\tif response.GetVoteGranted() {\n\t\t\tcountOfVotes++\n\t\t}\n\t}\n\n\t// Check if majority reached\n\tif r.checkClusterMajority(countOfVotes) {\n\t\tr.stateManager.SwitchPersistentState(r.stateManager.GetCurrentTerm(), nil, 
LEADER)\n\t}\n}", "func (_Bep20 *Bep20CallerSession) GetPriorVotes(account common.Address, blockNumber *big.Int) (*big.Int, error) {\n\treturn _Bep20.Contract.GetPriorVotes(&_Bep20.CallOpts, account, blockNumber)\n}", "func (r *Raft) prepRequestVote() RequestVote {\n\tLastLogIndex := r.MyMetaData.LastLogIndex\n\t//if this is the request when log is empty\n\tvar lastLogTerm int\n\tif len(r.MyLog) == 0 {\n\t\tlastLogTerm = -1\n\t} else {\n\t\tlastLogTerm = r.MyLog[LastLogIndex].Term\n\t}\n\t//fmt.Println(\"here2\")\n\treqVoteObj := RequestVote{r.myCV.CurrentTerm, r.Myconfig.Id, LastLogIndex, lastLogTerm}\n\treturn reqVoteObj\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tdefer rf.persist()\n\n\treply.VoteGranted = false\n\n\tif args.Term < rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\n\tif args.Term == rf.currentTerm {\n\t\tif rf.voteFor == -1 || rf.voteFor == args.CandidateId {\n\t\t\tlastLogTerm, lastLogIdx := rf.getLastLogTermAndIdx()\n\t\t\tif lastLogTerm < args.LastLogTerm || lastLogTerm == args.LastLogTerm && lastLogIdx <= args.LastLogIdx {\n\t\t\t\treply.VoteGranted = true\n\t\t\t\trf.voteFor = args.CandidateId\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\treply.VoteGranted = false\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\treply.VoteGranted = false\n\t\t\treturn\n\t\t}\n\t}\n\n\tif args.Term > rf.currentTerm {\n\t\t//收到更大的term,先更新状态;再判断日志的新旧来投票\n\t\trf.changeToFollower(args.Term)\n\t\t//fixbug: 忘记在收到更大的term时更新votefor\n\t\trf.voteFor = -1\n\n\t\treply.Term = args.Term\n\n\t\tlastLogTerm, lastLogIdx := rf.getLastLogTermAndIdx()\n\t\tif lastLogTerm < args.LastLogTerm || lastLogTerm == args.LastLogTerm && lastLogIdx <= args.LastLogIdx {\n\t\t\treply.VoteGranted = true\n\t\t\trf.voteFor = args.CandidateId\n\t\t\treturn\n\t\t} else {\n\t\t\treply.VoteGranted = false\n\t\t\treturn\n\t\t}\n\t}\n\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\t//fmt.Println(\"got vote request at server id: \", rf.me)\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tif rf.currentTerm > args.Term {\n\t\treply.VoteGranted = false\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t} else if rf.currentTerm < args.Term {\n\t\trf.currentTerm = args.Term\n\t\treply.Term = rf.currentTerm\n\t\trf.state = follower\n\t}\n\t\n\tgranted := false\n\tif rf.votedFor == nil {\n\t\tgranted = true\n\t} else if *rf.votedFor == args.CandidateId {\n\t\tgranted = true\n\t}\n\tif !granted {\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\n\tif args.LastLogIndex != len(rf.log)-1 {\n\t\tgranted = false\n\t} else {\n\t\tif args.LastLogTerm != rf.log[len(rf.log)-1].Term {\n\t\t\tgranted = false\n\t\t}\n\t}\n\t\n\tif !granted {\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\n\treply.VoteGranted = true\n\trf.rpcCh<-voteRpc\n\treturn\n}", "func (*ObservePartyVotesRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_trading_proto_rawDescGZIP(), []int{45}\n}", "func (_Bep20 *Bep20Session) GetPriorVotes(account common.Address, blockNumber *big.Int) (*big.Int, error) {\n\treturn _Bep20.Contract.GetPriorVotes(&_Bep20.CallOpts, account, blockNumber)\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\tDPrintf(\"peer-%d gets a RequestVote RPC.\", rf.me)\n\t// Your code here (2A, 2B).\n\t// First, we need to detect obsolete information\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tif args.Term < rf.currentTerm 
{\n\t\treply.VoteGranted = false\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\n\tstepdown := false\n\t// step down and convert to follower, adopt the args.Term\n\tif args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\told_state := rf.state\n\t\trf.state = Follower\n\t\tif old_state == Leader {\n\t\t\trf.nonleaderCh <- true\n\t\t}\n\t\trf.votedFor = -1\n\t\trf.persist()\n\t\tstepdown = true\n\t}\n\n\t// 5.4.1 Election restriction : if the requester's log isn't more up-to-date than this peer's, don't vote for it.\n\t// check whether the requester's log is more up-to-date.(5.4.1 last paragraph)\n\tif len(rf.log) > 0 { // At first, there's no log entry in rf.log\n\t\tif rf.log[len(rf.log)-1].Term > args.LastLogTerm {\n\t\t\t// this peer's log is more up-to-date than requester's.\n\t\t\treply.VoteGranted = false\n\t\t\treply.Term = rf.currentTerm\n\t\t\treturn\n\t\t} else if rf.log[len(rf.log)-1].Term == args.LastLogTerm {\n\t\t\tif len(rf.log) > args.LastLogIndex {\n\t\t\t\t// this peer's log is more up-to-date than requester's.\n\t\t\t\treply.VoteGranted = false\n\t\t\t\treply.Term = rf.currentTerm\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\t// requester's log is more up-to-date than requester's.\n\t// Then, we should check whether this server has voted for another server in the same term\n\tif stepdown {\n\t\trf.resetElectionTimeout()\n\t\t// now we need to reset the election timer.\n\t\trf.votedFor = args.CandidateId // First-come-first-served\n\t\trf.persist()\n\t\treply.VoteGranted = true\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\n\t/* Section 5.5 :\n\t * The server may crash after it completing an RPC but before responsing, then it will receive the same RPC again after it restarts.\n\t * Raft RPCs are idempotent, so this causes no harm.\n\t */\n\tif rf.votedFor == -1 || rf.votedFor == args.CandidateId {\n\t\trf.votedFor = args.CandidateId\n\t\trf.persist()\n\t\treply.VoteGranted = true\n\t} else {\n\t\treply.VoteGranted = false // First-come-first-served, this server has voted for another server before.\n\t}\n\treply.Term = rf.currentTerm\n\treturn\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\t// defer DPrintf(\"%d(%d|term%d|vote%d) replyed %d(%d) with %s\", rf.me, rf.state, rf.currentTerm, rf.votedFor, args.CandidateId, args.Term, reply)\n\n\tif args.Term < rf.currentTerm {\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\n\tif (rf.votedFor == -1 || rf.votedFor == args.CandidateId) && args.LastLogIndex >= rf.lastApplied {\n\t\t//rf.resetHeartBeatsTimer()\n\n\t\treply.VoteGranted = true\n\t\t// rf.currentTerm += 1\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = args.CandidateId\n\t\trf.state = FOLLOWER\n\t\treturn\n\t} else {\n\t\treply.VoteGranted = false\n\t}\n\treply.Term = rf.currentTerm\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tcurrentTerm := rf.currentTerm\n\n\t//If RPC request or response contains term T > currentTerm:\n\t//set currentTerm = T, convert to follower\n\tif (args.Term > currentTerm) {\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = NILVOTE\n\n\t\tif rf.role == LEADER {\n\t\t\tDPrintf(\"LeaderCondition sorry server %d term %d not a leader, logs %v, commitIndex %d\\n\",rf.me, rf.currentTerm, rf.log, rf.commitIndex) \n\t\t} \n\t\trf.role = FOLLOWER\n\t\trf.persist()\n\t}\n\n\tif args.Term < currentTerm 
{\n\t\t// Reply false if term < currentTerm \n\t\treply.VoteGranted = false\n\t\treply.Term = currentTerm \n\t}else {\n\t\t//If votedFor is null or candidateId,\n\t\t//and candidate’s log is at least as up-to-date as receiver’s log,\n\t\t//&& rf.atLeastUptodate(args.LastLogIndex, args.LastLogTerm)\n\t\tif (rf.votedFor == NILVOTE || rf.votedFor == args.CandidateId) && rf.atLeastUptodate(args.LastLogIndex, args.LastLogTerm) {\n\t\t\ti , t := rf.lastLogIdxAndTerm()\n\t\t\tPrefixDPrintf(rf, \"voted to candidate %d, args %v, lastlogIndex %d, lastlogTerm %d\\n\", args.CandidateId, args, i, t)\n\t\t\trf.votedFor = args.CandidateId\n\t\t\trf.persist()\t\n\t\t\treply.VoteGranted = true\n\t\t\treply.Term = rf.currentTerm\n\t\t\t//you grant a vote to another peer.\n\t\t\trf.resetTimeoutEvent = makeTimestamp()\n\t\t}else {\n\t\t\treply.VoteGranted = false\n\t\t\treply.Term = rf.currentTerm\n\t\t}\t\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\n\t//defer rf.updateAppliedLock()\n\t//Your code here (2A, 2B).\n\tisALeader := rf.role == Leader\n\n\tif rf.updateTermLock(args.Term) && isALeader {\n\t\t//DPrintf(\"[DEBUG] Server %d from %d to Follower {requestVote : Term higher}\", rf.me, Leader)\n\t}\n\treply.VoteCranted = false\n\tvar votedFor interface{}\n\t//var isLeader bool\n\tvar candidateID, currentTerm, candidateTerm, currentLastLogIndex, candidateLastLogIndex, currentLastLogTerm, candidateLastLogTerm int\n\n\tcandidateID = args.CandidateID\n\tcandidateTerm = args.Term\n\tcandidateLastLogIndex = args.LastLogIndex\n\tcandidateLastLogTerm = args.LastLogTerm\n\n\trf.mu.Lock()\n\n\treply.Term = rf.currentTerm\n\tcurrentTerm = rf.currentTerm\n\tcurrentLastLogIndex = len(rf.logs) - 1 //TODO: fix the length corner case\n\tcurrentLastLogTerm = rf.logs[len(rf.logs)-1].Term\n\tvotedFor = rf.votedFor\n\tisFollower := rf.role == Follower\n\trf.mu.Unlock()\n\t//case 0 => I'm leader, so you must stop election\n\tif !isFollower {\n\t\tDPrintf(\"[DEBUG] Case0 I [%d] is Candidate than %d\", rf.me, args.CandidateID)\n\t\treturn\n\t}\n\n\t//case 1 => the candidate is not suit to be voted\n\tif currentTerm > candidateTerm {\n\t\tDPrintf(\"[DEBUG] Case1 Follower %d > Candidate %d \", rf.me, args.CandidateID)\n\t\treturn\n\t}\n\n\t//case 2 => the candidate's log is not lastest than the follwer\n\tif currentLastLogTerm > candidateLastLogTerm || (currentLastLogTerm == candidateLastLogTerm && currentLastLogIndex > candidateLastLogIndex) {\n\t\tDPrintf(\"[DEBUG] Case2 don't my[%d] newer than can[%d]\", rf.me, args.CandidateID)\n\t\treturn\n\t}\n\trf.mu.Lock()\n\t//case3 => I have voted and is not you\n\tif votedFor != nil && votedFor != candidateID {\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\n\t//now I will vote you\n\n\tvar notFollower bool\n\trf.votedFor = candidateID\n\tif rf.role != Follower {\n\t\tnotFollower = true\n\t}\n\tDPrintf(\"[Vote] Server[%d] vote to Can[%d]\", rf.me, args.CandidateID)\n\trf.role = Follower\n\treply.VoteCranted = true\n\trf.mu.Unlock()\n\trf.persist()\n\tif notFollower {\n\t\trf.msgChan <- RecivedVoteRequest\n\t} else {\n\t\trf.msgChan <- RecivedVoteRequest\n\t}\n\n\treturn\n}", "func (r *RaftNode) Vote(rv RequestVoteStruct, response *RPCResponse) error {\n\tr.Lock()\n\tdefer r.Unlock()\n\tif r.verbose {\n\t\tlog.Println(\"Vote()\")\n\t}\n\n\tdefer r.persistState()\n\n\tresponse.Term = r.CurrentTerm\n\n\tmyLastLogTerm := r.getLastLogTerm()\n\tmyLastLogIdx := r.getLastLogIndex()\n\n\tif r.verbose {\n\t\tlog.Printf(\"RequestVoteStruct: %s. 
\\nMy node: term: %d, votedFor %d, lastLogTerm: %d, lastLogIdx: %d\",\n\t\t\trv.String(), r.CurrentTerm, r.VotedFor, myLastLogTerm, myLastLogIdx)\n\t}\n\n\tlooksEligible := r.CandidateLooksEligible(rv.LastLogIdx, rv.LastLogTerm)\n\n\tif rv.Term > r.CurrentTerm {\n\t\tr.shiftToFollower(rv.Term, HostID(-1)) // We do not yet know who is leader for this term\n\t}\n\n\tif rv.Term < r.CurrentTerm {\n\t\tif r.verbose {\n\t\t\tlog.Println(\"RV from prior term - do not grant vote\")\n\t\t}\n\t\tresponse.Success = false\n\t} else if (r.VotedFor == -1 || r.VotedFor == rv.CandidateID) && looksEligible {\n\t\tif r.verbose {\n\t\t\tlog.Println(\"Grant vote\")\n\t\t}\n\t\tr.resetTickers()\n\t\tresponse.Success = true\n\t\tr.VotedFor = rv.CandidateID\n\t} else {\n\t\tif r.verbose {\n\t\t\tlog.Println(\"Do not grant vote\")\n\t\t}\n\t\tresponse.Success = false\n\t}\n\n\treturn nil\n}", "func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here.\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tmay_grant_vote := true\n\tif len(rf.logs) > 0 {\n\t\t// rf.logs_term[len(rf.logs)-1] will always there, no matter snapshotedCount\n\t\tif rf.logs_term[len(rf.logs)-1] > args.LastLogTerm ||\n\t\t\t(rf.logs_term[len(rf.logs)-1] == args.LastLogTerm && len(rf.logs) > args.LogCount) {\n\t\t\tmay_grant_vote = false\n\t\t}\n\t}\n\trf.logger.Printf(\"Got vote request: %v, may grant vote: %v\\n\", args, may_grant_vote)\n\n\tif args.Term < rf.currentTerm {\n\t\trf.logger.Printf(\"Got vote request with term = %v, reject\\n\", args.Term)\n\t\treply.Term = rf.currentTerm\n\t\treply.Granted = false\n\t\treturn\n\t}\n\n\tif args.Term == rf.currentTerm {\n\t\trf.logger.Printf(\"Got vote request with current term, now voted for %v\\n\", rf.votedFor)\n\t\tif rf.votedFor == -1 && may_grant_vote {\n\t\t\trf.votedFor = args.CandidateID\n\t\t\trf.persist()\n\t\t}\n\t\treply.Granted = (rf.votedFor == args.CandidateID)\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\tif args.Term > rf.currentTerm {\n\t\trf.logger.Printf(\"Got vote request with term = %v, follow it\\n\", args.Term)\n\t\trf.state = FOLLOWER\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = -1\n\t\tif may_grant_vote {\n\t\t\trf.votedFor = args.CandidateID\n\t\t\trf.persist()\n\t\t}\n\t\trf.resetTimer()\n\n\t\treply.Granted = (rf.votedFor == args.CandidateID)\n\t\treply.Term = args.Term\n\t\treturn\n\t}\n}", "func (_Bep20 *Bep20Caller) GetCurrentVotes(opts *bind.CallOpts, account common.Address) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _Bep20.contract.Call(opts, &out, \"getCurrentVotes\", account)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func voteInv(c *cmdVoteInv) (map[string][]string, error) {\n\t// Setup client\n\topts := pclient.Opts{\n\t\tHTTPSCert: cfg.HTTPSCert,\n\t\tVerbose: cfg.Verbose,\n\t\tRawJSON: cfg.RawJSON,\n\t}\n\tpc, err := pclient.New(cfg.Host, opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Setup status and page number\n\tvar status tkv1.VoteStatusT\n\tif c.Args.Status != \"\" {\n\t\t// Parse status. 
This can be either the numeric status code or the\n\t\t// human readable equivalent.\n\t\tstatus, err = parseVoteStatus(c.Args.Status)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If a status was given but no page number was give, default\n\t\t// to page number 1.\n\t\tif c.Args.Page == 0 {\n\t\t\tc.Args.Page = 1\n\t\t}\n\t}\n\n\t// Get vote inventory\n\ti := tkv1.Inventory{\n\t\tStatus: status,\n\t\tPage: c.Args.Page,\n\t}\n\tir, err := pc.TicketVoteInventory(i)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Print inventory\n\tprintJSON(ir)\n\n\treturn ir.Vetted, nil\n\n}", "func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) {\n\t//fmt.Printf(\"[::RequestVote]\\n\")\n\t// Your code here.\n\trf.mtx.Lock()\n\tdefer rf.mtx.Unlock()\n\tdefer rf.persist()\n\n\treply.VoteGranted = false\n\n\t// case 1: check term\n\tif args.Term < rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\n\tif args.Term > rf.currentTerm { // set term to max. and then maybe become leader.\n\t\trf.currentTerm = args.Term\n\t\trf.state = STATE_FOLLOWER\n\t\trf.voteFor = -1\n\t}\n\treply.Term = rf.currentTerm\n\n\t// case 2: check log\n\tisNewer := false\n\tif args.LastLogTerm == rf.log[len(rf.log)-1].Term {\n\t\tisNewer = args.LastLogIndex >= rf.log[len(rf.log)-1].LogIndex\n\t} else {\n\t\tisNewer = args.LastLogTerm > rf.log[len(rf.log)-1].Term\n\t}\n\n\tif (rf.voteFor == -1 || rf.voteFor == args.CandidateId) && isNewer {\n\t\trf.chanVoteOther <- 1\n\t\trf.state = STATE_FOLLOWER\n\t\treply.VoteGranted = true\n\t\trf.voteFor = args.CandidateId\n\t}\n\n}", "func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here.\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tbbit := true\n\tif len(rf.log) > 0 {\n\t\tlastLogTerm := rf.log[len(rf.log)-1].Term\n\t\tif lastLogTerm > args.LastLogTerm {\n\t\t\tbbit = false\n\t\t} else if lastLogTerm == args.LastLogTerm &&\n\t\t\tlen(rf.log)-1 > args.LastLogIndex {\n\t\t\tbbit = false\n\t\t}\n\t}\n\n\tif args.Term < rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\tif args.Term == rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\tif rf.votedFor == -1 && bbit {\n\t\t\trf.votedFor = args.CandidateId\n\t\t\trf.persist()\n\t\t\treply.VoteGranted = true\n\t\t} else {\n\t\t\treply.VoteGranted = false\n\t\t}\n\t\treturn\n\t}\n\tif args.Term > rf.currentTerm {\n\t\trf.state = FOLLOWER\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = -1\n\t\trf.timer.Reset(properTimeDuration(rf.state))\n\t\treply.Term = args.Term\n\t\tif bbit {\n\t\t\trf.votedFor = args.CandidateId\n\t\t\trf.persist()\n\t\t\treply.VoteGranted = true\n\t\t} else {\n\t\t\treply.VoteGranted = false\n\t\t}\n\t\treturn\n\t}\n\treturn\n}", "func (_Contract *ContractCaller) GetVote(opts *bind.CallOpts, from common.Address, delegatedTo common.Address, proposalID *big.Int) (struct {\n\tWeight *big.Int\n\tChoices []*big.Int\n}, error) {\n\tvar out []interface{}\n\terr := _Contract.contract.Call(opts, &out, \"getVote\", from, delegatedTo, proposalID)\n\n\toutstruct := new(struct {\n\t\tWeight *big.Int\n\t\tChoices []*big.Int\n\t})\n\tif err != nil {\n\t\treturn *outstruct, err\n\t}\n\n\toutstruct.Weight = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\toutstruct.Choices = *abi.ConvertType(out[1], new([]*big.Int)).(*[]*big.Int)\n\n\treturn *outstruct, err\n\n}", "func (remote *RemoteNode) RequestVoteRPC(local *RemoteNode, request *RequestVoteRequest) (*RequestVoteReply, error) 
{\n\t// if local.NetworkPolicy.IsDenied(*local.Self, *remote) {\n\t// \treturn nil, ErrorNetworkPolicyDenied\n\t// }\n\n\tcc, err := remote.RaftRPCClientConn()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treply, err := cc.RequestVoteCaller(context.Background(), request)\n\treturn reply, remote.connCheck(err)\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\treply.VoterId = rf.peerId\n\tif args.Term < rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\tif args.Term > rf.currentTerm {\n\t\trf.stepDownToFollower(args.Term)\n\t}\n\tlastLog := rf.getLastLog()\n\tif (rf.votedFor == \"\" || rf.votedFor == args.CandidateId) && (lastLog.Term < args.LastLogTerm || (lastLog.Index <= args.LastLogIndex && lastLog.Term == args.LastLogTerm)) {\n\t\treply.Term = rf.currentTerm\n\t\trf.grantCh <- true\n\t\treply.VoteGranted = true\n\t\t// set voteFor\n\t\trf.votedFor = args.CandidateId\n\t\tlog.Printf(\"peer %v elect peer %v as leader\\n\", rf.peerId, args.CandidateId)\n\t}\n\treturn\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) { //RequestVote handdler\r\n\t// Your code here.\r\n\trf.mu.Lock() //get the lock\r\n\tdefer rf.mu.Unlock()\r\n\tif args.Term < rf.currentTerm {\r\n\t\treply.VoteGranted = false\r\n\t\treply.Term = rf.currentTerm\r\n\t}else if args.Term > rf.currentTerm {\r\n\t\trf.currentTerm = args.Term\r\n\t\trf.updateStateTo(FOLLOWER)\r\n\t\trf.votedFor = args.CandidateId\r\n\t\treply.VoteGranted = true\r\n\t}else {\r\n\t\tif rf.votedFor == -1 {//haven't vote for anyone\r\n\t\t\trf.votedFor = args.CandidateId\r\n\t\t\treply.VoteGranted = true\r\n\t\t}else {\r\n\t\t\treply.VoteGranted = false\r\n\t\t}\r\n\t}\r\n\tif reply.VoteGranted == true { // vote for current requester\r\n\t\tgo func() { rf.voteCh <- struct{}{} }() //send the struct{}{} to the voteCh channel\r\n\t}\t\r\n}", "func (_Contract *ContractCallerSession) GetVote(from common.Address, delegatedTo common.Address, proposalID *big.Int) (struct {\n\tWeight *big.Int\n\tChoices []*big.Int\n}, error) {\n\treturn _Contract.Contract.GetVote(&_Contract.CallOpts, from, delegatedTo, proposalID)\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\t/*\n\tIf votedFor is null or candidateId, and candidate’s\n\tlog is at least as up-to-date as receiver’s log, grant vote\n\t */\n\tif rf.isCandidateUpToDate(args) &&\n\t\t(rf.votedFor == -1 || rf.votedFor == args.CandidateId) {\n\t\t// grant vote and update rf's term.\n\t\trf.currentTerm = args.Term\n\n\t\treply.Term = args.Term\n\n\t\treply.VoteGranted = true\n\t} else {\n\t\t// don't grant vote to the candidate.\n\t\treply.Term = rf.currentTerm\n\n\t\treply.VoteGranted = false\n\t}\n\n}", "func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here.\n\trf.mu.Lock()\n\tdefer rf.persist()\n\tdefer rf.mu.Unlock()\n\treply.Term = rf.CurrentTerm\n\n\tif args.Term < rf.CurrentTerm {\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\n\tif args.Term > rf.CurrentTerm {\n\t\trf.VotedFor = -1\n\t\trf.CurrentTerm = args.Term\n\t\trf.identity = FOLLOWER\n\t}\n\n\tif rf.VotedFor != -1 && rf.VotedFor != args.CandidateId {\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\tvar rfLogIndex int\n\tvar rfLogTerm int\n\tif len(rf.Log) > 0 {\n\t\trfLogIndex = rf.Log[len(rf.Log)-1].Index\n\t\trfLogTerm = 
rf.Log[len(rf.Log)-1].Term\n\t} else {\n\t\trfLogIndex = rf.lastIncludedIndex\n\t\trfLogTerm = rf.lastIncludedTerm\n\t}\n\n\tif args.LastLogTerm > rfLogTerm || args.LastLogTerm == rfLogTerm && args.LastLogIndex >= rfLogIndex {\n\t\treply.VoteGranted = true\n\t\trf.VotedFor = args.CandidateId\n\t\trf.identity = FOLLOWER\n\t\trf.hasVoted <- true\n\t} else {\n\t\treply.VoteGranted = false\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tdefer rf.persist()\n\n\treply.VoteGranted = false\n\n\tif args.Term < rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\tDPrintf(\"[reject] %v currentTerm:%v vote reject for:%v term:%v\",rf.me,rf.currentTerm,args.CandidateId,args.Term)\n\t\treturn\n\t}\n\n\tif args.Term > rf.currentTerm {\n\t\trf.state = FOLLOWER\n\t\trf.votedFor = -1\n\t\trf.currentTerm = args.Term\n\t}\n\n\treply.Term = rf.currentTerm\n\n\tlastLogTerm := rf.getLastLogTerm()\n\tlastLogIndex := rf.getLastLogIndex()\n\n\tlogFlag := false\n\tif (args.LastLogTerm > lastLogTerm) || (args.LastLogTerm == lastLogTerm && args.LastLogIndex >= lastLogIndex) {\n\t\tlogFlag = true\n\t}\n\n\tif (-1 == rf.votedFor || args.CandidateId == rf.votedFor) && logFlag {\n\t\treply.VoteGranted = true\n\t\trf.votedFor = args.CandidateId\n\t\trf.voteChan <- true\n\t\trf.state = FOLLOWER\n\t}\n\t//DPrintf(\"[RequestVote]: server %v send %v\", rf.me, args.CandidateId)\n}", "func (*UpdateVotesRequest) Descriptor() ([]byte, []int) {\n\treturn file_resources_proto_rawDescGZIP(), []int{14}\n}", "func (node *Node) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) error {\n\tnode.mu.Lock()\n\tdefer node.mu.Unlock()\n\tif node.state == dead {\n\t\treturn nil\n\t}\n\n\tlog.Printf(\"RequestVote args: %+v\\ncurrentTerm=%d\\nvotedFor=%d\", args, node.currentTerm, node.votedFor)\n\n\t// If the RPC term is less than the current term then we must reject the\n\t// vote request.\n\tif args.term < node.currentTerm {\n\t\treply.term = node.currentTerm\n\t\treply.voteGranted = false\n\t\tlog.Printf(\"RequestVote has been rejected by %d\", node.id)\n\t\treturn nil\n\t}\n\n\tif args.term > node.currentTerm {\n\t\t// Update the current node's state to follower.\n\t\tnode.updateStateToFollower(args.term)\n\t}\n\n\t// If the above condition was not true then we have to ensure that we have\n\t// not voted for some other node with the same term.\n\tif args.term == node.currentTerm && (node.votedFor == -1 || node.votedFor == args.candidateID) {\n\t\treply.voteGranted = true\n\t\tnode.votedFor = args.candidateID\n\t\tnode.timeSinceTillLastReset = time.Now()\n\t} else {\n\t\treply.voteGranted = false\n\t}\n\treply.term = node.currentTerm\n\tlog.Printf(\"RequestVote reply: %+v\", reply)\n\treturn nil\n}", "func GetCmdQueryVotes(queryRoute string, cdc *codec.Codec) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: budget.QueryVotes,\n\t\tShort: \"Query votes, filtered by voterAddress \",\n\t\tLong: strings.TrimSpace(`\nQuery vote details for a single program by its identifier.\n\nExample:\n$ terracli query budget votes 1\n`),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tcliCtx := context.NewCLIContext().WithCodec(cdc)\n\n\t\t\tparams := budget.QueryVotesParams{}\n\n\t\t\t// Get voting address\n\t\t\tvoterAddrStr := viper.GetString(flagVoter)\n\t\t\tif len(voterAddrStr) > 0 {\n\t\t\t\tacc, err := cliCtx.GetAccount([]byte(voterAddrStr))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn 
err\n\t\t\t\t}\n\n\t\t\t\tparams.Voter = acc.GetAddress()\n\t\t\t}\n\n\t\t\tprogramIDStr := viper.GetString(flagProgramID)\n\t\t\tif len(programIDStr) > 0 {\n\t\t\t\t// validate that the program id is a uint\n\t\t\t\tprogramID, err := strconv.ParseUint(args[0], 10, 64)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"program-id %s not a valid int, please input a valid program-id\", args[0])\n\t\t\t\t}\n\n\t\t\t\tparams.ProgramID = programID\n\t\t\t}\n\n\t\t\tbz, err := cdc.MarshalJSON(params)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tres, err := cliCtx.QueryWithData(fmt.Sprintf(\"custom/%s/%s\", queryRoute, budget.QueryVotes), bz)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tvar matchingVotes []budget.MsgVoteProgram\n\t\t\tcdc.MustUnmarshalJSON(res, &matchingVotes)\n\n\t\t\tif len(matchingVotes) == 0 {\n\t\t\t\tfmt.Println(\"No matching votes found\")\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tfor _, vote := range matchingVotes {\n\t\t\t\tfmt.Println(vote.String())\n\t\t\t}\n\n\t\t\treturn nil\n\t\t},\n\t}\n\n\tcmd.Flags().String(flagVoter, \"\", \"voter for the program\")\n\n\treturn cmd\n}", "func (r *Raft) prepRequestVote() RequestVote {\n\tlastLogIndex := r.myMetaData.lastLogIndex\n\t//if this is the request when log is empty\n\tvar lastLogTerm int\n\tif len(r.myLog) == 0 {\n\t\t//fmt.Println(\"In if of prepRV()\")\n\t\t//lastLogTerm = -1 //Just for now--Modify later\n\t\tlastLogTerm = r.currentTerm\n\t} else {\n\t\t//fmt.Println(\"In else of prepRV()\")\n\t\tlastLogTerm = r.myLog[lastLogIndex].Term\n\t}\n\t//fmt.Println(\"here2\")\n\treqVoteObj := RequestVote{r.currentTerm, r.Myconfig.Id, lastLogIndex, lastLogTerm}\n\treturn reqVoteObj\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\tDPrintf(\"before voted reply is %v, me id is %d, votedFor is %d, candidateId is %d, current term is %v, \" +\n\t\t\"args term is %v args log is %v log is %v\", reply, rf.me, rf.votedFor, args.CandidateId,\n\t\trf.currentTerm, args.LastLogTerm, args.LastLogIndex, rf.addLastIncludedIndex(len(rf.log)-1))\n\n\tif rf.currentTerm < args.Term {\n\t\trf.votedFor = -1\n\t\trf.currentTerm = args.Term\n\t\trf.raftState = Follower\n\t\trf.resetTimer()\n\t}\n\tif rf.votedFor == args.CandidateId || rf.votedFor == -1 {\n\t\tlastIndex := len(rf.log) - 1\n\t\tlastLogTerm := rf.log[lastIndex].Term\n\t\tif (args.LastLogTerm > lastLogTerm) ||\n\t\t\t(args.LastLogTerm == lastLogTerm && args.LastLogIndex >= rf.addLastIncludedIndex(lastIndex)) {\n\t\t\trf.votedFor = args.CandidateId\n\t\t\trf.raftState = Follower\n\t\t\treply.VoteGranted = true\n\t\t\trf.resetTimer()\n\t\t}\n\t}\n\trf.persist()\n}", "func (r *Raft) callRequestVote(server int, args requestVoteArgs, reply *requestVoteReply) bool {\n\t// When there are no peers, return a test response, if any.\n\tif len(r.peers) == 0 {\n\t\t// Under test, return injected reply.\n\t\tglog.V(2).Infof(\"Under test, returning injected reply %v\", reply)\n\t\tif r.testRequestvotesuccess {\n\t\t\t*reply = *r.testRequestvotereply\n\t\t}\n\t\treturn r.testRequestvotesuccess\n\t}\n\tok := r.peers[server].Call(\"Raft.RequestVote\", args, reply)\n\treturn ok\n}", "func (*VoteRequest) Descriptor() ([]byte, []int) {\n\treturn file_vote_proto_rawDescGZIP(), []int{2}\n}", "func (_Votes *VotesCaller) GetCandidates(opts *bind.CallOpts) (struct {\n\tAddresses []common.Address\n\tTickets 
[]*big.Int\n}, error) {\n\tret := new(struct {\n\t\tAddresses []common.Address\n\t\tTickets []*big.Int\n\t})\n\tout := ret\n\terr := _Votes.contract.Call(opts, out, \"getCandidates\")\n\treturn *ret, err\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tif rf.currentTerm > args.Term {\n\t\treply.VoteGranted = false\n\t\treply.Term = rf.currentTerm\n\t\treturn\n\t}\n\n\tif rf.currentTerm < args.Term {\n\t\trf.currentTerm = args.Term\n\t\trf.updateStateTo(FOLLOWER)\n\t\t//妈的咋突然少了段代码~~ 这里要变为follower状态\n\t\t//var wg sync.WaitGroup\n\t\t//wg.Add(1)\n\t\tgo func() {\n\t\t\t//\tdefer wg.Done()\n\t\t\trf.stateChangeCh <- struct{}{}\n\t\t}()\n\n\t\t//wg.Wait()\n\n\t\t//直接return,等待下一轮投票会导致活锁,比如node 1 ,2,3 。 node 1 加term为2,发请求给node2,3,term1。 node2,3更新term拒绝投票\n\t\t//return\n\t}\n\n\t//此处if 在 currentTerm < args.Term下必然成立,在currentTerm等于args.Term下不一定成立\n\n\tif rf.votedFor == -1 || rf.votedFor == args.CandidatedId {\n\t\t//if candidate的log 至少 as up-to-date as reveiver's log\n\t\tlastLogIndex := len(rf.logEntries) - 1\n\t\t//fmt.Println(lastLogIndex,rf.me,rf.logEntries )\n\t\tlastLogTerm := rf.logEntries[len(rf.logEntries)-1].Term\n\t\t//fmt.Println(lastLogIndex,lastLogTerm , args.LastLogIndex,args.LastLogTerm)\n\t\tif lastLogTerm < args.LastLogTerm || (lastLogTerm == args.LastLogTerm && lastLogIndex <= args.LastLogIndex) {\n\t\t\trf.votedFor = args.CandidatedId\n\t\t\treply.Term = rf.currentTerm\n\t\t\treply.VoteGranted = true\n\t\t\t//fmt.Printf(\"[Term %d],Node %d Reply 值为%v. Term= %d , lastIndex = %d <= args.lastLogIndex %d\\n\", rf.currentTerm, rf.me, reply, args.LastLogTerm, lastLogIndex, args.LastLogIndex)\n\t\t\tif rf.status == FOLLOWER {\n\t\t\t\tgo func() { rf.giveVoteCh <- struct{}{} }()\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(lastLogIndex, lastLogTerm, args.LastLogIndex, args.LastLogTerm)\n\t}\n\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\t//fmt.Printf(\"[Term %d] Node %d Reply 值为%v,rf.votefor=%d,\\n\", rf.currentTerm, rf.me, reply, rf.votedFor)\n\n}", "func (c *gitlabClient) PullRequestEvents(context.Context, string, []interface{}) ([]sdk.VCSPullRequestEvent, error) {\n\treturn nil, fmt.Errorf(\"Not implemented on Gitlab\")\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// TODO: fail this rpc when killed\n\n\t// Your code here (2A, 2B).\n\tisGoodRequestVote := false\n\trf.mu.Lock()\n\n\tdefer func() {\n\t\tAssertF(reply.Term >= args.Term, \"reply.Term {%d} >= args.Term {%d}\", reply.Term, args.Term)\n\t\trf.mu.Unlock()\n\t\trf.resetElectionTimerIf(isGoodRequestVote)\n\t}()\n\n\tif args.Term < rf.currentTerm {\n\t\t*reply = RequestVoteReply{Term: rf.currentTerm, VoteGranted: false}\n\t\treturn\n\t}\n\n\tif args.Term > rf.currentTerm {\n\t\trf.transitionToFollower(args.Term, -1)\n\t}\n\n\tAssertF(args.Term == rf.currentTerm, \"\")\n\n\tif (rf.votedFor == -1 || rf.votedFor == args.CandidateId) && rf.isUptoDate(args.LastLogIndex, args.LastLogTerm) {\n\t\tisGoodRequestVote = true\n\t\trf.votedFor = args.CandidateId\n\t\t*reply = RequestVoteReply{Term: args.Term, VoteGranted: true}\n\t} else {\n\t\t*reply = RequestVoteReply{Term: args.Term, VoteGranted: false}\n\t}\n\n\trf.persist()\n}", "func GetCmdQueryVotes(queryRoute string, cdc *codec.Codec) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: oracle.QueryVotes,\n\t\tArgs: cobra.NoArgs,\n\t\tShort: \"Query outstanding oracle votes, filtered by denom and voter 
address.\",\n\t\tLong: strings.TrimSpace(`\nQuery outstanding oracle votes, filtered by denom and voter address.\n\n$ terracli query oracle votes --denom=\"uusd\" --validator=\"terravaloper...\"\n\nreturns oracle votes submitted by the validator for the denom uusd \n`),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tcliCtx := context.NewCLIContext().WithCodec(cdc)\n\n\t\t\tdenom := viper.GetString(flagDenom)\n\n\t\t\t// Check voter address exists, then valids\n\t\t\tvar voterAddress sdk.ValAddress\n\n\t\t\tbechVoterAddr := viper.GetString(flagValidator)\n\t\t\tif len(bechVoterAddr) != 0 {\n\t\t\t\tvar err error\n\n\t\t\t\tvoterAddress, err = sdk.ValAddressFromBech32(bechVoterAddr)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tparams := oracle.NewQueryVotesParams(voterAddress, denom)\n\t\t\tbz, err := cdc.MarshalJSON(params)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tres, err := cliCtx.QueryWithData(fmt.Sprintf(\"custom/%s/%s\", queryRoute, oracle.QueryVotes), bz)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tvar matchingVotes oracle.QueryVotesResponse\n\t\t\tcdc.MustUnmarshalJSON(res, &matchingVotes)\n\n\t\t\treturn cliCtx.PrintOutput(matchingVotes)\n\t\t},\n\t}\n\n\tcmd.Flags().String(flagDenom, \"\", \"filter by votes matching the denom\")\n\tcmd.Flags().String(flagValidator, \"\", \"(optional) filter by votes by validator\")\n\n\tcmd.MarkFlagRequired(flagDenom)\n\n\treturn cmd\n}", "func (rf *Raft) createRequestVoteArgs() *RequestVoteArgs {\n\treturn &RequestVoteArgs{\n\t\tTerm: rf.currentTerm,\n\t\tCandidateID: rf.me,\n\t\tLastLogIndex: rf.lastLogEntryIndex(),\n\t\tLastLogTerm: rf.lastLogEntryTerm(),\n\t}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tif args.Term < rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGranted = false\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\t// follow the second rule in \"Rules for Servers\" in figure 2 before handling an incoming RPC\n\tif args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\trf.state = FOLLOWER\n\t\trf.votedFor = -1\n\t\trf.persist()\n\t}\n\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = true\n\t// deny vote if already voted\n\tif rf.votedFor != -1 {\n\t\treply.VoteGranted = false\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\t// deny vote if consistency check fails (candidate is less up-to-date)\n\tlastLog := rf.log[len(rf.log)-1]\n\tif args.LastLogTerm < lastLog.Term || (args.LastLogTerm == lastLog.Term && args.LastLogIndex < len(rf.log)-1) {\n\t\treply.VoteGranted = false\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\t// now this peer must vote for the candidate\n\trf.votedFor = args.CandidateID\n\trf.mu.Unlock()\n\n\trf.resetTimer()\n}", "func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) {\n\tswitch {\n\tcase args.Term < rf.currentTerm:\n\t\treply.VoteGranted = false\n\t\treturn\n\tcase args.Term > rf.currentTerm:\n\t\trf.setTerm(args.Term) // only reset term (and votedFor) if rf is behind\n\t}\n\n\treply.Term = rf.currentTerm\n\n\tif rf.votedFor == -1 || rf.votedFor == args.CandidateId && rf.AtLeastAsUpToDate(args) {\n\t\treply.VoteGranted = true\n\t\trf.votedFor = args.CandidateId\n\t} else {\n\t\treply.VoteGranted = false\n\t}\n\n\t// TODO move me somewhere else\n\tif reply.VoteGranted {\n\t\trf.requestVoteCh <- struct{}{}\n\t}\n}", "func GetCmdQueryVotes(cdc *codec.Codec) *cobra.Command {\n\tcmd := 
&cobra.Command{\n\t\tUse: \"query-votes\",\n\t\tShort: \"Query votes on a proposal\",\n\t\tExample: \"iriscli gov query-votes --proposal-id=1\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tcliCtx := context.NewCLIContext().WithCodec(cdc)\n\t\t\tproposalID := uint64(viper.GetInt64(flagProposalID))\n\n\t\t\tparams := gov.QueryVotesParams{\n\t\t\t\tProposalID: proposalID,\n\t\t\t}\n\t\t\tbz, err := cdc.MarshalJSON(params)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tres, err := cliCtx.QueryWithData(fmt.Sprintf(\"custom/%s/votes\", protocol.GovRoute), bz)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tvar votes gov.Votes\n\t\t\tif err := cdc.UnmarshalJSON(res, &votes); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn cliCtx.PrintOutput(votes)\n\t\t},\n\t}\n\n\tcmd.Flags().String(flagProposalID, \"\", \"proposalID of which proposal's votes are being queried\")\n\tcmd.MarkFlagRequired(flagProposalID)\n\treturn cmd\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tdefer rf.persist()\n\n\tif args.Term < rf.currentTerm {\n\t\t\treply.Term = rf.currentTerm\n\t\t\treply.VoteGranted = false\n\t\t\treturn\n\t\t}\n\tif args.Term > rf.currentTerm{\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = -1\n\t\trf.role = 0\n\t\trf.roleChan <- 0\n\t\t}\n\treply.Term = args.Term\n\tfmt.Printf(\"LastLogTerm:%v rf.log:%v sever:%v \\n\", args.LastLogTerm, rf.log[len(rf.log)-1].Term, rf.me)\n\tif rf.votedFor != -1 && rf.votedFor != args.CandidateId {\n\t reply.VoteGranted = false \n\t }else if rf.log[len(rf.log)-1].Term > args.LastLogTerm{\n\t \treply.VoteGranted = false\n\t }else if rf.log[len(rf.log)-1].Index > args.LastLogIndex && rf.log[len(rf.log)-1].Term == args.LastLogTerm{\n\t \treply.VoteGranted = false\n\t }else{\n\t fmt.Printf(\"Server %v vote for server %v \\n\", rf.me, args.CandidateId)\n\t reply.VoteGranted = true\n\t rf.votedFor = args.CandidateId\n\t rf.GrantVote <- true\n\t }\n\n\t}", "func (r *Raft) sendVoteRequestRpc(value ServerConfig, VoteCount chan int) error {\n\n\tclient, err := rpc.Dial(\"tcp\", \"localhost:\"+strconv.Itoa(value.LogPort))\n\tlog.Println(\"Dialing vote request rpc from:\",r.Id,\" to:\",value.Id)\n\n\t if err != nil {\n\t\tlog.Print(\"Error Dialing sendVoteRequestRpc:\", err)\n\t\tVoteCount<-0\n\t\treturn err\n\t }\n\n\t logLen:= len(r.Log)\n\t var lastLogIndex int\n\t var lastLogTerm int\n\n\t if logLen >0 { // if log is not empty, send index and term of last log\n\t \tlastLogIndex=logLen-1\t \t\n\t \tlastLogTerm = r.Log[lastLogIndex].Term\n\t } else { // if log is empty, send index and term as 0\n\t \tlastLogIndex=0\n\t \tlastLogTerm=0\n\t }\n\n\t // Prepare argumenst to be sent to follower\n\t args:= RequestVoteRPCArgs{r.CurrentTerm,r.Id,lastLogTerm,lastLogIndex,}\n\n\tvar reply bool // reply variable will reciece the vote from other server, true is voted, false otherwise\n\tdefer client.Close()\n\terr1 := client.Call(\"RPC.VoteForLeader\", &args, &reply) \n\n\tif err1 != nil {\n\t\tlog.Print(\"Remote Method Invocation Error:Vote Request:\", err1)\n\t}\n\tif(reply) { // if reply is positive infrom the candiate \n\t\t//fmt.Println(\"Received reply of vote request from:\",value.Id,\" for:\",r.Id)\n\t\tVoteCount <-1\t\n\t}else{\n\t\tVoteCount <-0 // if reply is negative infrom the candiate \n\t\t//fmt.Println(\"Received Negative reply of vote request from:\",value.Id,\" for:\",r.Id)\n\t}\n\treturn nil\n}", 
"func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\n\n\t//fmt.Printf(\"成功调用RequestVote!\\n\")\n\t// Your code here (2A, 2B).\n\t//rf.mu.Lock()\n\t//current_time:=time.Now().UnixNano()/1e6\n\t//&&current_time-rf.voted_time>800\n\trf.mu.Lock()\n\n\tif (rf.term>args.Candidate_term)&&((args.Last_log_term>rf.Last_log_term)||(args.Last_log_term==rf.Last_log_term&&args.Last_log_term_lenth>=rf.last_term_log_lenth)){\n\t\trf.term=args.Candidate_term\n\t\trf.state=0\n\t}\n\n\n\t/*\n\t\tif args.Append==true&&((args.Newest_log.Log_Term<rf.Last_log_term)||(args.Newest_log.Log_Term==rf.Last_log_term&&args.Last_log_term_lenth<rf.Last_log_term)){\n\t\t\treply.Term=args.Candidate_term+1\n\t\t\treply.Last_log_term=rf.Last_log_term\n\t\t\treply.Last_log_lenth=rf.last_term_log_lenth\n\t\t\treply.Append_success=false\n\t\t\trf.mu.Unlock()\n\t\t\treturn\n\t\t}\n\t*/\n\t//if args.Second==true{\n\t//\tfmt.Printf(\"!\\n!\\n!\\n!\\n!\\n编号为%d的raft实例收到编号为%d的leader的second请求!本机term是%d,leader term是%d,args.Append是%v\\n\",rf.me,args.From,rf.term,args.Candidate_term,args.Append)\n\t//}\n\n\tif rf.state==2&&((rf.term<args.Candidate_term)||(rf.term==args.Candidate_term&&args.Last_log_term<rf.Last_log_term))&&args.Votemsg==false{\n\t\t//fmt.Printf(\"分区恢复后编号为%d的raft实例的term是%d,发现自己已经不是leader!leader是%d,leader的term是%d\\n\",rf.me,rf.term,args.From,args.Candidate_term)\n\t\trf.state=0\n\t\trf.leaderID=args.From\n\t}\n\n\n\n\tif args.Candidate_term>=rf.term{\n\t\t//rf.term=args.Candidate_term\n\t\t//if args.Second==true{\n\t\t//\tfmt.Printf(\"服务器上的SECOND进入第一个大括号\\n\")\n\t\t//}\n\t\tif args.Append == false {\n\t\t\tif args.Votemsg == true && rf.voted[args.Candidate_term] == 0&&((args.Last_log_term>rf.Last_log_term)||(args.Last_log_term==rf.Last_log_term&&args.Last_log_term_lenth>=rf.last_term_log_lenth)) { //合法投票请求\n\t\t\t\t//fmt.Printf(\"编号为%d的raft实例对投票请求的回答为true,term统一更新为为%d\\n\",rf.me,rf.term)\n\n\t\t\t\t//rf.term = args.Candidate_term\n\t\t\t\trf.voted[args.Candidate_term] = 1\n\t\t\t\treply.Vote_sent = true\n\n\t\t\t\t//rf.voted_time=time.Now().UnixNano()/1e6\n\n\t\t\t}else if args.Votemsg==true{ //合法的纯heartbeat\n\t\t\t\tif rf.voted[args.Candidate_term]==1 {\n\t\t\t\t\treply.Voted = true\n\t\t\t\t}\n\t\t\t\t//fmt.Printf(\"请求方的term是%d,本机的term是%d,来自%d的投票请求被%d拒绝!rf.last_log_term是%d,rf.last_log_lenth是%d,本机的rf.last_log_term是%d,rf.last_log_lenth是%d\\n\",args.Candidate_term,rf.term,args.From,rf.me,args.Last_log_term,args.Last_log_term_lenth,rf.Last_log_term,rf.last_term_log_lenth)\n\t\t\t}\n\t\t\treply.Term=rf.term\n\n\t\t\t//rf.term=args.Candidate_term//!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\t\t\t//if args.Votemsg==true{//!!!!!!!!!!!!!!\n\t\t\t//\trf.term=args.Candidate_term//!!!!!!!!!!!!\n\t\t\t//}//!!!!!!!!!!!!!!!!!\n\n\t\t} else { //这条是关于日志的\n\t\t\t//这个请求是日志同步请求,接收方需要将自己的日志最后一条和leader发过来的声称的进行比较,如果leader的更新且leader的PREV和自己的LAST相同就接受\n\t\t\t//还得找到最后一个一致的日志位置,然后将后面的全部更新为和leader一致的,这意味着中间多次的RPC通信\n\n\t\t\t/*\n\t\t\tif args.Newest_log.Log_Term<rf.Last_log_term{\n\t\t\t\treply.Wrong_leader=true\n\t\t\t\treply.Term=rf.term\n\t\t\t\treply.Append_success=false\n\t\t\t\treply.Last_log_lenth=rf.last_term_log_lenth\n\t\t\t\treturn\n\t\t\t}\n*/\n\n\t\t\tif (rf.Last_log_term>args.Last_log_term)||(rf.Last_log_term==args.Last_log_term&&rf.last_term_log_lenth>args.Last_log_term_lenth){\n\t\t\t\treply.Append_success=false\n\t\t\t\treply.Last_log_term=rf.Last_log_term\n\t\t\t\treply.Last_log_lenth=rf.last_term_log_lenth\n\t\t\t\trf.mu.Unlock()\n\t\t\t\treturn\n\t\t\t}\n\n\n\t\t\trf.term=args.Candidate_term\n\t\t\tif 
args.Second==true{\n\t\t\t\t//\tfmt.Printf(\"在服务器端进入second阶段!\\n\")\n\t\t\t\trf.log=rf.log[:args.Second_position]\n\t\t\t\trf.log=append(rf.log,args.Second_log...)\n\t\t\t\treply.Append_success=true\n\t\t\t\trf.Last_log_term=args.Last_log_term\n\t\t\t\trf.last_term_log_lenth=args.Last_log_term_lenth\n\t\t\t\trf.Last_log_index=len(rf.log)-1\n\t\t\t\trf.Log_Term=args.Log_Term\n\t\t\t\t//fmt.Printf(\"Second APPend在服务器端成功!现在编号为%d的raft实例的log是%v, last_log_term是%d,term是%d\\n\",rf.me,rf.log,rf.Last_log_term,rf.term)\n\t\t\t}else{\n\t\t\t\tif args.Append_Try == false {//try用于表示是否是第一次append失败了现在正在沟通\n\t\t\t\t\trf.append_try_log_index = rf.Last_log_index\n\t\t\t\t\trf.append_try_log_term=rf.Last_log_term\n\t\t\t\t}\n\t\t\t\tif args.Prev_log_index != rf.append_try_log_index || args.Prev_log_term != rf.append_try_log_term{\n\t\t\t\t\t//fmt.Printf(\"匹配失败!!!%d号leader发过来的PREV_log_index是%d,本机%d的last_log_index是%d,PREV_term是%d,本机的last_log_term是%d!\\n\",args.From,args.Prev_log_index,rf.me,rf.append_try_log_index,args.Prev_log_term,rf.append_try_log_term)\n\t\t\t\t\treply.Vote_sent = false//匹配失败后进入双方沟通try\n\t\t\t\t\treply.Append_success = false\n\n\t\t\t\t\treply.Log_Term=rf.Log_Term\n\n\t\t\t\t\trf.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t} else { //说明没问题。可以更新\n\t\t\t\t\t//fmt.Printf(\"匹配成功!!!%d号是leader,发过来的PREV_log_index是%d,本机的last_log_index是%d,PREV_term是%d,本机的last_log_term是%d,准备更新本机日志!!\\n\", args.From, args.Prev_log_index, rf.append_try_log_index, args.Prev_log_term, rf.append_try_log_term)\n\t\t\t\t\t//rf.Last_log_term = args.Last_log_term\n\t\t\t\t\trf.last_term_log_lenth=args.Last_log_term_lenth\n\t\t\t\t\trf.log = append(rf.log, args.Newest_log)\n\t\t\t\t\trf.Last_log_index += 1\n\t\t\t\t\trf.Log_Term = args.Log_Term\n\t\t\t\t\trf.Last_log_term=args.Newest_log.Log_Term\n\t\t\t\t\treply.Append_success = true\n\t\t\t\t\t//fmt.Printf(\"APPend成功,现在编号为%d的raft实例的log是%v,last_log_term是%d,term是%d\\n\",rf.me,rf.log,rf.Last_log_term,rf.term)\n\t\t\t\t}\n\t\t\t}\n\t\t\trf.log_added_content = args.Newest_log\n\t\t\trf.last_term_log_lenth=0\n\n\t\t\tfor cc:=len(rf.log)-1;cc>-1;cc--{\n\t\t\t\tif rf.log[cc].Log_Term!=rf.Last_log_term{\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\trf.last_term_log_lenth+=1\n\t\t\t}\n\n\n\t\t}\n\n\t\t//fmt.Printf(\"在更新heartbeat之前\\n\")\n\t\tif args.Votemsg==false {//加上个约束条件更严谨,加上了表示是在heartbeat开始之后认同了这个是leader,否则在投票阶段就认同了\n\t\t\t//fmt.Printf(\"rf.last_log_term %d, args.last_log_term %d\\n\",rf.Last_log_term,args.Last_log_term)\n\t\t\tif args.Last_log_term==rf.Last_log_term {//!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\t\t\t\tif args.Commit_MSG == true {\n\t\t\t\t\t//if len(rf.Log_Term)==len(args.Log_Term)&&rf.Log_Term[len(rf.Log_Term)-1]==args.Log_Term[len(args.Log_Term)-1]{\n\t\t\t\t\t//if len(args.Log_Term)==len(rf.Log_Term)&&args.Last_log_term==rf.Last_log_term {\n\t\t\t\t\tfor cc := rf.committed_index + 1; cc <= rf.Last_log_index; cc++ {\n\t\t\t\t\t\trf.committed_index = cc\n\t\t\t\t\t\t//!-------------------------fmt.Printf(\"在follower %d 上进行commit,commit_index是%d,commit的内容是%v,commit的term是%d,last_log_term是%d, rf.log是太长暂时鸽了\\n\", rf.me, cc, rf.log[cc].Log_Command, rf.log[cc].Log_Term, rf.Last_log_term)\n\t\t\t\t\t\trf.applych <- ApplyMsg{true, rf.log[rf.committed_index].Log_Command, rf.committed_index}\n\t\t\t\t\t}\n\n\t\t\t\t\treply.Commit_finished = true\n\t\t\t\t\t//}else{\n\t\t\t\t\t//}\n\t\t\t\t\t//}\n\t\t\t\t}\n\t\t\t}//!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n\t\t\trf.leaderID = args.From\n\t\t\trf.term = 
args.Candidate_term\n\t\t\trf.leaderID=args.From\n\n\n\t\t}\n\t\treply.Last_log_lenth=rf.last_term_log_lenth\n\t\treply.Last_log_term=rf.Last_log_term\n\n\t\tif args.Votemsg==false {\n\t\t\tif rf.state == 0 {\n\t\t\t\trf.last_heartbeat <- 1\n\t\t\t}\n\t\t}\n\n\t}else{\n\t\t//fmt.Printf(\"term都不符,明显是非法的!\\n\")\n\t\treply.Vote_sent = false\n\t\treply.Append_success = false\n\t\treply.Term=rf.term\n\t\treply.Last_log_lenth=rf.last_term_log_lenth\n\t\treply.Last_log_term=rf.Last_log_term\n\t\t//-------------------if (args.Last_log_term>rf.Last_log_term)||(args.Last_log_term==rf.Last_log_term&&args.Last_log_term_lenth>=rf.last_term_log_lenth){\n\t\t//----------------------\treply.You_are_true=true\n\t\t//------------------------}\n\t}\n\trf.mu.Unlock()\n\t//fmt.Printf(\"编号为%d的raft实例通过RequestVote()收到了heartbeat\\n\",rf.me)\n\t//reply.voted<-true\n\t//rf.mu.Unlock()\n}", "func (_Contract *ContractSession) GetVote(from common.Address, delegatedTo common.Address, proposalID *big.Int) (struct {\n\tWeight *big.Int\n\tChoices []*big.Int\n}, error) {\n\treturn _Contract.Contract.GetVote(&_Contract.CallOpts, from, delegatedTo, proposalID)\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\trf.mu.Lock()\n\tdefer DPrintf(\"%d received RequestVote from %d, args.Term : %d, args.LastLogIndex: %d, args.LastLogTerm: %d, rf.log: %v, rf.voteFor: %d, \" +\n\t\t\"reply: %v\", rf.me, args.CandidatedId, args.Term, args.LastLogIndex, args.LastLogTerm, rf.log, rf.voteFor, reply)\n\t// Your code here (2A, 2B).\n\trf.resetElectionTimer()\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\tlastLogIndex := rf.log[len(rf.log)-1].Index\n\tlastLogTerm := rf.log[lastLogIndex].Term\n\n\tif lastLogTerm > args.LastLogTerm || (args.LastLogTerm == lastLogTerm && args.LastLogIndex < lastLogIndex) {\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\n\t// 5.1 Reply false if term < currentTerm\n\tif args.Term < rf.currentTerm {\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\n\tif (args.Term == rf.currentTerm && rf.state == \"leader\") || (args.Term == rf.currentTerm && rf.voteFor != -1){\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\n\tif args.Term == rf.currentTerm && rf.voteFor == args.CandidatedId {\n\t\treply.VoteGranted = true\n\t\trf.mu.Unlock()\n\t\treturn\n\t}\n\n\t// Rules for Servers\n\t// All Servers\n\t// If RPC request or response contains term T > currentTerm: set currentTerm = T, convert to follower\n\tif args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\trf.voteFor = -1\n\t\trf.mu.Unlock()\n\t\trf.changeState(\"follower\")\n\t\trf.mu.Lock()\n\t}\n\n\trf.voteFor = args.CandidatedId\n\treply.VoteGranted = true\n\t//rf.persist()\n\trf.mu.Unlock()\n\treturn\n}", "func (_Contracts *ContractsCallerSession) AllVotes(arg0 *big.Int) (struct {\n\tProposalId *big.Int\n\tPositionId *big.Int\n\tCandidateId *big.Int\n\tVoterId *big.Int\n}, error) {\n\treturn _Contracts.Contract.AllVotes(&_Contracts.CallOpts, arg0)\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.executeLock.Lock()\n\tdefer rf.executeLock.Unlock()\n\n\t//DPrintf(\"[ReceiveRequestVote] [me %v] from [peer %v] start\", rf.me, args.CandidateId)\n\trf.stateLock.Lock()\n\n\tdebugVoteArgs := &RequestVoteArgs{\n\t\tTerm: rf.currentTerm,\n\t\tCandidateId: rf.votedFor,\n\t\tLastLogIndex: int32(len(rf.log) - 1),\n\t\tLastLogTerm: rf.log[len(rf.log)-1].Term,\n\t}\n\tDPrintf(\"[ReceiveRequestVote] [me %#v] self info: %#v from [peer %#v] start\", rf.me, debugVoteArgs, 
args)\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\treply.LastLog = int32(len(rf.log) - 1)\n\treply.LastLogTerm = rf.log[reply.LastLog].Term\n\tif args.Term < rf.currentTerm {\n\t\tDPrintf(\"[ReceiveRequestVote] [me %v] from %v Term :%v <= currentTerm: %v, return\", rf.me, args.CandidateId, args.Term, rf.currentTerm)\n\t\trf.stateLock.Unlock()\n\t\treturn\n\t}\n\n\tconvrt2Follower := false\n\tif args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = -1\n\t\tconvrt2Follower = true\n\t\trf.persist()\n\t}\n\n\tif rf.votedFor == -1 || rf.votedFor == args.CandidateId {\n\t\tlastLogIndex := int32(len(rf.log) - 1)\n\t\tlastLogTerm := rf.log[lastLogIndex].Term\n\n\t\tif args.LastLogTerm < lastLogTerm || (args.LastLogTerm == lastLogTerm && args.LastLogIndex < lastLogIndex) {\n\t\t\trf.votedFor = -1\n\t\t\trf.lastHeartbeat = time.Now()\n\t\t\tDPrintf(\"[ReceiveRequestVote] [me %v] index from [%v] is oldest, return\", rf.me, args.CandidateId)\n\n\t\t\tif convrt2Follower && rf.role != _Follower {\n\t\t\t\tDPrintf(\"[ReceiveRequestVote] [me %v] from %v Term :%v (non-follower) > currentTerm: %v, return\", rf.me, args.CandidateId, args.Term, rf.currentTerm)\n\t\t\t\trf.role = _Unknown\n\t\t\t\trf.stateLock.Unlock()\n\t\t\t\tselect {\n\t\t\t\tcase <-rf.closeCh:\n\t\t\t\tcase rf.roleCh <- _Follower:\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trf.stateLock.Unlock()\n\t\t\treturn\n\t\t}\n\n\t\trf.votedFor = args.CandidateId\n\t\t// [WARNING] 一旦授权,应该重置超时\n\t\trf.lastHeartbeat = time.Now()\n\t\treply.VoteGranted = true\n\t\tDPrintf(\"[ReceiveRequestVote] [me %v] granted vote for %v\", rf.me, args.CandidateId)\n\t\tif rf.role != _Follower {\n\t\t\tDPrintf(\"[ReceiveRequestVote] [me %v] become follower\", rf.me)\n\t\t\trf.role = _Unknown\n\t\t\trf.stateLock.Unlock()\n\t\t\tselect {\n\t\t\tcase <-rf.closeCh:\n\t\t\t\treturn\n\t\t\tcase rf.roleCh <- _Follower:\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\trf.stateLock.Unlock()\n\t\treturn\n\t}\n\tDPrintf(\"[ReceiveRequestVote] [me %v] have voted: %v, return\", rf.me, rf.votedFor)\n\trf.stateLock.Unlock()\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tdefer rf.persist()\n\n\treply.VoteGranted = false\n\treply.Term = rf.currentTerm\n\n\t// Rule for all servers: If RPC request or response contains term T > currentTerm: set currentTerm = T, convert to follower (§5.1)\n\tif args.Term > rf.currentTerm {\n\t\trf.convertToFollower(args.Term)\n\t}\n\n\t// 1. Reply false if term < currentTerm (§5.1)\n\tif args.Term < rf.currentTerm {\n\t\t_, _ = DPrintf(vote(\"[T%v] %v: Received RequestVote from %v | Discarded Vote | Received Lower Term \"), rf.currentTerm, rf.me, args.CandidateID, args.CandidateID)\n\t\treturn\n\t}\n\n\t/* 2. If\n\t *\t\t1. votedFor is null or candidateId\n\t *\t\t2. 
candidate’s log is at least as up-to-date as receiver’s log\n\t *\tgrant vote (§5.2, §5.4)\n\t */\n\n\t// Check 1 vote: should be able to vote or voted for candidate\n\tvoteCheck := rf.votedFor == noVote || rf.votedFor == args.CandidateID\n\t// Check 2 up-to-date = (same indices OR candidate's lastLogIndex > current peer's lastLogIndex)\n\tlastLogIndex, lastLogTerm := rf.lastLogEntryIndex(), rf.lastLogEntryTerm()\n\tlogCheck := lastLogTerm < args.LastLogTerm || (lastLogTerm == args.LastLogTerm && lastLogIndex <= args.LastLogIndex)\n\n\t// Both checks should be true to grant vote\n\tif voteCheck && logCheck {\n\t\treply.VoteGranted = true\n\t\t_, _ = DPrintf(vote(\"[T%v] %v: Received RequestVote from %v | Vote Successful\"), rf.currentTerm, rf.me, args.CandidateID)\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = args.CandidateID\n\t} else if !voteCheck {\n\t\t_, _ = DPrintf(vote(\"[T%v] %v: Received RequestVote from %v | Vote Failure | Already voted for %v\"), rf.currentTerm, rf.me, args.CandidateID, rf.votedFor)\n\t} else {\n\t\t_, _ = DPrintf(vote(\"[T%v] %v: Received RequestVote from %v | Vote Failure | No Up-To-Date Log | Received {LastLogTerm: %v, LastLogIndex: %v} | Current {LastLogTerm: %v, LastLogIndex: %v}\"),\n\t\t\trf.currentTerm, rf.me, args.CandidateID, args.LastLogTerm, args.LastLogIndex, lastLogTerm, lastLogIndex)\n\t}\n\trf.resetTTL()\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\n\t// Your code here (2A, 2B).\n\n\tDPrintf(\" before %v 's request,%v 's votefor is %v\", args.CandidateId, rf.me, rf.voteFor)\n\t//log.Printf(\" before %v 's request,%v 's votefor is %v\", args.CandidateId, rf.me, rf.voteFor)\n\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tdefer rf.persist()\n\tlog.Printf(\" before %v 's request,%v 's votefor is %v\", args.CandidateId, rf.me, rf.voteFor)\n\n\tDPrintf(\" %v's requesetvote args is %v, and the reciever %v currentTerm is %v\", args.CandidateId, *args, rf.me, rf.currentTerm)\n\t//log.Printf(\" %v's requesetvote args is %v, and the reciever %v currentTerm is %v\", args.CandidateId, *args, rf.me, rf.currentTerm)\n\n\t// all servers\n\tif rf.currentTerm < args.Term {\n\t\trf.convertToFollower(args.Term)\n\t}\n\n\t_voteGranted := false\n\tif rf.currentTerm == args.Term && (rf.voteFor == VOTENULL || rf.voteFor == args.CandidateId) && (rf.getLastLogTerm() < args.LastLogTerm || (rf.getLastLogTerm() == args.LastLogTerm && rf.getLastLogIndex() <= args.LastLogIndex)) {\n\t\trf.state = Follower\n\t\tdropAndSet(rf.grantVoteCh)\n\t\t_voteGranted = true\n\t\trf.voteFor = args.CandidateId\n\t}\n\n\treply.VoteGranted = _voteGranted\n\treply.Term = rf.currentTerm\n\n\tDPrintf(\" after %v 's request,%v 's votefor is %v\", args.CandidateId, rf.me, rf.voteFor)\n\tlog.Printf(\" after %v 's request,%v 's votefor is %v\", args.CandidateId, rf.me, rf.voteFor)\n}", "func (_Contracts *ContractsCaller) AllVotes(opts *bind.CallOpts, arg0 *big.Int) (struct {\n\tProposalId *big.Int\n\tPositionId *big.Int\n\tCandidateId *big.Int\n\tVoterId *big.Int\n}, error) {\n\tret := new(struct {\n\t\tProposalId *big.Int\n\t\tPositionId *big.Int\n\t\tCandidateId *big.Int\n\t\tVoterId *big.Int\n\t})\n\tout := ret\n\terr := _Contracts.contract.Call(opts, out, \"allVotes\", arg0)\n\treturn *ret, err\n}", "func (*RequestVoteRequest) Descriptor() ([]byte, []int) {\n\treturn file_request_vote_request_proto_rawDescGZIP(), []int{0}\n}", "func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) {\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tdefer 
rf.persist()\n\n\tif args.Term > rf.currentTerm {\n\t\trf.convert2Follower(args.Term)\n\t}\n\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\n\tif args.Term < rf.currentTerm {\n\t\treturn\n\t}\n\n\tlastLogTerm := rf.getLastLogTerm()\n\tlastLogIndex := rf.getLastLogIndex()\n\t// voted-none && least-up-to-date\n\n\tup2Date := false\n\tif lastLogTerm < args.LastLogTerm {\n\t\tup2Date = true\n\t}\n\tif lastLogTerm == args.LastLogTerm && lastLogIndex <= args.LastLogIndex {\n\t\tup2Date = true\n\t}\n\n\tif up2Date && (rf.votedFor == -1 || rf.votedFor == args.CandidateId) {\n\t\treply.VoteGranted = true\n\t\trf.votedFor = args.CandidateId\n\t\t// DPrintf(\"Server [%v] vote [%v] for Term [%v]\", rf.me, args.CandidateId, rf.currentTerm)\n\t}\n}", "func (*CUserAccount_GetAvailableValveDiscountPromotions_Request) Descriptor() ([]byte, []int) {\n\treturn file_steammessages_useraccount_steamclient_proto_rawDescGZIP(), []int{0}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\tif rf.currentTerm < args.Term {\n\t\trf.debug(\"Updating term to new term %v\\n\", args.Term)\n\t\trf.currentTerm = args.Term\n\t\tatomic.StoreInt32(&rf.state, FOLLOWER)\n\t\trf.votedFor = LEADER_UNKNOWN\n\t}\n\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\n\t// late candidates\n\tif args.Term < rf.currentTerm {\n\t\trf.debug(\"Rejecting candidate %v. Reason: late term=%v\\n\", args.CandidateId, args.Term)\n\t\treturn\n\t}\n\n\t// avoid double vote\n\tif rf.votedFor != LEADER_UNKNOWN && rf.votedFor != args.CandidateId {\n\t\trf.debug(\"Rejecting candidate %v. Reason: already voted\\n\", args.CandidateId)\n\t\treturn\n\t}\n\n\tlastLogIndex := rf.lastEntryIndex()\n\n\t// reject old logs\n\tif rf.index(lastLogIndex).Term > args.LastLogTerm {\n\t\trf.debug(\"Rejecting candidate %v. Reason: old log\\n\", args.CandidateId)\n\t\treturn\n\t}\n\n\t// log is smaller\n\tif rf.index(lastLogIndex).Term == args.LastLogTerm && args.LastLogIndex < lastLogIndex {\n\t\trf.debug(\"Rejecting candidate %v. Reason: small log\\n\", args.CandidateId)\n\t\treturn\n\t}\n\n\trf.votedFor = args.CandidateId\n\trf.gotContacted = true\n\n\trf.debug(\"Granting vote to %v. 
me=(%v,%v), candidate=(%v,%v)\\n\", args.CandidateId, lastLogIndex, rf.index(lastLogIndex).Term, args.LastLogIndex, args.LastLogTerm)\n\treply.VoteGranted = true\n\n\t// save state\n\trf.persist(false)\n}", "func Vote(w http.ResponseWriter, r *http.Request) {\n\tvar data voteRequest // Create struct to store data.\n\terr := json.NewDecoder(r.Body).Decode(&data) // Decode response to struct.\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\thelpers.ThrowErr(w, r, \"JSON decoding error\", err)\n\t\treturn\n\t}\n\n\t// Secure our request with reCAPTCHA v2 and v3.\n\tif !captcha.V3(data.CaptchaV2, data.Captcha, r.Header.Get(\"CF-Connecting-IP\"), \"vote\") {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tvars := mux.Vars(r)\n\n\tpost, err := db.GetPost(vars[\"uuid\"])\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\thelpers.ThrowErr(w, r, \"Getting post from DB error\", err)\n\t\treturn\n\t}\n\n\tif post.Creation == 0 {\n\t\t// Post has been deleted.\n\t\tw.WriteHeader(http.StatusGone)\n\t\treturn\n\t}\n\n\tscore, err := db.SetVote(post, context.Get(r, \"uuid\").(string), data.Upvote)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\thelpers.ThrowErr(w, r, \"Setting vote error\", err)\n\t\treturn\n\t}\n\n\thelpers.JSONResponse(voteResponse{\n\t\tScore: score,\n\t}, w)\n\treturn\n}", "func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here.\n\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tif rf.state == Follower {\n\t\trf.ResetHeartBeatTimer()\n\t}\n\n\t// term in candidate old than this follower\n\tif args.Term < rf.currentTerm {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\n\tif args.Term > rf.currentTerm {\n\t\trf.UpdateNewTerm(args.Term)\n\t\trf.stateCh <- Follower\n\t}\n\n\tlogIndexSelf := len(rf.log) - 1\n\n\tvar isNew bool\n\t// the term is equal check the index\n\tif args.LastLogTerm == rf.log[logIndexSelf].Term {\n\t\tisNew = args.LastLogIndex >= logIndexSelf\n\t} else {\n\t\tisNew = args.LastLogTerm > rf.log[logIndexSelf].Term\n\t}\n\n\tif (rf.votedFor == -1 || rf.me == args.CandidateId) && isNew {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGranted = true\n\t\trf.votedFor = args.CandidateId\n\t\trf.persist()\n\t\treturn\n\t} else {\n\t\treply.Term = rf.currentTerm\n\t\treply.VoteGranted = false\n\t}\n\n}", "func (r *ApprovalWorkflowProviderRequestsAwaitingMyDecisionCollectionRequest) Get(ctx context.Context) ([]RequestObject, error) {\n\treturn r.GetN(ctx, 0)\n}", "func (s *PullRequestsService) RequestReviewers(ctx context.Context, owner, repo string, number int, logins []string) (*PullRequest, *Response, error) {\n\tu := fmt.Sprintf(\"repos/%s/%s/pulls/%d/requested_reviewers\", owner, repo, number)\n\n\treviewers := struct {\n\t\tReviewers []string `json:\"reviewers,omitempty\"`\n\t}{\n\t\tReviewers: logins,\n\t}\n\treq, err := s.client.NewRequest(\"POST\", u, &reviewers)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tr := new(PullRequest)\n\tresp, err := s.client.Do(ctx, req, r)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn r, resp, nil\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n DPrintf(\"%d: %d recieve RequestVote from %d:%d\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate)\n // Your code here (2A, 2B).\n rf.mu.Lock()\n defer rf.mu.Unlock()\n if args.Term < rf.currentTerm {\n \n reply.VoteGranted = false\n reply.Term = rf.currentTerm\n 
DPrintf(\"%d: %d recieve voteRequest from %d:%d %v\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate, reply.VoteGranted)\n return\n }\n\n if args.Term > rf.currentTerm {\n rf.votedFor = -1\n rf.currentTerm = args.Term\n }\n\n if rf.votedFor == -1 || rf.votedFor == args.Candidate {\n // election restriction\n if args.LastLogTerm < rf.log[len(rf.log) - 1].Term ||\n (args.LastLogTerm == rf.log[len(rf.log) - 1].Term &&\n args.LastLogIndex < len(rf.log) - 1) {\n rf.votedFor = -1\n reply.VoteGranted = false\n DPrintf(\"%d: %d recieve voteRequest from %d:%d %v\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate, reply.VoteGranted)\n return\n }\n\n \n if rf.state == FOLLOWER {\n rf.heartbeat <- true\n }\n rf.state = FOLLOWER\n rf.resetTimeout()\n rf.votedFor = args.Candidate\n\n \n reply.VoteGranted = true\n reply.Term = args.Term\n DPrintf(\"%d: %d recieve voteRequest from %d:%d %v\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate, reply.VoteGranted)\n return\n }\n reply.VoteGranted = false\n reply.Term = args.Term\n DPrintf(\"%d: %d recieve voteRequest from %d:%d %v\\n\", rf.currentTerm, rf.me, args.Term, args.Candidate, reply.VoteGranted)\n}", "func (*VoteRequest) Descriptor() ([]byte, []int) {\n\treturn file_internal_proto_crypto_proto_rawDescGZIP(), []int{1}\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\trf.lock()\n\tdefer rf.unLock()\n\treply.Term = rf.currentTerm\n\treply.VoteGranted = false\n\tif args.Term < rf.currentTerm {\n\t\treturn\n\t} else if args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\trf.votedFor = -1\n\t\trf.myState = FollowerState\n\t\trf.persist()\n\t}\n\n\tif rf.votedFor < 0 || rf.votedFor == args.CandidateId {\n\t\t// candidate's logEntries is at least as up-to-date as receiver's logEntries, grant vote\n\t\tlastLogTerm := -1\n\t\tif len(rf.logEntries) != 0 {\n\t\t\tlastLogTerm = rf.logEntries[len(rf.logEntries)-1].Term\n\t\t} else {\n\t\t\tlastLogTerm = rf.lastIncludedTerm\n\t\t}\n\t\tif args.LastLogTerm < lastLogTerm || (args.LastLogTerm == lastLogTerm && args.LastLogIndex < rf.lastIncludedIndex+len(rf.logEntries)) {\n\t\t\treturn\n\t\t} else {\n\t\t\trf.votedFor = args.CandidateId\n\t\t\treply.VoteGranted = true\n\t\t\trf.timerReset = time.Now()\n\t\t\trf.persist()\n\t\t\treturn\n\t\t}\n\t}\n\t// Your code here (2A, 2B).\n}", "func (c *RepoAPI) VoteProposal(body *api.BodyRepoVote) (*api.ResultHash, error) {\n\n\tif body.SigningKey == nil {\n\t\treturn nil, errors.ReqErr(400, ErrCodeBadParam, \"signingKey\", \"signing key is required\")\n\t}\n\n\ttx := txns.NewBareRepoProposalVote()\n\ttx.RepoName = body.RepoName\n\ttx.ProposalID = body.ProposalID\n\ttx.Vote = body.Vote\n\ttx.Nonce = body.Nonce\n\ttx.Fee = util.String(cast.ToString(body.Fee))\n\ttx.Timestamp = time.Now().Unix()\n\ttx.SenderPubKey = body.SigningKey.PubKey().ToPublicKey()\n\n\tvar err error\n\ttx.Sig, err = tx.Sign(body.SigningKey.PrivKey().Base58())\n\tif err != nil {\n\t\treturn nil, errors.ReqErr(400, ErrCodeClient, \"privkey\", err.Error())\n\t}\n\n\tresp, statusCode, err := c.c.call(\"repo_vote\", tx.ToMap())\n\tif err != nil {\n\t\treturn nil, makeReqErrFromCallErr(statusCode, err)\n\t}\n\n\tvar r api.ResultHash\n\tif err = util.DecodeMap(resp, &r); err != nil {\n\t\treturn nil, errors.ReqErr(500, ErrCodeDecodeFailed, \"\", err.Error())\n\t}\n\n\treturn &r, nil\n}", "func vote(w http.ResponseWriter, req *http.Request, upvote bool, isComment bool, t VoteRequest) error {\n\tif t.Id == \"\" {\n\t\treturn fmt.Errorf(\"missing post 
id\")\n\t}\n\ttable := \"posts\"\n\tif isComment {\n\t\ttable = \"comments\"\n\t}\n\trsp, err := client.DbService.Read(&db.ReadRequest{\n\t\tTable: table,\n\t\tId: t.Id,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(rsp.Records) == 0 {\n\t\treturn fmt.Errorf(\"post or comment not found\")\n\t}\n\n\t// auth\n\tsessionRsp, err := client.UserService.ReadSession(&user.ReadSessionRequest{\n\t\tSessionId: t.SessionID,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif sessionRsp.Session.UserId == \"\" {\n\t\treturn fmt.Errorf(\"user id not found\")\n\t}\n\n\t// prevent double votes\n\tcheckTable := table + \"votecheck\"\n\tcheckId := t.Id + sessionRsp.Session.UserId\n\tcheckRsp, err := client.DbService.Read(&db.ReadRequest{\n\t\tTable: checkTable,\n\t\tId: checkId,\n\t})\n\tmod := isMod(sessionRsp.Session.UserId, mods)\n\tif err == nil && (checkRsp != nil && len(checkRsp.Records) > 0) {\n\t\tif !mod {\n\t\t\treturn fmt.Errorf(\"already voted\")\n\t\t}\n\t}\n\tval := float64(1)\n\tif mod {\n\t\trand.Seed(time.Now().UnixNano())\n\t\tval = float64(rand.Intn(17-4) + 4)\n\t}\n\n\tif !mod {\n\t\t_, err = client.DbService.Create(&db.CreateRequest{\n\t\t\tTable: checkTable,\n\t\t\tRecord: map[string]interface{}{\n\t\t\t\t\"id\": checkId,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tobj := rsp.Records[0]\n\tkey := \"upvotes\"\n\tif !upvote {\n\t\tkey = \"downvotes\"\n\t}\n\n\tif _, ok := obj[\"upvotes\"].(float64); !ok {\n\t\tobj[\"upvotes\"] = float64(0)\n\t}\n\tif _, ok := obj[\"downvotes\"].(float64); !ok {\n\t\tobj[\"downvotes\"] = float64(0)\n\t}\n\n\tobj[key] = obj[key].(float64) + val\n\tobj[\"score\"] = obj[\"upvotes\"].(float64) - obj[\"downvotes\"].(float64)\n\n\t_, err = client.DbService.Update(&db.UpdateRequest{\n\t\tTable: table,\n\t\tId: t.Id,\n\t\tRecord: obj,\n\t})\n\treturn err\n}", "func (commit *Commit) VoteBlockRequestID() []byte {\n\trequestIDMessage := []byte(\"dpbvote\")\n\theightByteArray := make([]byte, 8)\n\tbinary.LittleEndian.PutUint64(heightByteArray, uint64(commit.Height))\n\troundByteArray := make([]byte, 4)\n\tbinary.LittleEndian.PutUint32(roundByteArray, uint32(commit.Round))\n\n\trequestIDMessage = append(requestIDMessage, heightByteArray...)\n\trequestIDMessage = append(requestIDMessage, roundByteArray...)\n\n\treturn crypto.Sha256(requestIDMessage)\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\n\t//fmt.Printf(\"Server %d: log is %v\\n\", rf.me, rf.log)\n\n\tvar newer bool\n\n\tif args.Term > rf.currentTerm {\n\t\trf.votedFor = -1\n\t}\n\n\tif len(rf.log) == 0 || args.LastLogTerm > rf.log[len(rf.log)-1].Term {\n\t\tnewer = true\n\t} else if args.LastLogTerm == rf.log[len(rf.log)-1].Term && len(rf.log) <= args.LastLogIndex+1 {\n\t\tnewer = true\n\t}\n\n\tif newer == true && (rf.votedFor == -1 || rf.votedFor == args.CandidateID) {\n\t\treply.VoteGranted = true\n\t} else {\n\t\treply.VoteGranted = false\n\t}\n\n\tvar votedFor int\n\tif reply.VoteGranted {\n\t\tvotedFor = args.CandidateID\n\t} else {\n\t\tvotedFor = -1\n\t}\n\trf.votedFor = votedFor\n\n\tif args.Term < rf.currentTerm {\n\t\treply.VoteGranted = false\n\t} else if args.Term > rf.currentTerm {\n\t\trf.currentTerm = args.Term\n\t\tif rf.state != Follower {\n\t\t\trf.convertToFollower(rf.currentTerm, votedFor)\n\t\t}\n\t}\n\n\treply.Term = rf.currentTerm\n\n\trf.persist()\n\n\tif reply.VoteGranted == true {\n\t\tgo func() {\n\t\t\tselect {\n\t\t\tcase 
<-rf.grantVoteCh:\n\t\t\tdefault:\n\t\t\t}\n\t\t\trf.grantVoteCh <- true\n\t\t}()\n\t}\n}", "func revisionFromRequest(recent types.FileContractRevision, pbcr modules.PayByContractRequest) types.FileContractRevision {\n\trev := recent\n\n\trev.NewRevisionNumber = pbcr.NewRevisionNumber\n\trev.NewValidProofOutputs = make([]types.SiacoinOutput, len(pbcr.NewValidProofValues))\n\tfor i, v := range pbcr.NewValidProofValues {\n\t\tif i >= len(recent.NewValidProofOutputs) {\n\t\t\tbreak\n\t\t}\n\t\trev.NewValidProofOutputs[i] = types.SiacoinOutput{\n\t\t\tValue: v,\n\t\t\tUnlockHash: recent.NewValidProofOutputs[i].UnlockHash,\n\t\t}\n\t}\n\n\trev.NewMissedProofOutputs = make([]types.SiacoinOutput, len(pbcr.NewMissedProofValues))\n\tfor i, v := range pbcr.NewMissedProofValues {\n\t\tif i >= len(recent.NewMissedProofOutputs) {\n\t\t\tbreak\n\t\t}\n\t\trev.NewMissedProofOutputs[i] = types.SiacoinOutput{\n\t\t\tValue: v,\n\t\t\tUnlockHash: recent.NewMissedProofOutputs[i].UnlockHash,\n\t\t}\n\t}\n\n\treturn rev\n}", "func (rf *Raft) sendRequestVote(server int, args *RequestVoteArgs, reply *RequestVoteReply, voteCount *int32) bool {\n\tok := rf.peers[server].Call(\"Raft.RequestVote\", args, reply)\n\tlog.Printf(\"peer %v request vote to peer %v result %v\", rf.peerId, reply.VoterId, reply)\n\tif !ok {\n\t\treturn ok\n\t}\n\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\tif rf.getState() != Candidate || args.Term != rf.currentTerm {\n\t\treturn ok\n\t}\n\tif reply.Term > rf.currentTerm {\n\t\trf.stepDownToFollower(reply.Term)\n\t}\n\tif reply.VoteGranted {\n\t\tatomic.AddInt32(voteCount, 1)\n\t}\n\tif int(atomic.LoadInt32(voteCount)) > len(rf.peers)/2 {\n\t\trf.setState(Leader)\n\t\trf.electAsLeaderCh <- true\n\t}\n\treturn ok\n}", "func (keeper Keeper) GetVotes(ctx sdk.Context, proposalID uint64) (votes types.Votes) {\n\tkeeper.IterateVotes(ctx, proposalID, func(vote types.Vote) bool {\n\t\tvotes = append(votes, vote)\n\t\treturn false\n\t})\n\treturn\n}", "func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {\n\t// Your code here (2A, 2B).\n\tDPrintf(\"Raft node (%d) handles with RequestVote, candidateId: %v\\n\", rf.me, args.CandidateId)\n\trf.mu.Lock()\n\tdefer rf.mu.Unlock()\n\n\treply.Term = rf.currentTerm\n\treply.PeerId = rf.me\n\n\tif rf.currentTerm == args.Term && rf.votedFor != -1 && rf.votedFor != args.CandidateId {\n\t\tDPrintf(\"Raft node (%v) denied vote, votedFor: %v, candidateId: %v.\\n\", rf.me,\n\t\t\trf.votedFor, args.CandidateId)\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\n\tlastLogIndex := len(rf.logs) - 1\n\tlastLogEntry := rf.logs[lastLogIndex]\n\tif lastLogEntry.Term > args.LastLogTerm || lastLogIndex > args.LastLogIndex {\n\t\t// If this node is more up-to-date than candidate, then reject vote\n\t\t//DPrintf(\"Raft node (%v) LastLogIndex: %v, LastLogTerm: %v, args (%v, %v)\\n\", rf.me,\n\t\t//\tlastLogIndex, lastLogEntry.Term, args.LastLogIndex, args.LastLogTerm)\n\t\treply.VoteGranted = false\n\t\treturn\n\t}\n\n\trf.tryEnterFollowState(args.Term)\n\n\trf.currentTerm = args.Term\n\trf.votedFor = args.CandidateId\n\treply.VoteGranted = true\n}" ]
[ "0.61757547", "0.6035921", "0.55281144", "0.5522556", "0.5502034", "0.5381857", "0.5226636", "0.51511484", "0.5136846", "0.5118304", "0.50843143", "0.5066021", "0.5065501", "0.50638837", "0.49484923", "0.49452785", "0.49406174", "0.4903563", "0.48684135", "0.48664993", "0.4847217", "0.48467264", "0.4831342", "0.4828591", "0.480906", "0.48023817", "0.47939476", "0.47804308", "0.47768643", "0.47753012", "0.47674584", "0.47615245", "0.4754029", "0.4747477", "0.4720985", "0.4713356", "0.47130835", "0.47129223", "0.46952382", "0.4693013", "0.46927628", "0.4684932", "0.46797743", "0.4668367", "0.4667689", "0.4653999", "0.46521637", "0.46504027", "0.46496215", "0.46457985", "0.4638288", "0.46163607", "0.46157253", "0.46091697", "0.45999536", "0.459412", "0.4586752", "0.45862198", "0.45751578", "0.45709395", "0.45701525", "0.4566862", "0.4562645", "0.4559903", "0.45585206", "0.45517704", "0.45489812", "0.45478618", "0.45359105", "0.45341852", "0.45084196", "0.4505923", "0.4502941", "0.45018262", "0.4501159", "0.4498306", "0.44977093", "0.44928256", "0.44832876", "0.44832322", "0.4473499", "0.44729653", "0.44680732", "0.44612536", "0.44552764", "0.44506845", "0.4449672", "0.4445938", "0.44399658", "0.44380847", "0.44315034", "0.44278756", "0.44266808", "0.44148165", "0.44077054", "0.4406762", "0.44053656", "0.4399242", "0.43938738", "0.43867618" ]
0.8009276
0
GetCommitteeRequestsList api request get_committee_requests_list
func (api *API) GetCommitteeRequestsList(status uint16) ([]*uint16, error) {
	var resp []*uint16
	err := api.call("committee_api", "get_committee_requests_list", []interface{}{status}, &resp)
	return resp, err
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (api *API) GetCommitteeRequest(id uint32, count int32) (*CommitteeObject, error) {\n\tvar resp CommitteeObject\n\terr := api.call(\"committee_api\", \"get_committee_request\", []interface{}{id, count}, &resp)\n\treturn &resp, err\n}", "func (api *API) GetCommitteeRequestVotes(id uint32) ([]*CommitteeVoteState, error) {\n\tvar resp []*CommitteeVoteState\n\terr := api.call(\"committee_api\", \"get_committee_request_votes\", []interface{}{id}, &resp)\n\treturn resp, err\n}", "func (c *Client) ListPullRequestCommits(owner, repo string, index int64, opt ListPullRequestCommitsOptions) ([]*Commit, *Response, error) {\n\tif err := escapeValidatePathSegments(&owner, &repo); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tlink, _ := url.Parse(fmt.Sprintf(\"/repos/%s/%s/pulls/%d/commits\", owner, repo, index))\n\topt.setDefaults()\n\tcommits := make([]*Commit, 0, opt.PageSize)\n\tlink.RawQuery = opt.getURLQuery().Encode()\n\tresp, err := c.getParsedResponse(\"GET\", link.String(), nil, nil, &commits)\n\treturn commits, resp, err\n}", "func (r *ApprovalWorkflowProviderRequestsCollectionRequest) Get(ctx context.Context) ([]RequestObject, error) {\n\treturn r.GetN(ctx, 0)\n}", "func (client IdentityClient) listWorkRequests(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/workRequests\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response ListWorkRequestsResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (client BastionClient) listWorkRequests(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/workRequests\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response ListWorkRequestsResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (api MWSAPI) GetReportRequestList(params map[string]string) (string, error) {\n\t// params := make(map[string]string)\n\tparams[\"MarketplaceId\"] = string(api.MarketplaceID)\n\treturn api.genSignAndFetch(\"GetReportRequestList\", reportAPI, params)\n}", "func (r *ScheduleOpenShiftChangeRequestsCollectionRequest) Get(ctx context.Context) ([]OpenShiftChangeRequestObject, error) {\n\treturn r.GetN(ctx, 0)\n}", "func (c APIClient) ListCommit(repoName string, to string, from string, number uint64) ([]*pfs.CommitInfo, error) {\n\tvar result []*pfs.CommitInfo\n\tif err := c.ListCommitF(repoName, to, from, number, func(ci *pfs.CommitInfo) error {\n\t\tresult = append(result, ci)\n\t\treturn nil\n\t}); err != nil {\n\t\treturn nil, err\n\t}\n\treturn result, nil\n}", "func (client IdentityClient) ListWorkRequests(ctx context.Context, request ListWorkRequestsRequest) (response ListWorkRequestsResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = 
*client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.listWorkRequests, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = ListWorkRequestsResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = ListWorkRequestsResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(ListWorkRequestsResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into ListWorkRequestsResponse\")\n\t}\n\treturn\n}", "func (client IdentityClient) listTaggingWorkRequests(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/taggingWorkRequests\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response ListTaggingWorkRequestsResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (client BastionClient) listWorkRequestLogs(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/workRequests/{workRequestId}/logs\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response ListWorkRequestLogsResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (client BastionClient) ListWorkRequests(ctx context.Context, request ListWorkRequestsRequest) (response ListWorkRequestsResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.listWorkRequests, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = ListWorkRequestsResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = ListWorkRequestsResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(ListWorkRequestsResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into ListWorkRequestsResponse\")\n\t}\n\treturn\n}", "func (store *Engine) Requests(offset, limit, filter string) ([]Request, error) {\n\tresult := requestsResult{}\n\tfilters := Params{\n\t\tOffset: offset,\n\t\tLimit: limit,\n\t\tFilter: filter,\n\t}\n\n\t_, err := 
store.api.\n\t\tURL(\"/workflow-engine/api/v1/requests\").\n\t\tQuery(&filters).\n\t\tGet(&result)\n\n\treturn result.Items, err\n}", "func (g *Gatherer) listCommits(branch, start, end string) ([]*gogithub.RepositoryCommit, error) {\n\tstartCommit, _, err := g.client.GetCommit(g.context, g.options.GithubOrg, g.options.GithubRepo, start)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"retrieve start commit: %w\", err)\n\t}\n\n\tendCommit, _, err := g.client.GetCommit(g.context, g.options.GithubOrg, g.options.GithubRepo, end)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"retrieve end commit: %w\", err)\n\t}\n\n\tallCommits := &commitList{}\n\n\tworker := func(clo *gogithub.CommitsListOptions) (\n\t\tcommits []*gogithub.RepositoryCommit, resp *gogithub.Response, err error,\n\t) {\n\t\tfor {\n\t\t\tcommits, resp, err = g.client.ListCommits(g.context, g.options.GithubOrg, g.options.GithubRepo, clo)\n\t\t\tif err != nil {\n\t\t\t\tif !canWaitAndRetry(resp, err) {\n\t\t\t\t\treturn nil, nil, err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\treturn commits, resp, err\n\t}\n\n\tclo := gogithub.CommitsListOptions{\n\t\tSHA: branch,\n\t\tSince: startCommit.GetCommitter().GetDate().Time,\n\t\tUntil: endCommit.GetCommitter().GetDate().Time,\n\t\tListOptions: gogithub.ListOptions{\n\t\t\tPage: 1,\n\t\t\tPerPage: 100,\n\t\t},\n\t}\n\n\tcommits, resp, err := worker(&clo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tallCommits.Add(commits)\n\n\tremainingPages := resp.LastPage - 1\n\tif remainingPages < 1 {\n\t\treturn allCommits.List(), nil\n\t}\n\n\tt := throttler.New(maxParallelRequests, remainingPages)\n\tfor page := 2; page <= resp.LastPage; page++ {\n\t\tclo := clo\n\t\tclo.ListOptions.Page = page\n\n\t\tgo func() {\n\t\t\tcommits, _, err := worker(&clo)\n\t\t\tif err == nil {\n\t\t\t\tallCommits.Add(commits)\n\t\t\t}\n\t\t\tt.Done(err)\n\t\t}()\n\n\t\t// abort all, if we got one error\n\t\tif t.Throttle() > 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif err := t.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn allCommits.List(), nil\n}", "func FetchRequestList(db *sql.DB, appID string, entriesPerPage, offset int) []SummarizedRequest {\n\tvar result []SummarizedRequest\n\n\trows, err := queryGetRequests(db, appID, entriesPerPage, offset)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\n\t\treturn result\n\t}\n\n\tif rows.Err() != nil {\n\t\tlog.Println(rows.Err().Error())\n\n\t\treturn result\n\t}\n\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\tvar request SummarizedRequest\n\n\t\terr := rows.Scan(\n\t\t\t&request.UID,\n\t\t\t&request.Method,\n\t\t\t&request.Path,\n\t\t\t&request.Time,\n\t\t\t&request.ResponseStatus,\n\t\t)\n\t\tif err != nil {\n\t\t\tlog.Println(err.Error())\n\t\t\treturn result\n\t\t}\n\n\t\tresult = append(result, request)\n\t}\n\n\treturn result\n}", "func (p *PullRequestsService) ListCommits(owner, repoSlug string, pullRequestID int64, opts ...interface{}) (*Commits, *Response, error) {\n\tresult := new(Commits)\n\turlStr := p.client.requestURL(\"/repositories/%s/%s/pullrequests/%v/commits\", owner, repoSlug, pullRequestID)\n\turlStr, addOptErr := addQueryParams(urlStr, opts...)\n\tif addOptErr != nil {\n\t\treturn nil, nil, addOptErr\n\t}\n\n\tresponse, err := p.client.execute(\"GET\", urlStr, result, nil)\n\n\treturn result, response, err\n}", "func NewListRequest() *todopb.ListRequest {\n\tmessage := &todopb.ListRequest{}\n\treturn message\n}", "func (client BastionClient) ListWorkRequestLogs(ctx context.Context, request 
ListWorkRequestLogsRequest) (response ListWorkRequestLogsResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.listWorkRequestLogs, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = ListWorkRequestLogsResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = ListWorkRequestLogsResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(ListWorkRequestLogsResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into ListWorkRequestLogsResponse\")\n\t}\n\treturn\n}", "func (a *RepoAPI) listMergeRequests(params interface{}) (resp *rpc.Response) {\n\tm := objx.New(cast.ToStringMap(params))\n\tname := m.Get(\"name\").Str()\n\treturn rpc.Success(util.Map{\n\t\t\"data\": a.mods.Repo.ListIssues(name),\n\t})\n}", "func MergeRequestListCmd(targetRepo pl.LocalRepo, args *MergeRequestListArgs) (pl.Posts, error) {\n\n\t// Get merge requests posts\n\tmergeReqs, err := args.PostGetter(targetRepo, func(ref plumbing.ReferenceName) bool {\n\t\treturn pl.IsMergeRequestReference(ref.String())\n\t})\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to get merge requests posts\")\n\t}\n\n\t// Sort by their first post time\n\tmergeReqs.SortByFirstPostCreationTimeDesc()\n\n\t// Reverse merge requests if requested\n\tif args.Reverse {\n\t\tmergeReqs.Reverse()\n\t}\n\n\t// Limited the merge requests if requested\n\tif args.Limit > 0 && args.Limit < len(mergeReqs) {\n\t\tmergeReqs = mergeReqs[:args.Limit]\n\t}\n\n\treturn mergeReqs, nil\n}", "func (server *RepositoriesService) ListPullRequests(ctx context.Context, project, repo string, opt *PullRequestListOpts) (*PullRequests, *http.Response, error) {\n\t// Detailed info: https://docs.atlassian.com/bitbucket-server/rest/6.5.1/bitbucket-rest.html#idp254\n\tu := fmt.Sprintf(\"rest/api/1.0/projects/%s/repos/%s/pull-requests\", project, repo)\n\treq, err := server.v1Client.NewRequest(http.MethodGet, u, nil, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar prs PullRequests\n\tresp, err := server.v1Client.Do(req, &prs)\n\treturn &prs, resp, err\n}", "func (client IdentityClient) listIamWorkRequests(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/iamWorkRequests\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response ListIamWorkRequestsResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func requestCredList(ctx aws.Context, client *ec2metadata.EC2Metadata) ([]string, error) {\n\tresp, err := client.GetMetadataWithContext(ctx, iamSecurityCredsPath)\n\tif err != nil {\n\t\treturn nil, awserr.New(\"EC2RoleRequestError\", \"no EC2 
instance role found\", err)\n\t}\n\n\tcredsList := []string{}\n\ts := bufio.NewScanner(strings.NewReader(resp))\n\tfor s.Scan() {\n\t\tcredsList = append(credsList, s.Text())\n\t}\n\n\tif err := s.Err(); err != nil {\n\t\treturn nil, awserr.New(request.ErrCodeSerialization,\n\t\t\t\"failed to read EC2 instance role from metadata service\", err)\n\t}\n\n\treturn credsList, nil\n}", "func ListRequest(baseURL, resourceType string) (*http.Request, error) {\n\tu, err := url.Parse(baseURL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error parsing URL: %v\", err)\n\t}\n\tsetPath(u, resourceType)\n\n\treturn NewRequest(\"GET\", u.String(), nil)\n}", "func GetRequestListing(tx *sql.Tx, options *RequestListingOptions) ([]*model.RequestSummary, error) {\n\n\t// Prepare the primary listing query as a subquery.\n\tsubquery := psql.Select().Distinct().\n\t\tColumn(\"r.id\").\n\t\tColumn(\"regexp_replace(u.username, '@.*', '') AS username\").\n\t\tColumn(\"rt.name AS request_type\").\n\t\tColumn(\"first(ru.created_date) OVER w AS created_date\").\n\t\tColumn(\"last(rsc.name) OVER w AS status\").\n\t\tColumn(\"last(rsc.display_name) OVER w AS display_status\").\n\t\tColumn(\"last(ru.created_date) OVER w AS updated_date\").\n\t\tColumn(\"CAST(r.details AS text) AS details\").\n\t\tFrom(\"requests r\").\n\t\tJoin(\"users u ON r.requesting_user_id = u.id\").\n\t\tJoin(\"request_types rt ON r.request_type_id = rt.id\").\n\t\tJoin(\"request_updates ru ON r.id = ru.request_id\").\n\t\tJoin(\"request_status_codes rsc ON ru.request_status_code_id = rsc.id\").\n\t\tSuffix(\"WINDOW w AS (\" +\n\t\t\t\"PARTITION BY ru.request_id \" +\n\t\t\t\"ORDER BY ru.created_date \" +\n\t\t\t\"RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)\")\n\n\t// Prepare the base query.\n\tbase := psql.Select().\n\t\tColumn(\"id\").\n\t\tColumn(\"username\").\n\t\tColumn(\"request_type\").\n\t\tColumn(\"created_date\").\n\t\tColumn(\"display_status\").\n\t\tColumn(\"updated_date\").\n\t\tColumn(\"details\").\n\t\tFromSelect(subquery, \"subquery\").\n\t\tOrderBy(\"created_date\")\n\n\t// Add the filter to omit completed requests if we're not supposed to include them in the listing.\n\tif !options.IncludeCompletedRequests {\n\t\tbase = base.Where(sq.NotEq{\"status\": []string{\"approved\", \"rejected\"}})\n\t}\n\n\t// Add the filter to limit the listing to requests of a given type if applicable.\n\tif options.RequestType != \"\" {\n\t\tbase = base.Where(sq.Eq{\"request_type\": options.RequestType})\n\t}\n\n\t// Add the filter to limit the listing to requests submitted by a user if applicable.\n\tif options.RequestingUser != \"\" {\n\t\tbase = base.Where(sq.Eq{\"username\": options.RequestingUser})\n\t}\n\n\t// Build the query.\n\tquery, args, err := base.ToSql()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Query the database.\n\trows, err := tx.Query(query, args...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\t// Build the listing from the result set.\n\tlisting := make([]*model.RequestSummary, 0)\n\tfor rows.Next() {\n\t\tvar request model.RequestSummary\n\t\tvar requestDetails string\n\t\terr = rows.Scan(\n\t\t\t&request.ID,\n\t\t\t&request.RequestingUser,\n\t\t\t&request.RequestType,\n\t\t\t&request.CreatedDate,\n\t\t\t&request.Status,\n\t\t\t&request.UpdatedDate,\n\t\t\t&requestDetails,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\terr = json.Unmarshal([]byte(requestDetails), &request.Details)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tlisting = 
append(listing, &request)\n\t}\n\n\treturn listing, nil\n}", "func (ag *TSMClient) ListTechSupportRequests() []*tsproto.TechSupportRequestEvent {\n\treturn ag.notifications\n}", "func (r *ApprovalWorkflowProviderRequestsAwaitingMyDecisionCollectionRequest) Get(ctx context.Context) ([]RequestObject, error) {\n\treturn r.GetN(ctx, 0)\n}", "func (r *ScheduleSwapShiftsChangeRequestsCollectionRequest) Get(ctx context.Context) ([]SwapShiftsChangeRequestObject, error) {\n\treturn r.GetN(ctx, 0)\n}", "func (client IdentityClient) listTaggingWorkRequestLogs(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/taggingWorkRequests/{workRequestId}/logs\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response ListTaggingWorkRequestLogsResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func GetTRBRequests(ctx context.Context, archived bool, store *storage.Store) ([]*models.TRBRequest, error) {\n\tTRBRequests, err := store.GetTRBRequests(ctx, archived)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn TRBRequests, err\n}", "func (client IdentityClient) listIamWorkRequestLogs(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/iamWorkRequests/{iamWorkRequestId}/logs\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response ListIamWorkRequestLogsResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (client IdentityClient) ListTaggingWorkRequests(ctx context.Context, request ListTaggingWorkRequestsRequest) (response ListTaggingWorkRequestsResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.listTaggingWorkRequests, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = ListTaggingWorkRequestsResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = ListTaggingWorkRequestsResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(ListTaggingWorkRequestsResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into ListTaggingWorkRequestsResponse\")\n\t}\n\treturn\n}", "func (c *client) GetPullRequestChanges(org, repo string, number int) ([]PullRequestChange, error) {\n\tdurationLogger := c.log(\"GetPullRequestChanges\", org, repo, 
number)\n\tdefer durationLogger()\n\n\tif c.fake {\n\t\treturn []PullRequestChange{}, nil\n\t}\n\tpath := fmt.Sprintf(\"/repos/%s/%s/pulls/%d/files\", org, repo, number)\n\tvar changes []PullRequestChange\n\terr := c.readPaginatedResults(\n\t\tpath,\n\t\tacceptNone,\n\t\torg,\n\t\tfunc() interface{} {\n\t\t\treturn &[]PullRequestChange{}\n\t\t},\n\t\tfunc(obj interface{}) {\n\t\t\tchanges = append(changes, *(obj.(*[]PullRequestChange))...)\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn changes, nil\n}", "func NewListRequest() *rolespb.ListRequest {\n\tmessage := &rolespb.ListRequest{}\n\treturn message\n}", "func (k Keeper) GetAllRequest(ctx sdk.Context) (list []types.Request) {\n\tstores := k.GetStoreRequestMap(ctx)\n\tfor _, store := range stores {\n\t\tfunc(store prefix.Store) {\n\t\t\titerator := sdk.KVStorePrefixIterator(store, []byte{})\n\t\t\tdefer iterator.Close()\n\n\t\t\tfor ; iterator.Valid(); iterator.Next() {\n\t\t\t\tvar val types.Request\n\t\t\t\tk.cdc.MustUnmarshal(iterator.Value(), &val)\n\t\t\t\tlist = append(list, val)\n\t\t\t}\n\t\t}(store)\n\t}\n\treturn\n}", "func (s *PublicBlockChainAPI) GetCommittee(ctx context.Context, epoch int64) (map[string]interface{}, error) {\n\tcommittee, err := s.b.GetCommittee(big.NewInt(epoch))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvalidators := make([]map[string]interface{}, 0)\n\tfor _, validator := range committee.NodeList {\n\t\tvalidatorBalance := new(hexutil.Big)\n\t\tvalidatorBalance, err = s.b.GetBalance(validator.EcdsaAddress)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\toneAddress, err := internal_common.AddressToBech32(validator.EcdsaAddress)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvalidatorsFields := map[string]interface{}{\n\t\t\t\"address\": oneAddress,\n\t\t\t\"balance\": validatorBalance,\n\t\t}\n\t\tvalidators = append(validators, validatorsFields)\n\t}\n\tresult := map[string]interface{}{\n\t\t\"shardID\": committee.ShardID,\n\t\t\"validators\": validators,\n\t}\n\treturn result, nil\n}", "func (c *ECS) ListTasksRequest(input *ListTasksInput) (req *aws.Request, output *ListTasksOutput) {\n\toprw.Lock()\n\tdefer oprw.Unlock()\n\n\tif opListTasks == nil {\n\t\topListTasks = &aws.Operation{\n\t\t\tName: \"ListTasks\",\n\t\t\tHTTPMethod: \"POST\",\n\t\t\tHTTPPath: \"/\",\n\t\t}\n\t}\n\n\treq = c.newRequest(opListTasks, input, output)\n\toutput = &ListTasksOutput{}\n\treq.Data = output\n\treturn\n}", "func GetRequests(c *gin.Context) {}", "func (a *BulkApiService) GetBulkRequestList(ctx context.Context) ApiGetBulkRequestListRequest {\n\treturn ApiGetBulkRequestListRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}", "func (r *friendRepository) RequestList(id string) (*[]model.FriendRequest, error) {\n\tvar requests []model.FriendRequest\n\n\tresult := r.DB.\n\t\tRaw(`\n\t\t select u.id, u.username, u.image, 1 as \"type\" from users u\n\t\t join friend_requests fr on u.id = fr.\"sender_id\"\n\t\t where fr.\"receiver_id\" = @id\n\t\t UNION\n\t\t select u.id, u.username, u.image, 0 as \"type\" from users u\n\t\t join friend_requests fr on u.id = fr.\"receiver_id\"\n\t\t where fr.\"sender_id\" = @id\n\t\t order by username;\n\t\t`, sql.Named(\"id\", id)).\n\t\tFind(&requests)\n\n\treturn &requests, result.Error\n}", "func (b *ApprovalWorkflowProviderRequestBuilder) Requests() *ApprovalWorkflowProviderRequestsCollectionRequestBuilder {\n\tbb := &ApprovalWorkflowProviderRequestsCollectionRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += 
\"/requests\"\n\treturn bb\n}", "func (client IdentityClient) ListTaggingWorkRequestLogs(ctx context.Context, request ListTaggingWorkRequestLogsRequest) (response ListTaggingWorkRequestLogsResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.listTaggingWorkRequestLogs, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = ListTaggingWorkRequestLogsResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = ListTaggingWorkRequestLogsResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(ListTaggingWorkRequestLogsResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into ListTaggingWorkRequestLogsResponse\")\n\t}\n\treturn\n}", "func (c *gitlabClient) PullRequestEvents(context.Context, string, []interface{}) ([]sdk.VCSPullRequestEvent, error) {\n\treturn nil, fmt.Errorf(\"Not implemented on Gitlab\")\n}", "func (c APIClient) ListCommitF(repoName string, to string, from string, number uint64, f func(*pfs.CommitInfo) error) error {\n\treq := &pfs.ListCommitRequest{\n\t\tRepo: NewRepo(repoName),\n\t\tNumber: number,\n\t}\n\tif from != \"\" {\n\t\treq.From = NewCommit(repoName, from)\n\t}\n\tif to != \"\" {\n\t\treq.To = NewCommit(repoName, to)\n\t}\n\tstream, err := c.PfsAPIClient.ListCommitStream(c.Ctx(), req)\n\tif err != nil {\n\t\treturn grpcutil.ScrubGRPC(err)\n\t}\n\tfor {\n\t\tci, err := stream.Recv()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn grpcutil.ScrubGRPC(err)\n\t\t}\n\t\tif err := f(ci); err != nil {\n\t\t\tif err == errutil.ErrBreak {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (a *API) DoctorRequestsList(r *gin.Context) {\n\tvar (\n\t\taccountIDString = r.Query(\"account_id\")\n\t\tstatus = r.Query(\"status\")\n\t\tincludeAccounts = r.Query(\"include_accounts\") == \"true\"\n\t\tskipAnswer = r.Query(\"skip_answer\") == \"true\"\n\t\toffsetString = r.Query(\"offset\")\n\t\tlimitString = r.Query(\"limit\")\n\t\torderBy = r.DefaultQuery(\"order_by\", \"requests.created_at\")\n\t)\n\n\tvar (\n\t\toffset int\n\t\tlimit = 50\n\t)\n\n\tif parsedOffset, err := strconv.Atoi(offsetString); err == nil {\n\t\toffset = parsedOffset\n\t}\n\n\tif parsedLimit, err := strconv.Atoi(limitString); err == nil {\n\t\tlimit = parsedLimit\n\t}\n\n\tquery := psql.Select(\n\t\t\"requests.id\",\n\t\t\"requests.account_id\",\n\t\t\"requests.status\",\n\t\t\"requests.answer_text\",\n\t\t\"requests.answered_by\",\n\t\t\"requests.answered_at\",\n\t\t\"requests.created_at\",\n\t\t\"requests.updated_at\",\n\t\t\"accounts.id\",\n\t\t\"accounts.name\",\n\t\t\"accounts.email\",\n\t\t\"accounts.phone\",\n\t\t\"accounts.gender\",\n\t\t\"accounts.birth_date\",\n\t\t\"accounts.created_at\",\n\t\t\"accounts.updated_at\",\n\t).From(\"requests\").\n\t\tLeftJoin(\"accounts ON accounts.id = requests.account_id\").\n\t\tOrderBy(orderBy)\n\n\tquery = query.Columns(`COUNT(*) OVER() AS \"total_count\"`)\n\n\tif accountIDString != \"\" {\n\t\taccountID, err := types.StringToUUID(accountIDString)\n\t\tif 
err != nil {\n\t\t\tr.AbortWithStatusJSON(http.StatusBadRequest, gin.H{\n\t\t\t\t\"error\": \"Invalid account ID\",\n\t\t\t})\n\t\t\treturn\n\t\t}\n\t\tquery = query.Where(\"requests.account_id = ?\", &accountID)\n\t}\n\n\tif status != \"\" {\n\t\tquery = query.Where(\"requests.status = ?\", status)\n\t}\n\n\tif filters := r.QueryMap(\"filters\"); len(filters) > 0 {\n\t\tfor key, value := range filters {\n\t\t\tvar (\n\t\t\t\tparts = strings.SplitN(value, \":\", 2)\n\t\t\t\tdesired = []interface{}{}\n\t\t\t\tsymbol string\n\t\t\t)\n\t\t\tswitch parts[0] {\n\t\t\tcase \"eq\":\n\t\t\t\tsymbol = \"= ?\"\n\t\t\t\tdesired = append(desired, tryParse(key, parts[1]))\n\t\t\tcase \"like\":\n\t\t\t\tsymbol = \"LIKE ?\"\n\t\t\t\tdesired = append(desired, tryParse(key, parts[1]))\n\t\t\tcase \"ilike\":\n\t\t\t\tsymbol = \"ILIKE ?\"\n\t\t\t\tdesired = append(desired, tryParse(key, parts[1]))\n\t\t\tcase \"contains\":\n\t\t\t\tsymbol = \"LIKE ?\"\n\t\t\t\tdesired = append(desired, \"%\"+parts[1]+\"%\")\n\t\t\tcase \"icontains\":\n\t\t\t\tsymbol = \"ILIKE ?\"\n\t\t\t\tdesired = append(desired, \"%\"+parts[1]+\"%\")\n\t\t\tcase \"ne\":\n\t\t\t\tsymbol = \"!= ?\"\n\t\t\t\tdesired = append(desired, tryParse(key, parts[1]))\n\t\t\tcase \"gt\":\n\t\t\t\tsymbol = \"> ?\"\n\t\t\t\tdesired = append(desired, tryParse(key, parts[1]))\n\t\t\tcase \"gte\":\n\t\t\t\tsymbol = \">= ?\"\n\t\t\t\tdesired = append(desired, tryParse(key, parts[1]))\n\t\t\tcase \"lt\":\n\t\t\t\tsymbol = \"< ?\"\n\t\t\t\tdesired = append(desired, tryParse(key, parts[1]))\n\t\t\tcase \"lte\":\n\t\t\t\tsymbol = \"<= ?\"\n\t\t\t\tdesired = append(desired, tryParse(key, parts[1]))\n\t\t\tcase \"between\":\n\t\t\t\tsymbol = \"BETWEEN ? AND ?\"\n\t\t\t\tsubparts := strings.SplitN(parts[1], \":\", 2)\n\t\t\t\tdesired = append(desired, tryParse(key, subparts[0]), tryParse(key, subparts[1]))\n\t\t\t}\n\n\t\t\tquery = query.Where(key+\" \"+symbol, desired...)\n\t\t}\n\t}\n\n\tquery = query.Offset(uint64(offset)).Limit(uint64(limit))\n\n\tsql, args, err := query.ToSql()\n\tif err != nil {\n\t\tlog.Println(\"Error while building the ListRequests query\", err)\n\t\tr.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{\n\t\t\t\"error\": \"Unable to build the query\",\n\t\t})\n\t\treturn\n\t}\n\n\trows, err := a.DB.QueryEx(r, sql, nil, args...)\n\tif err != nil {\n\t\tlog.Println(\"Error while executing the ListRequests query\", err)\n\t\tr.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{\n\t\t\t\"error\": \"Unable to list requests, \" + err.Error(),\n\t\t})\n\t\treturn\n\t}\n\n\tresponse := &extResponse{\n\t\tData: []*extRequest{},\n\t}\n\n\tfor rows.Next() {\n\t\titem := &extRequest{\n\t\t\tRequest: &models.Request{},\n\t\t\tAccount: &models.Account{},\n\t\t}\n\n\t\tif err := rows.Scan(\n\t\t\t&item.Request.ID,\n\t\t\t&item.Request.AccountID,\n\t\t\t&item.Request.Status,\n\t\t\t&item.Request.AnswerText,\n\t\t\t&item.Request.AnsweredBy,\n\t\t\t&item.Request.AnsweredAt,\n\t\t\t&item.Request.CreatedAt,\n\t\t\t&item.Request.UpdatedAt,\n\t\t\t&item.Account.ID,\n\t\t\t&item.Account.Name,\n\t\t\t&item.Account.Email,\n\t\t\t&item.Account.Phone,\n\t\t\t&item.Account.Gender,\n\t\t\t&item.Account.BirthDate,\n\t\t\t&item.Account.CreatedAt,\n\t\t\t&item.Account.UpdatedAt,\n\t\t\t&response.TotalCount,\n\t\t); err != nil {\n\t\t\tr.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{\n\t\t\t\t\"error\": \"Unable to scan requests\",\n\t\t\t})\n\t\t\treturn\n\t\t}\n\n\t\tif skipAnswer {\n\t\t\titem.AnswerText = types.ExtendedText{\n\t\t\t\tStatus: 
pgtype.Null,\n\t\t\t}\n\t\t}\n\n\t\tif !includeAccounts {\n\t\t\titem.Account = nil\n\t\t}\n\n\t\tresponse.Data = append(response.Data, item)\n\t}\n\n\tr.JSON(http.StatusOK, response)\n}", "func (s *PullRequestsService) RequestReviewers(ctx context.Context, owner, repo string, number int, logins []string) (*PullRequest, *Response, error) {\n\tu := fmt.Sprintf(\"repos/%s/%s/pulls/%d/requested_reviewers\", owner, repo, number)\n\n\treviewers := struct {\n\t\tReviewers []string `json:\"reviewers,omitempty\"`\n\t}{\n\t\tReviewers: logins,\n\t}\n\treq, err := s.client.NewRequest(\"POST\", u, &reviewers)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tr := new(PullRequest)\n\tresp, err := s.client.Do(ctx, req, r)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn r, resp, nil\n}", "func (r *InformationProtectionThreatAssessmentRequestsCollectionRequest) Get(ctx context.Context) ([]ThreatAssessmentRequestObject, error) {\n\treturn r.GetN(ctx, 0)\n}", "func (client *DiskEncryptionSetsClient) listCreateRequest(ctx context.Context, options *DiskEncryptionSetsListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskEncryptionSets\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (dal *UserDAL) GetUserRequests(id int, before time.Time, size int) ([]models.Request, error) {\n\treqs := []models.Request{}\n\terr := dal.db.Select(&reqs,\n\t\t`SELECT * FROM request WHERE userid = $1 AND timestamp < $2\n\t\t\tORDER BY timestamp DESC\n\t\t\tLIMIT $3`, id, before.UTC(), size)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn reqs, nil\n}", "func (g *channelClient) GetTicketList(ctx context.Context, in *types.ReqNil) ([]*ty.Ticket, error) {\n\tinn := *in\n\tdata, err := g.ExecWalletFunc(ty.TicketX, \"WalletGetTickets\", &inn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn data.(*ty.ReplyWalletTickets).Tickets, nil\n}", "func (_BaseLibrary *BaseLibraryCaller) ApprovalRequests(opts *bind.CallOpts, arg0 *big.Int) (common.Address, error) {\n\tvar out []interface{}\n\terr := _BaseLibrary.contract.Call(opts, &out, \"approvalRequests\", arg0)\n\n\tif err != nil {\n\t\treturn *new(common.Address), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)\n\n\treturn out0, err\n\n}", "func (client *Client) listCreateRequest(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, resourceType string, resourceName string, options *ClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}/providers/Microsoft.Resources/changes\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, 
errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif resourceProviderNamespace == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceProviderNamespace cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceProviderNamespace}\", url.PathEscape(resourceProviderNamespace))\n\tif resourceType == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceType}\", url.PathEscape(resourceType))\n\tif resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(resourceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-05-01\")\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(*options.Top, 10))\n\t}\n\tif options != nil && options.SkipToken != nil {\n\t\treqQP.Set(\"$skipToken\", *options.SkipToken)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (c *Client) CommittedTickets(ct *walletrpc.CommittedTicketsRequest) (*walletrpc.CommittedTicketsResponse, error) {\n\tif c.wallet == nil {\n\t\treturn nil, fmt.Errorf(\"walletrpc client not loaded\")\n\t}\n\n\tif c.cfg.Verbose {\n\t\tfmt.Printf(\"walletrpc %v CommittedTickets\\n\", c.cfg.WalletHost)\n\t}\n\n\tctr, err := c.wallet.CommittedTickets(c.ctx, ct)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif c.cfg.Verbose {\n\t\terr := prettyPrintJSON(ctr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn ctr, nil\n}", "func GetCommitList(dbOwner, dbFolder, dbName string) (map[string]CommitEntry, error) {\n\tdbQuery := `\n\t\tWITH u AS (\n\t\t\tSELECT user_id\n\t\t\tFROM users\n\t\t\tWHERE lower(user_name) = lower($1)\n\t\t)\n\t\tSELECT commit_list as commits\n\t\tFROM sqlite_databases AS db, u\n\t\tWHERE db.user_id = u.user_id\n\t\t\tAND db.folder = $2\n\t\t\tAND db.db_name = $3\n\t\t\tAND db.is_deleted = false`\n\tvar l map[string]CommitEntry\n\terr := pdb.QueryRow(dbQuery, dbOwner, dbFolder, dbName).Scan(&l)\n\tif err != nil {\n\t\tlog.Printf(\"Retrieving commit list for '%s%s%s' failed: %v\\n\", dbOwner, dbFolder, dbName, err)\n\t\treturn map[string]CommitEntry{}, err\n\t}\n\treturn l, nil\n}", "func (r *PullsListCommitsReq) HTTPRequest(ctx context.Context, opt ...RequestOption) (*http.Request, error) {\n\treturn buildHTTPRequest(ctx, r, opt)\n}", "func (c *Client) BuildListRequest(ctx context.Context, v interface{}) (*http.Request, error) {\n\tu := &url.URL{Scheme: c.scheme, Host: c.host, Path: ListLogPath()}\n\treq, err := http.NewRequest(\"POST\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, goahttp.ErrInvalidURL(\"log\", \"list\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}", "func (r *ScheduleTimeOffRequestsCollectionRequest) Get(ctx context.Context) ([]TimeOffRequestObject, error) {\n\treturn r.GetN(ctx, 0)\n}", "func (client IdentityClient) ListIamWorkRequests(ctx context.Context, request ListIamWorkRequestsRequest) (response ListIamWorkRequestsResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := 
common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.listIamWorkRequests, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = ListIamWorkRequestsResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = ListIamWorkRequestsResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(ListIamWorkRequestsResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into ListIamWorkRequestsResponse\")\n\t}\n\treturn\n}", "func (*ListProvisioningApprovalRequestsRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_devices_proto_v1alpha_provisioning_approval_request_service_proto_rawDescGZIP(), []int{3}\n}", "func (client *ManagedClustersClient) listCreateRequest(ctx context.Context, options *ManagedClustersClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedClusters\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *CertificateOrdersClient) listCreateRequest(ctx context.Context, options *CertificateOrdersClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.CertificateRegistration/certificateOrders\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (c *ExplorerController) GetCrossTxList() {\n\t// get parameter\n\tvar crossTxListReq model.CrossTxListReq\n\tvar err error\n\tif err = json.Unmarshal(c.Ctx.Input.RequestBody, &crossTxListReq); err != nil {\n\t\tc.Data[\"json\"] = model.MakeErrorRsp(fmt.Sprintf(\"request parameter is invalid!\"))\n\t\tc.Ctx.ResponseWriter.WriteHeader(400)\n\t\tc.ServeJSON()\n\t}\n\n\tsrcPolyDstRelations := make([]*model.SrcPolyDstRelation, 0)\n\tdb.Model(&model.PolyTransaction{}).\n\t\tSelect(\"src_transactions.hash as src_hash, poly_transactions.hash as poly_hash, dst_transactions.hash as dst_hash\").\n\t\tWhere(\"src_transactions.standard = ?\", 0).\n\t\tJoins(\"left join src_transactions on src_transactions.hash = poly_transactions.src_hash\").\n\t\tJoins(\"left 
join dst_transactions on poly_transactions.hash = dst_transactions.poly_hash\").\n\t\tPreload(\"SrcTransaction\").\n\t\tPreload(\"SrcTransaction.SrcTransfer\").\n\t\tPreload(\"PolyTransaction\").\n\t\tPreload(\"DstTransaction\").\n\t\tPreload(\"DstTransaction.DstTransfer\").\n\t\tLimit(crossTxListReq.PageSize).Offset(crossTxListReq.PageSize * crossTxListReq.PageNo).\n\t\tFind(&srcPolyDstRelations)\n\n\tvar transactionNum int64\n\tdb.Model(&model.PolyTransaction{}).Where(\"src_transactions.standard = ?\", 0).\n\t\tJoins(\"left join src_transactions on src_transactions.hash = poly_transactions.src_hash\").Count(&transactionNum)\n\n\tc.Data[\"json\"] = model.MakeCrossTxListResp(srcPolyDstRelations)\n\tc.ServeJSON()\n}", "func (o *FiltersApiLog) GetRequestIds() []string {\n\tif o == nil || o.RequestIds == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn *o.RequestIds\n}", "func (c *Client) NewListAssignedResourceRolesRequest(ctx context.Context, path string) (*http.Request, error) {\n\tscheme := c.Scheme\n\tif scheme == \"\" {\n\t\tscheme = \"http\"\n\t}\n\tu := url.URL{Host: c.Host, Scheme: scheme, Path: path}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif c.JWTSigner != nil {\n\t\tc.JWTSigner.Sign(req)\n\t}\n\treturn req, nil\n}", "func (o *CommitteeInfoResponse) GetCommitteeNodes() []CommitteeNode {\n\tif o == nil {\n\t\tvar ret []CommitteeNode\n\t\treturn ret\n\t}\n\n\treturn o.CommitteeNodes\n}", "func NewListEventsRequest(server string, params *ListEventsParams) (*http.Request, error) {\n\tvar err error\n\n\tqueryUrl, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbasePath := fmt.Sprintf(\"/event\")\n\tif basePath[0] == '/' {\n\t\tbasePath = basePath[1:]\n\t}\n\n\tqueryUrl, err = queryUrl.Parse(basePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tqueryValues := queryUrl.Query()\n\n\tif params.From != nil {\n\n\t\tif queryFrag, err := runtime.StyleParam(\"form\", true, \"from\", *params.From); err != nil {\n\t\t\treturn nil, err\n\t\t} else if parsed, err := url.ParseQuery(queryFrag); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tfor k, v := range parsed {\n\t\t\t\tfor _, v2 := range v {\n\t\t\t\t\tqueryValues.Add(k, v2)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif params.To != nil {\n\n\t\tif queryFrag, err := runtime.StyleParam(\"form\", true, \"to\", *params.To); err != nil {\n\t\t\treturn nil, err\n\t\t} else if parsed, err := url.ParseQuery(queryFrag); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tfor k, v := range parsed {\n\t\t\t\tfor _, v2 := range v {\n\t\t\t\t\tqueryValues.Add(k, v2)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\tqueryUrl.RawQuery = queryValues.Encode()\n\n\treq, err := http.NewRequest(\"GET\", queryUrl.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn req, nil\n}", "func (c *Client) ListQualificationRequests(ctx context.Context, params *ListQualificationRequestsInput, optFns ...func(*Options)) (*ListQualificationRequestsOutput, error) {\n\tif params == nil {\n\t\tparams = &ListQualificationRequestsInput{}\n\t}\n\n\tresult, metadata, err := c.invokeOperation(ctx, \"ListQualificationRequests\", params, optFns, addOperationListQualificationRequestsMiddlewares)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tout := result.(*ListQualificationRequestsOutput)\n\tout.ResultMetadata = metadata\n\treturn out, nil\n}", "func (c *Client) NewListRolesRequest(ctx context.Context, path string, resourceType *string) (*http.Request, error) 
{\n\tscheme := c.Scheme\n\tif scheme == \"\" {\n\t\tscheme = \"http\"\n\t}\n\tu := url.URL{Host: c.Host, Scheme: scheme, Path: path}\n\tvalues := u.Query()\n\tif resourceType != nil {\n\t\tvalues.Set(\"resource_type\", *resourceType)\n\t}\n\tu.RawQuery = values.Encode()\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif c.JWTSigner != nil {\n\t\tc.JWTSigner.Sign(req)\n\t}\n\treturn req, nil\n}", "func (n *QriNode) RequestPeersList(id peer.ID) {\n\tres, err := n.SendMessage(id, &Message{\n\t\tType: MtPeers,\n\t\tPayload: &PeersReqParams{\n\t\t\tOffset: 0,\n\t\t\tLimit: 10,\n\t\t},\n\t})\n\n\tif err != nil {\n\t\tfmt.Println(\"send peers message error:\", err.Error())\n\t\treturn\n\t}\n\n\tif res.Phase == MpResponse {\n\t\tif err := n.handlePeersResponse(res); err != nil {\n\t\t\tfmt.Println(\"peers response error\", err.Error())\n\t\t\treturn\n\t\t}\n\t}\n}", "func (c *NATSTestClient) GetParallelRequests(t *testing.T, n int) ParallelRequests {\n\tpr := make(ParallelRequests, n)\n\tfor i := 0; i < n; i++ {\n\t\tpr[i] = c.GetRequest(t)\n\t}\n\treturn pr\n}", "func (client IdentityClient) listCompartments(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/compartments\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response ListCompartmentsResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (ct *CountingTransport) Requests() []*http.Request {\n\treturn ct.reqs\n}", "func (c *Client) BuildListRequest(ctx context.Context, v interface{}) (*http.Request, error) {\n\tu := &url.URL{Scheme: c.scheme, Host: c.host, Path: ListRecorderPath()}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, goahttp.ErrInvalidURL(\"recorder\", \"list\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}", "func (_IOrakuruCore *IOrakuruCoreCaller) GetPendingRequests(opts *bind.CallOpts) ([][32]byte, error) {\n\tvar out []interface{}\n\terr := _IOrakuruCore.contract.Call(opts, &out, \"getPendingRequests\")\n\n\tif err != nil {\n\t\treturn *new([][32]byte), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new([][32]byte)).(*[][32]byte)\n\n\treturn out0, err\n\n}", "func (_Bridge *BridgeSession) GetInterchainRequests(b []byte) (string, error) {\n\treturn _Bridge.Contract.GetInterchainRequests(&_Bridge.CallOpts, b)\n}", "func (b *ScheduleRequestBuilder) OpenShiftChangeRequests() *ScheduleOpenShiftChangeRequestsCollectionRequestBuilder {\n\tbb := &ScheduleOpenShiftChangeRequestsCollectionRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/openShiftChangeRequests\"\n\treturn bb\n}", "func listSystemCountersRequest(c *xgb.Conn) []byte {\n\tsize := 4\n\tb := 0\n\tbuf := make([]byte, size)\n\n\tbuf[b] = c.Extensions[\"SYNC\"]\n\tb += 1\n\n\tbuf[b] = 1 // request opcode\n\tb += 1\n\n\txgb.Put16(buf[b:], uint16(size/4)) // write request size in 4-byte units\n\tb += 2\n\n\treturn buf\n}", "func (client *OperationsClient) listCreateRequest(ctx context.Context, options *OperationsListOptions) 
(*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.RecoveryServices/operations\"\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (r *SynchronizationJobsCollectionRequest) Get(ctx context.Context) ([]SynchronizationJob, error) {\n\treturn r.GetN(ctx, 0)\n}", "func (ftm *FtmBridge) withdrawRequestsList(addr *common.Address, staker *big.Int) ([]*types.WithdrawRequest, error) {\n\t// we need to have the address to continue\n\tif addr == nil {\n\t\tftm.log.Error(\"can not pull withdraw requests for empty address\")\n\t\treturn nil, fmt.Errorf(\"withdraw requests address filter not defined\")\n\t}\n\n\t// prepare to interact with the SFC contract\n\tcontract, err := contracts.NewSfcContract(ftm.sfcConfig.SFCContract, ftm.eth)\n\tif err != nil {\n\t\tftm.log.Criticalf(\"failed to instantiate SFC contract: %v\", err)\n\t\treturn nil, err\n\t}\n\n\t// get a list of finalized requests\n\tfin, err := ftm.withdrawnByRequest(contract, *addr, staker)\n\tif err != nil {\n\t\tftm.log.Error(\"can not pull finalized withdraw requests; %s\", err.Error())\n\t\treturn nil, err\n\t}\n\n\t// get a list of requests\n\treturn ftm.createdWithdrawRequests(contract, *addr, staker, fin)\n}", "func (r *Review) ListCommits() ([]string, error) {\n\tbaseCommit, err := r.GetBaseCommit()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\theadCommit, err := r.GetHeadCommit()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn r.Repo.ListCommitsBetween(baseCommit, headCommit)\n}", "func (gc *GithubClient) ListPullRequests(org, repo, head, base string) ([]*github.PullRequest, error) {\n\tPRsListOptions := github.PullRequestListOptions{\n\t\tState: string(PullRequestAllState),\n\t\tHead: head,\n\t\tBase: base,\n\t}\n\n\tgenericList, err := gc.depaginate(\n\t\tfmt.Sprintf(\"listing Pull Requests with head '%s' and base '%s'\", head, base),\n\t\tmaxRetryCount,\n\t\t&PRsListOptions.ListOptions,\n\t\tfunc() ([]interface{}, *github.Response, error) {\n\t\t\tpage, resp, err := gc.Client.PullRequests.List(ctx, org, repo, &PRsListOptions)\n\t\t\tvar interfaceList []interface{}\n\t\t\tif nil == err {\n\t\t\t\tfor _, PR := range page {\n\t\t\t\t\tinterfaceList = append(interfaceList, PR)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn interfaceList, resp, err\n\t\t},\n\t)\n\tres := make([]*github.PullRequest, len(genericList))\n\tfor i, elem := range genericList {\n\t\tres[i] = elem.(*github.PullRequest)\n\t}\n\treturn res, err\n}", "func (_IOrakuruCore *IOrakuruCoreSession) GetPendingRequests() ([][32]byte, error) {\n\treturn _IOrakuruCore.Contract.GetPendingRequests(&_IOrakuruCore.CallOpts)\n}", "func (client IdentityClient) ListIamWorkRequestLogs(ctx context.Context, request ListIamWorkRequestLogsRequest) (response ListIamWorkRequestLogsResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.listIamWorkRequestLogs, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := 
httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = ListIamWorkRequestLogsResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = ListIamWorkRequestLogsResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(ListIamWorkRequestLogsResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into ListIamWorkRequestLogsResponse\")\n\t}\n\treturn\n}", "func (b *ApprovalWorkflowProviderRequestsCollectionRequestBuilder) Request() *ApprovalWorkflowProviderRequestsCollectionRequest {\n\treturn &ApprovalWorkflowProviderRequestsCollectionRequest{\n\t\tBaseRequest: BaseRequest{baseURL: b.baseURL, client: b.client},\n\t}\n}", "func (r *RequestAPI) ListRequestV1(ctx context.Context, req *desc.ListRequestsV1Request) (*desc.ListRequestsV1Response, error) {\n\tlog.Printf(\"Got list request: %v\", req)\n\tspan, ctx := opentracing.StartSpanFromContext(ctx, \"ListRequestV1\")\n\tdefer span.Finish()\n\n\tif err := r.validateAndSendErrorEvent(ctx, req, producer.ReadEvent); err != nil {\n\t\treturn nil, err\n\t}\n\tvar (\n\t\trequests []models.Request\n\t\terr error\n\t)\n\n\tif req.SearchQuery != \"\" { // ideally would move search to a separate endpoint, so it's easier to extend\n\t\trequests, err = r.searcher.Search(ctx, req.SearchQuery, req.Limit, req.Offset)\n\t} else {\n\t\trequests, err = r.repo.List(ctx, req.Limit, req.Offset)\n\t}\n\n\tif err != nil {\n\t\tlog.Error().\n\t\t\tErr(err).\n\t\t\tStr(\"endpoint\", \"ListRequestV1\").\n\t\t\tUint64(\"limit\", req.Limit).\n\t\t\tUint64(\"offset\", req.Offset).\n\t\t\tMsgf(\"Failed to list requests\")\n\t\tr.producer.Send(producer.NewEvent(ctx, 0, producer.ReadEvent, err))\n\t\treturn nil, err\n\t}\n\n\tret := make([]*desc.Request, 0, len(requests))\n\teventMsgs := make([]producer.EventMsg, 0, len(requests))\n\n\tfor _, req := range requests {\n\t\tret = append(ret, &desc.Request{\n\t\t\tId: req.Id,\n\t\t\tUserId: req.UserId,\n\t\t\tType: req.Type,\n\t\t\tText: req.Text,\n\t\t})\n\t\teventMsgs = append(eventMsgs, producer.NewEvent(ctx, req.Id, producer.ReadEvent, nil))\n\t\tr.producer.Send(eventMsgs...)\n\n\t}\n\tr.metrics.IncList(1, \"ListRequestV1\")\n\treturn &desc.ListRequestsV1Response{\n\t\tRequests: ret,\n\t}, nil\n}", "func (s *Service) GetRequest(identifiable DocIdentifiable) (*json.RawMessage, error) {\n\t// TODO: accept ctx as param\n\tget, err := s.Client.Get().\n\t\tIndex(s.RequestsIndex).\n\t\tType(\"_doc\").\n\t\tId(identifiable.DocID()).\n\t\tDo(context.TODO())\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn get.Source, nil\n}", "func (_IOrakuruCore *IOrakuruCoreCallerSession) GetPendingRequests() ([][32]byte, error) {\n\treturn _IOrakuruCore.Contract.GetPendingRequests(&_IOrakuruCore.CallOpts)\n}", "func (*ListCommitBlobsRequest) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{20}\n}", "func (c *Client) BuildListRequest(ctx context.Context, v interface{}) (*http.Request, error) {\n\tu := &url.URL{Scheme: c.scheme, Host: c.host, Path: ListRecipePath()}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, goahttp.ErrInvalidURL(\"recipe\", \"list\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}", "func (client *AvailabilitySetsClient) listCreateRequest(ctx context.Context, resourceGroupName string, options *AvailabilitySetsListOptions) 
(*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (*CUserAccount_GetFriendInviteTokens_Request) Descriptor() ([]byte, []int) {\n\treturn file_steammessages_useraccount_steamclient_proto_rawDescGZIP(), []int{8}\n}", "func (_Bridge *BridgeCallerSession) GetInterchainRequests(b []byte) (string, error) {\n\treturn _Bridge.Contract.GetInterchainRequests(&_Bridge.CallOpts, b)\n}", "func (g *V3) ListPullRequests(repo, state string) ([]scm.PullRequest, error) {\n\t// GitLab mr state: opened, closed, locked, merged, all\n\tvar s string\n\tswitch state {\n\tcase scm.PullRequestStateOpen:\n\t\ts = openedPullRequestState\n\tcase openedPullRequestState, \"closed\", \"locked\", \"merged\", \"all\":\n\t\ts = state\n\tdefault:\n\t\treturn nil, cerr.ErrorUnsupported.Error(\"GitLab(v3) pull request state\", state)\n\t}\n\n\topts := &v3.ListMergeRequestsOptions{\n\t\tState: &s,\n\t\tListOptions: v3.ListOptions{\n\t\t\tPerPage: scm.ListOptPerPage,\n\t\t},\n\t}\n\n\tvar allPRs []scm.PullRequest\n\tfor {\n\t\tprs, resp, err := g.client.MergeRequests.ListMergeRequests(repo, opts)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Fail to list merge requests for %s\", repo)\n\t\t\treturn nil, convertGitlabError(err, resp)\n\t\t}\n\n\t\tfor _, p := range prs {\n\t\t\tallPRs = append(allPRs, scm.PullRequest{\n\t\t\t\tID: p.IID,\n\t\t\t\tTitle: p.Title,\n\t\t\t\tDescription: p.Description,\n\t\t\t\tState: p.State,\n\t\t\t\tTargetBranch: p.TargetBranch,\n\t\t\t})\n\t\t}\n\t\tif resp.NextPage == 0 {\n\t\t\tbreak\n\t\t}\n\t\topts.Page = resp.NextPage\n\t}\n\n\treturn allPRs, nil\n}", "func (gc *GithubClient) ListCommits(org, repo string, ID int) ([]*github.RepositoryCommit, error) {\n\toptions := &github.ListOptions{}\n\tgenericList, err := gc.depaginate(\n\t\tfmt.Sprintf(\"listing commits in Pull Requests '%d'\", ID),\n\t\tmaxRetryCount,\n\t\toptions,\n\t\tfunc() ([]interface{}, *github.Response, error) {\n\t\t\tpage, resp, err := gc.Client.PullRequests.ListCommits(ctx, org, repo, ID, options)\n\t\t\tvar interfaceList []interface{}\n\t\t\tif nil == err {\n\t\t\t\tfor _, commit := range page {\n\t\t\t\t\tinterfaceList = append(interfaceList, commit)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn interfaceList, resp, err\n\t\t},\n\t)\n\tres := make([]*github.RepositoryCommit, len(genericList))\n\tfor i, elem := range genericList {\n\t\tres[i] = elem.(*github.RepositoryCommit)\n\t}\n\treturn res, err\n}", "func bulkRequest(list *AddressList) ([]byte, error) {\n\n\t// The max limit by WOC\n\tif len(list.Addresses) > MaxAddressesForLookup {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"max limit of addresses is %d and you sent %d\",\n\t\t\tMaxAddressesForLookup, 
len(list.Addresses),\n\t\t)\n\t}\n\n\t// Convert to JSON\n\treturn json.Marshal(list)\n}", "func (client *ApplyUpdatesClient) listCreateRequest(ctx context.Context, options *ApplyUpdatesClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Maintenance/applyUpdates\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (*ListCommitsRequest) Descriptor() ([]byte, []int) {\n\treturn file_modeldb_versioning_VersioningService_proto_rawDescGZIP(), []int{16}\n}", "func (g *Gitlab) ListProjectMergeRequests(projectID int, since, until *time.Time, page int) (mrs []*gitlab.MergeRequest, resp *gitlab.Response, err error) {\n\n\topt := &gitlab.ListProjectMergeRequestsOptions{}\n\topt.UpdatedAfter = since\n\topt.UpdatedBefore = until\n\tif page != -1 {\n\t\topt.PerPage = _defaultPerPage\n\t\topt.Page = page\n\t}\n\tif mrs, resp, err = g.client.MergeRequests.ListProjectMergeRequests(projectID, opt); err != nil {\n\t\terr = errors.Wrapf(err, \"ListProjectMergeRequests projectId(%d), err(%+v)\", projectID, err)\n\t\treturn\n\t}\n\treturn\n}" ]
[ "0.6726568", "0.60950726", "0.586521", "0.58409595", "0.58262", "0.5651529", "0.56400454", "0.5546509", "0.54614383", "0.54555523", "0.54187846", "0.5413614", "0.53488183", "0.5328241", "0.5320421", "0.5247767", "0.5227244", "0.5193367", "0.51814556", "0.51613", "0.51548237", "0.51507753", "0.51348835", "0.5126457", "0.51058555", "0.509366", "0.5088158", "0.5072668", "0.50578666", "0.5048555", "0.50237244", "0.50199956", "0.500786", "0.49749464", "0.4959912", "0.49524903", "0.49359754", "0.49349767", "0.4919758", "0.49015194", "0.48813763", "0.4839936", "0.4808469", "0.47891715", "0.4781909", "0.47586235", "0.47582963", "0.47325847", "0.47300786", "0.47212115", "0.47207266", "0.47185096", "0.4704169", "0.4701519", "0.46858352", "0.4675497", "0.46747357", "0.466636", "0.46658716", "0.4663552", "0.46478435", "0.46357915", "0.46329263", "0.462789", "0.46251485", "0.4623658", "0.46215618", "0.46156025", "0.46155426", "0.46107525", "0.46105766", "0.46092197", "0.46072093", "0.46058565", "0.46044436", "0.45984003", "0.45898333", "0.45867363", "0.4584807", "0.45826778", "0.45779404", "0.45722905", "0.457129", "0.45701164", "0.4569999", "0.4564677", "0.4564492", "0.45619726", "0.45614997", "0.45565656", "0.4556313", "0.4553868", "0.4544514", "0.4538372", "0.45381796", "0.45379522", "0.4533261", "0.45302185", "0.45301524", "0.4529612" ]
0.8405216
0
connect to bitcoind with HTTP RPC transport
func NewBtcClient() (*BtcClient, error) {
	var BtcHostPort = fmt.Sprintf("%s:%s", config.RpcConfig.BtcHost, config.RpcConfig.BtcPort)
	var BtcrpcUser = config.RpcConfig.BtcRpcUser
	var BtcrpcPassword = config.RpcConfig.BtcRpcPwd
	connCfg := &rpcclient.ConnConfig{
		Host:         BtcHostPort,
		User:         BtcrpcUser,
		Pass:         BtcrpcPassword,
		HTTPPostMode: true,
		DisableTLS:   true,
	}
	client := &BtcClient{}
	var err error
	client.rpcClient, err = rpcclient.New(connCfg, nil)
	if err != nil {
		return nil, err
	}
	fmt.Printf("network:%d coins=>btc_wallet=>initClient success.", client.rpcClient.NextID())
	return client, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func setupBitcoind(t *testing.T, minerAddr string,\n\trpcPolling bool) *BitcoindClient {\n\n\t// Start a bitcoind instance and connect it to miner1.\n\ttempBitcoindDir, err := ioutil.TempDir(\"\", \"bitcoind\")\n\trequire.NoError(t, err)\n\n\tzmqBlockHost := \"ipc:///\" + tempBitcoindDir + \"/blocks.socket\"\n\tzmqTxHost := \"ipc:///\" + tempBitcoindDir + \"/tx.socket\"\n\tt.Cleanup(func() {\n\t\tos.RemoveAll(tempBitcoindDir)\n\t})\n\n\trpcPort := rand.Int()%(65536-1024) + 1024\n\tbitcoind := exec.Command(\n\t\t\"bitcoind\",\n\t\t\"-datadir=\"+tempBitcoindDir,\n\t\t\"-regtest\",\n\t\t\"-connect=\"+minerAddr,\n\t\t\"-txindex\",\n\t\t\"-rpcauth=weks:469e9bb14ab2360f8e226efed5ca6f\"+\n\t\t\t\"d$507c670e800a95284294edb5773b05544b\"+\n\t\t\t\"220110063096c221be9933c82d38e1\",\n\t\tfmt.Sprintf(\"-rpcport=%d\", rpcPort),\n\t\t\"-disablewallet\",\n\t\t\"-zmqpubrawblock=\"+zmqBlockHost,\n\t\t\"-zmqpubrawtx=\"+zmqTxHost,\n\t)\n\trequire.NoError(t, bitcoind.Start())\n\n\tt.Cleanup(func() {\n\t\tbitcoind.Process.Kill()\n\t\tbitcoind.Wait()\n\t})\n\n\t// Wait for the bitcoind instance to start up.\n\ttime.Sleep(time.Second)\n\n\thost := fmt.Sprintf(\"127.0.0.1:%d\", rpcPort)\n\tcfg := &BitcoindConfig{\n\t\tChainParams: &chaincfg.RegressionNetParams,\n\t\tHost: host,\n\t\tUser: \"weks\",\n\t\tPass: \"weks\",\n\t\t// Fields only required for pruned nodes, not\n\t\t// needed for these tests.\n\t\tDialer: nil,\n\t\tPrunedModeMaxPeers: 0,\n\t}\n\n\tif rpcPolling {\n\t\tcfg.PollingConfig = &PollingConfig{\n\t\t\tBlockPollingInterval: time.Millisecond * 100,\n\t\t\tTxPollingInterval: time.Millisecond * 100,\n\t\t}\n\t} else {\n\t\tcfg.ZMQConfig = &ZMQConfig{\n\t\t\tZMQBlockHost: zmqBlockHost,\n\t\t\tZMQTxHost: zmqTxHost,\n\t\t\tZMQReadDeadline: 5 * time.Second,\n\t\t}\n\t}\n\n\tchainConn, err := NewBitcoindConn(cfg)\n\trequire.NoError(t, err)\n\trequire.NoError(t, chainConn.Start())\n\n\tt.Cleanup(func() {\n\t\tchainConn.Stop()\n\t})\n\n\t// Create a bitcoind client.\n\tbtcClient := chainConn.NewBitcoindClient()\n\trequire.NoError(t, btcClient.Start())\n\n\tt.Cleanup(func() {\n\t\tbtcClient.Stop()\n\t})\n\n\trequire.NoError(t, btcClient.NotifyBlocks())\n\n\treturn btcClient\n}", "func bitcoinRPC(arguments *bitcoinArguments, reply *bitcoinReply) error {\n\n\ts, err := json.Marshal(arguments)\n\tif nil != err {\n\t\treturn err\n\t}\n\n\tglobalData.log.Tracef(\"rpc send: %s\", s)\n\n\tpostData := bytes.NewBuffer(s)\n\n\trequest, err := http.NewRequest(\"POST\", globalData.url, postData)\n\tif nil != err {\n\t\treturn err\n\t}\n\trequest.SetBasicAuth(globalData.username, globalData.password)\n\n\tresponse, err := globalData.client.Do(request)\n\tif nil != err {\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\tbody, err := ioutil.ReadAll(response.Body)\n\tif nil != err {\n\t\treturn err\n\t}\n\n\tglobalData.log.Tracef(\"rpc response body: %s\", body)\n\n\terr = json.Unmarshal(body, &reply)\n\tif nil != err {\n\t\treturn err\n\t}\n\n\tglobalData.log.Debugf(\"rpc receive: %s\", body)\n\n\treturn nil\n}", "func bitcoinRPC(arguments *bitcoinArguments, reply *bitcoinReply) error {\n\n\ts, err := json.Marshal(arguments)\n\tif nil != err {\n\t\treturn err\n\t}\n\n\tglobalBitcoinData.log.Debugf(\"rpc send: %s\", s)\n\n\tpostData := bytes.NewBuffer(s)\n\n\trequest, err := http.NewRequest(\"POST\", globalBitcoinData.url, postData)\n\tif nil != err {\n\t\treturn err\n\t}\n\trequest.SetBasicAuth(globalBitcoinData.username, globalBitcoinData.password)\n\n\tresponse, err := globalBitcoinData.client.Do(request)\n\tif 
nil != err {\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\tbody, err := ioutil.ReadAll(response.Body)\n\tif nil != err {\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal(body, &reply)\n\tif nil != err {\n\t\treturn err\n\t}\n\n\tglobalBitcoinData.log.Debugf(\"rpc receive: %s\", body)\n\n\treturn nil\n}", "func main() {\n\tlog.UseLog(&zapimpl.Logger{}) // use zap log\n\tlog.SetLevel(log.DevDebugLevel)\n\tlog.Info(\"start up bitcoin rpc client\")\n\t//cfg := &client.Config{\n\t//\tHost: \"172.16.2.35:3333\",\n\t//\tUser: \"et\",\n\t//\tPass: \"www.et.com\",\n\t//}\n\tcfg := &client.Config{\n\t\tHost: \"172.16.2.27:8332\",\n\t\tUser: \"btc\",\n\t\tPass: \"btcpwd\",\n\t}\n\tdemos.Initialize(cfg)\n\tdefer demos.Shutdown()\n\t//common rpc method\n\t//demos.SendTest() cli.Send(\"command\", args...)\n\n\t//Blockchain rpc methods\n\t//demos.GetBestBlockHashTest()\n\t//demos.GetBlockBytesTest()\n\t//demos.GetBlockTest()\n\t//demos.GetBlockVerboseTXTest()\n\tdemos.GetBlockVerboseTXTest()\n\t//demos.GetBlockChainInfoTest()\n\t//demos.GetBlockCountTest()\n\t//demos.GetBlockHashTest()\n\t//demos.GetBlockHeaderTest()\n\t//demos.GetBlockStatsTest()\n\t//demos.GetChainTipsTest()\n\t//demos.GetChainTXStatsTest()\n\t//demos.GetChainTXStatsEntireTest()\n\t//demos.GetDifficultyTest()\n\t//demos.GetMempoolAncestorsTest()\n\t//demos.GetMempoolAncestorsVerboseTest()\n\t//demos.GetMempoolDescendantsTest()\n\t//demos.GetMempoolDescendantsVerboseTest()\n\t//demos.GetMempoolEntryTest()\n\t//demos.GetMempoolInfoTest()\n\t//demos.GetRawMempoolTest()\n\t//demos.GetRawMempoolVerboseTest()\n\t//demos.GetTXOutTest()\n\t//demos.GetTXOutProofTest()\n\t//demos.GetTXOutSetInfoTest()\n\t//demos.PreciousBlockTest()\n\t//demos.PruneBlockchainTest()\n\t//demos.SaveMempoolTest()\n\t//demos.VerifyChainTest()\n\t//demos.VerifyTXOutProofTest()\n\t//demos.VerifyChainLevelTest()\n\t//demos.VerifyChainBlocksTest()\n\n\t//Control rpc methods\n\t//demos.GetMemoryInfoTest()\n\t//demos.GetMemoryInfo4MallocInfoTest()\n\t//demos.HelpTest()\n\t//demos.StopTest()\n\t//demos.UptimeTest()\n\n\t//Generate rpc methods\n\t//demos.GenerateTest()\n\t//demos.GenerateToAddressTest()\n\n\t//Network rpc methods\n\t//demos.GetNetworkInfoTest()\n\n\t//RawTransactions rpc methods\n\t//demos.CombinePSBTTest() //ok\n\t//demos.CombineRawTransactionTest() //ok\n\t//demos.ConvertToPSBTTest() //ok\n\t//demos.CreatepSBTTest() //ok\n\t//demos.CreateRawTransactionTest() //ok\n\t//demos.DecodePSBTTest() //ok\n\t//demos.DecodeRawTransactionTest() //ok\n\t//demos.DecodeScriptTest() //ok\n\t//demos.FinalizePSBTTest() //ok\n\t//demos.FundRawTransactionTest() //ok\n\t//demos.GetRawTransactionTest() //ok\n\t//demos.GetRawTransactionVerboseTest() //ok\n\t//demos.SendRawTransactionTest() //ok\n\t//demos.SignRawTransactionWithKeyTest() //ok\n\t//demos.TestMempoolAcceptTest() //ok\n\n\t//Wallet rpc methods\n\t//demos.WalletTestInitialize() //ok\n\t//demos.AbandonTransactionTest() //ok\n\t//demos.AbortRescanTest() //ok\n\t//demos.AddMultiSigAddressTest() //ok\n\t//demos.BackupWalletTest() //ok\n\t//demos.BumpFeeTest() //ok\n\t//demos.CreateWalletTest() //ok\n\t//demos.DumpPrivkeyTest() //ok\n\t//demos.DumpWalletTest() //ok\n\t//demos.EncryptWalletTest() //ok\n\t//demos.GetAddressesByLabelTest() //ok\n\t//demos.GetAddressInfoTest() //ok\n\t//demos.GetBalanceTest() //ok\n\t//demos.GetBalanceEntireTest() //ok\n\t//demos.GetNewAddressTest() //ok\n\t//demos.GetNewAddressEntireTest() //ok\n\t//demos.GetRawChangeAddressTest() //ok\n\t//demos.GetReceivedByAddressTest() 
//ok\n\t//demos.GetTransactionTest() //ok\n\t//demos.GetUnconfirmedBalanceTest() //ok\n\t//demos.GetWalletInfoTest() //ok\n\t//demos.ImportaddressTest() //ok\n\t//demos.ImportMultiTest() //ok\n\t//demos.ImportPrivkeyTest() //ok\n\t//demos.ImportPrunedFundsTest() //TODO\n\t//demos.ImportPubkeyTest() //ok\n\t//demos.ImportWalletTest() //test exception\n\t//demos.KeypoolRefillTest() //ok\n\t//demos.ListAddressGroupingsTest() //ok\n\t//demos.ListLabelsTest() //ok\n\t//demos.ListLockUnspentTest() // todo https://bitcoincore.org/en/doc/0.17.0/rpc/wallet/listlockunspent/\n\t//demos.ListReceivedByAddressTest() //ok\n\t//demos.ListSinceBlockTest() //ok\n\t//demos.ListTransactionsTest() //ok\n\t//demos.ListUnspentTest() //ok\n\t//demos.ListUnspentEntireTest() //ok\n\t//demos.ListWalletsTest() //ok\n\t//demos.LoadWalletTest() //ok\n\t//demos.LockUnspentTest() //ok\n\t//demos.RemovePrunedFundsTest() //ok\n\t//demos.RescanBlockChainTest() //ok\n\t//demos.SendManyTest() //ok\n\t//demos.SendToAddressTest() //ok\n\t//demos.SendToAddressEntireTest() //ok\n\t//demos.SetHDSeedTest() //ok\n\t//demos.SetTXFeeTest() //ok\n\t//demos.SignMessageTest() //ok\n\t//demos.SignRawtransactionWithWalletTest() //TODO\n\t//demos.UnloadWalletTest() //ok\n\t//demos.WalletCreateFundedPSBTTest() //TODO no implement\n\t//demos.WalletLockTest() //ok\n\t//demos.WalletPassphraseTest() //ok\n\t//demos.WalletPassphraseChangeTest() //ok\n\t//demos.WalletProcessPSBTTest() //TODO\n}", "func New(host string, port int, user, passwd string, useSSL bool, chainParams *chaincfg.Params) (*Bitcoind, error) {\n\tBTCRpcClient, err := newClient(host, port, user, passwd, useSSL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Bitcoind{client: BTCRpcClient, ChainParams: chainParams}, nil\n}", "func rpcRequest(t *testing.T, url string, extraHeaders ...string) *http.Response {\n\tt.Helper()\n\n\t// Create the request.\n\tbody := bytes.NewReader([]byte(`{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"rpc_modules\",\"params\":[]}`))\n\treq, err := http.NewRequest(\"POST\", url, body)\n\tif err != nil {\n\t\tt.Fatal(\"could not create http request:\", err)\n\t}\n\treq.Header.Set(\"content-type\", \"application/json\")\n\n\t// Apply extra headers.\n\tif len(extraHeaders)%2 != 0 {\n\t\tpanic(\"odd extraHeaders length\")\n\t}\n\tfor i := 0; i < len(extraHeaders); i += 2 {\n\t\tkey, value := extraHeaders[i], extraHeaders[i+1]\n\t\tif strings.ToLower(key) == \"host\" {\n\t\t\treq.Host = value\n\t\t} else {\n\t\t\treq.Header.Set(key, value)\n\t\t}\n\t}\n\n\t// Perform the request.\n\tt.Logf(\"checking RPC/HTTP on %s %v\", url, extraHeaders)\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn resp\n}", "func connect(host string) (*rpc.Client, error) {\n\taddr := fmt.Sprintf(\"%s:%d\", host, *Port)\n\tc, e := rpc.DialHTTP(\"tcp\", addr)\n\tif e != nil {\n\t\treturn nil, fmt.Errorf(\"Dialing Prism %s: %v\", addr, e)\n\t}\n\treturn c, nil\n}", "func main() {\n\tcdc := app.MakeCodec()\n\n\tmode := flag.String(\"m\", \"\", \"client mode: get/send\")\n\taddr := flag.String(\"addr\", \"\", \"input account addr(bech32)\")\n\tsender := flag.String(\"from\", \"\", \"input sender addr\")\n\treceiver := flag.String(\"to\", \"\", \"input receive addr\")\n\tcoinStr := flag.String(\"coin\", \"\", \"input coinname,coinamount\")\n\n\tflag.Parse()\n\n\thttp := client.NewHTTP(\"tcp://127.0.0.1:26657\", \"/websocket\")\n\n\tif *mode == \"query\" {\n\t\tif *addr == \"\" {\n\t\t\tpanic(\"usage: go run main.go -m query -addr xxx 
\")\n\t\t}\n\t\taddress, _ := types.GetAddrFromBech32(*addr)\n\t\tkey := account.AddressStoreKey(address)\n\t\tresult, err := http.ABCIQuery(\"/store/acc/key\", key)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tqueryValueBz := result.Response.GetValue()\n\t\tvar acc *bctypes.AppAccount\n\t\tcdc.UnmarshalBinaryBare(queryValueBz, &acc)\n\n\t\tfmt.Println(fmt.Sprintf(\"query addr is %s = %v\", *addr, acc))\n\t}\n\n\tif *mode == \"transfer\" {\n\t\tcoin := strings.Split(*coinStr, \",\")\n\t\tif *sender == \"\" || *receiver == \"\" || len(coin) != 2 {\n\t\t\tpanic(\"usage: go run main.go -m transfer -from xxx -to xxx -coin xxx,xxx\")\n\t\t}\n\t\tsenderAddr, _ := types.GetAddrFromBech32(*sender)\n\t\treceiverAddr, _ := types.GetAddrFromBech32(*receiver)\n\t\tamount, _ := strconv.ParseInt(coin[1], 10, 64)\n\t\ttxStd := genSendTx(senderAddr, receiverAddr, bctypes.Coin{\n\t\t\tcoin[0],\n\t\t\ttypes.NewInt(amount),\n\t\t})\n\n\t\ttx, err := cdc.MarshalBinaryBare(txStd)\n\t\tif err != nil {\n\t\t\tpanic(\"use cdc encode object fail\")\n\t\t}\n\n\t\t_, err = http.BroadcastTxSync(tx)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tpanic(\"BroadcastTxSync err\")\n\t\t}\n\n\t\tfmt.Println(fmt.Sprintf(\"send tx is %v\", txStd))\n\t}\n\n}", "func init() {\n\tconfig = tendermint_test.ResetConfig(\"rpc_test_client_test\")\n\tchainID = config.GetString(\"chain_id\")\n\trpcAddr = config.GetString(\"rpc_laddr\")\n\tgrpcAddr = config.GetString(\"grpc_laddr\")\n\trequestAddr = rpcAddr\n\twebsocketAddr = rpcAddr\n\twebsocketEndpoint = \"/websocket\"\n\n\tclientURI = client.NewClientURI(requestAddr)\n\tclientJSON = client.NewClientJSONRPC(requestAddr)\n\tclientGRPC = core_grpc.StartGRPCClient(grpcAddr)\n\n\t// TODO: change consensus/state.go timeouts to be shorter\n\n\t// start a node\n\tready := make(chan struct{})\n\tgo newNode(ready)\n\t<-ready\n}", "func NewRpcConnection(rpcAddress string) (*rpc.Client, *ethclient.Client) {\n\n\tc, err := rpc.Dial(rpcAddress)\n\tif err != nil {\n\t\tlog.Fatalln(\"Could not connect to RPC!\", err)\n\t}\n\n\t// Create a RPC connection to a full node\n\tconn := ethclient.NewClient(c)\n\n\tfmt.Println(\"Dial ok!\")\n\n\treturn c, conn\n}", "func TestHttpBrToChain(t *testing.T) {\n\tfromStr := \"address1vpszt2jp2j8m5l3mutvqserzuu9uylmzydqaj9\"\n\ttoStr := \"address1eep59h9ez4thymept8nxl0padlrc6r78fsjmp3\"\n\tcoinstr := \"2qos\"\n\t//generate singed Tx\n\tchainid := \"capricorn-2000\"\n\tnonce := int64(1)\n\t//gas := NewBigInt(int64(0))\n\t//PrivKey output\n\tprivkey := \"sV5sRbwnR8DddL5e4UC1ntKPiOtGEaOFAqvePTfhJFI9GcC28zmPURSUI6C1oBlnk2ykBcAtIbYUazuCexWyqg==\"\n\n\tjasonpayload := LocalTxGen(fromStr, toStr, coinstr, chainid, privkey, nonce)\n\n\t//tbt := new(types.Tx)\n\t//err := Cdc.UnmarshalJSON(jasonpayload, tbt)\n\t//if err != nil {\n\t//\tfmt.Println(err)\n\t//}\n\t//\n\t//txBytes, err := Cdc.MarshalBinaryBare(jasonpayload)\n\t//if err != nil {\n\t//\tpanic(\"use cdc encode object fail\")\n\t//}\n\n\tclient := client.NewHTTP(\"tcp://192.168.1.183:26657\", \"/websocket\")\n\tresult, err := client.BroadcastTxCommit(jasonpayload)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tt.Log(result)\n}", "func (m *managedServer) dial() error {\n\tvar err error\n\tvar host, port string\n\n\tlastColonPos := strings.LastIndex(m.address, \":\")\n\tif lastColonPos == -1 {\n\t\thost = m.address\n\t\tport = \"80\"\n\t} else {\n\t\thost, port, err = net.SplitHostPort(m.address)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar conn net.Conn\n\tdialTimeout := 5 * 
time.Second\n\tif port == \"443\" {\n\t\tdialer := net.Dialer{Timeout: dialTimeout}\n\t\tconn, err = tls.DialWithDialer(&dialer, \"tcp\", net.JoinHostPort(host, port), nil)\n\t} else {\n\t\tconn, err = net.DialTimeout(\"tcp\", net.JoinHostPort(host, port), dialTimeout)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\thttpRequest := \"GET \" + liveo.RPCEndpoint + \" HTTP/1.0\\r\\n\"\n\thttpRequest += fmt.Sprintf(\"Host: %s\\r\\n\", host)\n\thttpRequest += fmt.Sprint(\"Connection: Upgrade\\r\\n\")\n\thttpRequest += fmt.Sprint(\"Upgrade: RPC\\r\\n\")\n\thttpRequest += fmt.Sprintf(\"Content-Length: %d\\r\\n\", len(liveo.SharedSecret))\n\thttpRequest += \"\\r\\n\"\n\thttpRequest += liveo.SharedSecret\n\tio.WriteString(conn, httpRequest)\n\n\t// Require successful HTTP response\n\t// before switching to RPC protocol.\n\tresp, err := http.ReadResponse(bufio.NewReader(conn), &http.Request{Method: \"CONNECT\"})\n\tif err != nil {\n\t\tconn.Close()\n\t\treturn err\n\t}\n\tif resp.Status != liveo.RPCConnectedStatus {\n\t\tconn.Close()\n\t\terr = errors.New(\"managedServer: unexpected HTTP response: \" + resp.Status)\n\t\treturn err\n\t}\n\n\t// Reset deadline so we don't lose the connection\n\tconn.SetDeadline(time.Time{})\n\n\tm.rpcClient = rpc.NewClient(conn)\n\treturn nil\n}", "func HTTPConnect(serviceURL string, opts ...ClientOpt) (jsonrpc.Client, error) {\n\ttransport, err := transport.NewHTTPClientTransport(serviceURL)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn New(append(opts, ClientTrans(transport))...)\n}", "func BitcoinInitialise(url string, username string, password string, minerAddress string, fee string, start uint64) error {\n\n\t// ensure payments are initialised\n\tif err := paymentInitialise(); nil != err {\n\t\treturn err\n\t}\n\n\tglobalBitcoinData.Lock()\n\tdefer globalBitcoinData.Unlock()\n\n\t// no need to start if already started\n\tif globalBitcoinData.initialised {\n\t\treturn fault.ErrAlreadyInitialised\n\t}\n\n\tif \"\" == minerAddress {\n\t\treturn fault.ErrPaymentAddressMissing\n\t}\n\n\tglobalBitcoinData.log = logger.New(bitcoinCurrencyName)\n\tif nil == globalBitcoinData.log {\n\t\treturn fault.ErrInvalidLoggerChannel\n\t}\n\tglobalBitcoinData.log.Info(\"starting…\")\n\n\tglobalBitcoinData.id = 0\n\tglobalBitcoinData.username = username\n\tglobalBitcoinData.password = password\n\tglobalBitcoinData.url = url\n\tglobalBitcoinData.minerAddress = minerAddress\n\tglobalBitcoinData.fee = convertToSatoshi([]byte(fee))\n\tglobalBitcoinData.latestBlockNumber = 0\n\tglobalBitcoinData.expire = make(map[uint64][]transaction.Link, bitcoinBlockRange)\n\n\tglobalBitcoinData.client = new(http.Client)\n\n\t// all data initialised\n\tglobalBitcoinData.initialised = true\n\n\t// query bitcoind for status\n\t// only need to have necessary fields as JSON unmarshaller will igtnore excess\n\tvar reply struct {\n\t\tVersion uint64 `json:\"version\"`\n\t\tBlocks uint64 `json:\"blocks\"`\n\t}\n\terr := bitcoinCall(\"getinfo\", []interface{}{}, &reply)\n\tif nil != err {\n\t\treturn err\n\t}\n\n\t// check version is sufficient\n\tif reply.Version < bitcoinMinimumVersion {\n\t\tglobalBitcoinData.log.Errorf(\"Bitcoin version: %d < allowed: %d\", reply.Version, bitcoinMinimumVersion)\n\t\treturn fault.ErrInvalidVersion\n\t} else {\n\t\tglobalBitcoinData.log.Infof(\"Bitcoin version: %d\", reply.Version)\n\t}\n\n\t// set up current block number\n\tglobalBitcoinData.latestBlockNumber = reply.Blocks\n\tglobalBitcoinData.log.Debugf(\"block count: %d\", 
globalBitcoinData.latestBlockNumber)\n\n\t// start background processes\n\tglobalBitcoinData.log.Info(\"start background\")\n\tglobalBitcoinData.background = background.Start(bitcoinProcesses, globalBitcoinData.log)\n\n\tregister(bitcoinCurrencyName, &callType{\n\t\tpay: bitcoinPay,\n\t\tminer: bitcoinAddress,\n\t})\n\n\tglobalBitcoinData.log.Info(\"about to return\")\n\treturn nil\n}", "func NewClient(config *Config) Client {\n\tendpoint := net.JoinHostPort(config.Hostname, strconv.Itoa(config.port()))\n\tif !strings.Contains(endpoint, \"//\") {\n\t\tendpoint = \"http://\" + endpoint\n\t}\n\n\topts := &jsonrpc.RPCClientOpts{\n\t\tCustomHeaders: map[string]string{\n\t\t\t\"Authorization\": \"Basic \" + base64.StdEncoding.EncodeToString([]byte(config.Username+\":\"+config.Password)),\n\t\t},\n\t}\n\n\treturn &client{\n\t\tRPCClient: jsonrpc.NewClientWithOpts(endpoint, opts),\n\t}\n}", "func DialHTTP(url string) *BitcoinRPC {\n\trpcClient, err := rpc.DialHTTP(url)\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn &BitcoinRPC{\n\t\tclient: rpcClient,\n\t}\n}", "func NewClient(config *Config) Client {\n\tendpoint := net.JoinHostPort(config.hostname(), strconv.Itoa(config.port()))\n\tif !strings.Contains(endpoint, \"//\") {\n\t\tendpoint = \"http://\" + endpoint\n\t}\n\n\topts := &jsonrpc.RPCClientOpts{\n\t\tCustomHeaders: map[string]string{\n\t\t\t\"Authorization\": \"Basic \" + base64.StdEncoding.EncodeToString([]byte(config.username()+\":\"+config.password())),\n\t\t},\n\t}\n\n\treturn &client{\n\t\tRPCClient: jsonrpc.NewClientWithOpts(endpoint, opts),\n\t}\n}", "func connectRPC(host, user, pass, certPath string) (*rpcclient.Client, error) {\n\t// Attempt to read certs\n\tcerts := []byte{}\n\tvar readCerts []byte\n\tvar err error\n\tif len(certPath) > 0 {\n\t\treadCerts, err = ioutil.ReadFile(certPath)\n\t} else {\n\t\t// Try a default cert path\n\t\tsoterdDir := soterutil.AppDataDir(\"soterd\", false)\n\t\treadCerts, err = ioutil.ReadFile(filepath.Join(soterdDir, \"rpc.cert\"))\n\t}\n\tif err == nil {\n\t\tcerts = readCerts\n\t}\n\n\tcfg := rpcclient.ConnConfig{\n\t\tHost: host,\n\t\tEndpoint: \"ws\",\n\t\tUser: user,\n\t\tPass: pass,\n\t\tCertificates: certs,\n\t\tDisableAutoReconnect: true,\n\t}\n\n\tclient, err := rpcclient.New(&cfg, nil)\n\tif err != nil {\n\t\treturn client, err\n\t}\n\n\treturn client, nil\n}", "func newRPCWallet(settings map[string]string, logger dex.Logger, net dex.Network) (*rpcWallet, error) {\n\tcfg, chainParams, err := loadRPCConfig(settings, net)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error parsing config: %w\", err)\n\t}\n\n\t// Check rpc connection config values\n\tmissing := \"\"\n\tif cfg.RPCUser == \"\" {\n\t\tmissing += \" username\"\n\t}\n\tif cfg.RPCPass == \"\" {\n\t\tmissing += \" password\"\n\t}\n\tif missing != \"\" {\n\t\treturn nil, fmt.Errorf(\"missing dcrwallet rpc credentials:%s\", missing)\n\t}\n\n\tlog := logger.SubLogger(\"RPC\")\n\trpcw := &rpcWallet{\n\t\tchainParams: chainParams,\n\t\tlog: log,\n\t}\n\n\tcerts, err := os.ReadFile(cfg.RPCCert)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"TLS certificate read error: %w\", err)\n\t}\n\n\tlog.Infof(\"Setting up rpc client to communicate with dcrwallet at %s with TLS certificate %q.\",\n\t\tcfg.RPCListen, cfg.RPCCert)\n\trpcw.rpcCfg = &rpcclient.ConnConfig{\n\t\tHost: cfg.RPCListen,\n\t\tEndpoint: \"ws\",\n\t\tUser: cfg.RPCUser,\n\t\tPass: cfg.RPCPass,\n\t\tCertificates: certs,\n\t\tDisableConnectOnNew: true, // don't start until Connect\n\t}\n\t// Validate the RPC client config, 
and create a placeholder (non-nil) RPC\n\t// connector and client that will be replaced on Connect. Any method calls\n\t// prior to Connect will be met with rpcclient.ErrClientNotConnected rather\n\t// than a panic.\n\tnodeRPCClient, err := rpcclient.New(rpcw.rpcCfg, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error setting up rpc client: %w\", err)\n\t}\n\trpcw.rpcConnector = nodeRPCClient\n\trpcw.rpcClient = newCombinedClient(nodeRPCClient, chainParams)\n\n\treturn rpcw, nil\n}", "func startChainRPC(certs []byte) (*chain.RPCClient, error) {\n\t// log <- cl.Infof{\n\t// \t\"attempting RPC client connection to %v, TLS: %s\",\n\t// \t*cfg.RPCConnect, fmt.Sprint(!*cfg.NoTLS),\n\t// }\n\t// spew.Dump(cfg)\n\trpcc, err := chain.NewRPCClient(\n\t\tActiveNet.Params,\n\t\t*cfg.RPCConnect,\n\t\t*cfg.Username,\n\t\t*cfg.Password,\n\t\tcerts,\n\t\t!*cfg.NoTLS, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = rpcc.Start()\n\treturn rpcc, err\n}", "func New(conf common.Bitcoind) (Bitcoind, error) {\n\t// Check if theres a bitcoin conf defined\n\tif conf.Host == \"\" {\n\t\tconf.Host = DefaultHostname\n\t}\n\tif conf.Port == 0 {\n\t\tconf.Port = DefaultPort\n\t}\n\tif conf.User == \"\" {\n\t\tconf.User = DefaultUsername\n\t}\n\tclient := Bitcoind{\n\t\turl: fmt.Sprintf(\"http://%s:%d\", conf.Host, conf.Port),\n\t\tuser: conf.User,\n\t\tpass: conf.Pass,\n\t}\n\tfmt.Printf(\"Creating bitcoin client... %s\\n\", client.url)\n\t_, err := client.BlockCount()\n\tif err != nil {\n\t\treturn Bitcoind{}, fmt.Errorf(\"can't connect to Bitcoind: %w\", err)\n\t}\n\n\treturn client, nil\n}", "func serviceRpc(hr HandlerReq) {\n\tw, r, rpc, dir := hr.w, hr.r, hr.Rpc, hr.Dir\n\taccess := hasAccess(r, dir, rpc, true)\n\n\tif access == false {\n\t\trenderNoAccess(w)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", fmt.Sprintf(\"application/x-git-%s-result\", rpc))\n\tw.Header().Set(\"Connection\", \"Keep-Alive\")\n\tw.Header().Set(\"Transfer-Encoding\", \"chunked\")\n\tw.Header().Set(\"X-Content-Type-Options\", \"nosniff\")\n\tw.WriteHeader(http.StatusOK)\n\n\tenv := os.Environ()\n\n\tif DefaultConfig.DefaultEnv != \"\" {\n\t\tenv = append(env, DefaultConfig.DefaultEnv)\n\t}\n\n\tuser, password, authok := r.BasicAuth()\n\tif authok {\n\t\tif DefaultConfig.AuthUserEnvVar != \"\" {\n\t\t\tenv = append(env, fmt.Sprintf(\"%s=%s\", DefaultConfig.AuthUserEnvVar, user))\n\t\t}\n\t\tif DefaultConfig.AuthPassEnvVar != \"\" {\n\t\t\tenv = append(env, fmt.Sprintf(\"%s=%s\", DefaultConfig.AuthPassEnvVar, password))\n\t\t}\n\t}\n\n\targs := []string{rpc, \"--stateless-rpc\", dir}\n\tcmd := exec.Command(DefaultConfig.GitBinPath, args...)\n\tversion := r.Header.Get(\"Git-Protocol\")\n\t\n\tcmd.Dir = dir\n\tcmd.Env = env\n\tif len(version) != 0 {\n\t\tcmd.Env = append(env, fmt.Sprintf(\"GIT_PROTOCOL=%s\", version))\n\t}\n\t\n\tDefaultConfig.CommandFunc(cmd)\n\n\tin, err := cmd.StdinPipe()\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n\n\tvar reader io.ReadCloser\n\tswitch r.Header.Get(\"Content-Encoding\") {\n\tcase \"gzip\":\n\t\treader, err = gzip.NewReader(r.Body)\n\t\tdefer reader.Close()\n\tdefault:\n\t\treader = r.Body\n\t}\n\tio.Copy(in, reader)\n\tin.Close()\n\n\tflusher, ok := w.(http.Flusher)\n\tif !ok {\n\t\tpanic(\"expected http.ResponseWriter to be an http.Flusher\")\n\t}\n\n\tp := make([]byte, 1024)\n\tfor {\n\t\tn_read, err := stdout.Read(p)\n\t\tif err 
== io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tn_write, err := w.Write(p[:n_read])\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tif n_read != n_write {\n\t\t\tfmt.Printf(\"failed to write data: %d read, %d written\\n\", n_read, n_write)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tflusher.Flush()\n\t}\n\n\tcmd.Wait()\n}", "func Connect(\n\tctx context.Context,\n\tconfig *Config,\n) (Handle, error) {\n\tlogger.Infof(\"connecting remote Bitcoin chain\")\n\n\tconnCfg := &rpcclient.ConnConfig{\n\t\tUser: config.Username,\n\t\tPass: config.Password,\n\t\tHost: config.URL,\n\t\tHTTPPostMode: true, // Bitcoin core only supports HTTP POST mode\n\t\tDisableTLS: true, // Bitcoin core does not provide TLS by default\n\t}\n\n\tclient, err := rpcclient.New(connCfg, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"failed to create rpc client at [%s]: [%v]\",\n\t\t\tconfig.URL,\n\t\t\terr,\n\t\t)\n\t}\n\n\terr = testConnection(client, connectionTimeout)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"error while connecting to [%s]: [%v]; check if the Bitcoin node \"+\n\t\t\t\t\"is running and you provided correct credentials and url\",\n\t\t\tconfig.URL,\n\t\t\terr,\n\t\t)\n\t}\n\n\t// When the context is done, cancel all requests from the RPC client\n\t// and disconnect it.\n\tgo func() {\n\t\t<-ctx.Done()\n\t\tlogger.Info(\"disconnecting from remote Bitcoin chain\")\n\t\tclient.Shutdown()\n\t}()\n\n\treturn &remoteChain{client: client}, nil\n}", "func BtcdConnect(certificates []byte) (*BtcdRPCConn, error) {\n\t// Open websocket connection.\n\tws, err := BtcdWS(certificates)\n\tif err != nil {\n\t\tlog.Errorf(\"Cannot open websocket connection to btcd: %v\", err)\n\t\treturn nil, err\n\t}\n\n\t// Create and start RPC connection using the btcd websocket.\n\trpc := NewBtcdRPCConn(ws)\n\trpc.Start()\n\treturn rpc, nil\n}", "func createRPCClient(serverIPPort string) *rpc.Client {\n\t// parse given string address\n\traddr, err := net.ResolveTCPAddr(\"tcp\", serverIPPort)\n\tif err != nil {\n\t\tlogger.Println(err)\n return nil\n\t}\n\t// dial rpc address\n\tconn, err := net.DialTCP(\"tcp\", nil, raddr)\n\tif err != nil {\n\t\tlogger.Println(err)\n return nil\n\t}\n\t// instantiate rpc client\n\tclient := rpc.NewClient(conn)\n\n\treturn client\n}", "func Relay(c *cli.Context) error {\n\tif c.IsSet(metricsFlag.Name) {\n\t\tmetricsListener := metrics.Start(c.String(metricsFlag.Name), pprof.WithProfile(), nil)\n\t\tdefer metricsListener.Close()\n\n\t\tif err := metrics.PrivateMetrics.Register(grpc_prometheus.DefaultClientMetrics); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tclient, err := lib.Create(c, c.IsSet(metricsFlag.Name))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\thandler, err := dhttp.New(c.Context, client, fmt.Sprintf(\"drand/%s (%s)\", version, gitCommit), log.DefaultLogger().With(\"binary\", \"relay\"))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create rest handler: %w\", err)\n\t}\n\n\tif c.IsSet(accessLogFlag.Name) {\n\t\tlogFile, err := os.OpenFile(c.String(accessLogFlag.Name), os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0666)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to open access log: %w\", err)\n\t\t}\n\t\tdefer logFile.Close()\n\t\thandler = handlers.CombinedLoggingHandler(logFile, handler)\n\t} else {\n\t\thandler = handlers.CombinedLoggingHandler(os.Stdout, handler)\n\t}\n\n\tbind := \"localhost:0\"\n\tif c.IsSet(listenFlag.Name) {\n\t\tbind = c.String(listenFlag.Name)\n\t}\n\tlistener, err := net.Listen(\"tcp\", bind)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\n\t// jumpstart bootup\n\treq, _ := http.NewRequest(\"GET\", \"/public/0\", nil)\n\trr := httptest.NewRecorder()\n\thandler.ServeHTTP(rr, req)\n\tif rr.Code != http.StatusOK {\n\t\tlog.DefaultLogger().Warn(\"binary\", \"relay\", \"startup failed\", rr.Code)\n\t}\n\n\tfmt.Printf(\"Listening at %s\\n\", listener.Addr())\n\treturn http.Serve(listener, handler)\n}", "func (a *WalletApplication) initMainnetConnection() {\n\ta.Network.URL = \"cl-lb-111349175.us-west-1.elb.amazonaws.com:9000\" // Temp\n\n\ta.Network.Handles.Send = \"/send\"\n\ta.Network.Handles.Transaction = \"/transaction\"\n\ta.Network.Handles.Balance = \"/balance\"\n\n\ta.Network.BlockExplorer.URL = \"https://2mqil2w38l.execute-api.us-west-1.amazonaws.com/block-explorer-api-dev\"\n\ta.Network.BlockExplorer.Handles.Transactions = \"/transactions/\"\n\ta.Network.BlockExplorer.Handles.Checkpoints = \"/checkpoints/\"\n\ta.Network.BlockExplorer.Handles.Snapshots = \"/snapshots/\"\n\ta.Network.BlockExplorer.Handles.CollectTX = \"/transactions?sender=\"\n}", "func call(rpcname string, args interface{}, reply interface{}) {\n\t// c, err := rpc.DialHTTP(\"tcp\", \"127.0.0.1\"+\":1234\")\n\tsockname := masterSock()\n\tc, err := rpc.DialHTTP(\"unix\", sockname)\n\tif err != nil {\n\t\tlog.Fatal(\"dialing:\", err)\n\t}\n\tdefer c.Close()\n\n\terr = c.Call(rpcname, args, reply)\n\tif err != nil {\n\t\tlog.Fatal(\"rpc.Client.Call:\", err)\n\t}\n}", "func setUpRPC(nodeRPC string) {\n\trpcServ := new(Service)\n\trpc.Register(rpcServ)\n\trpcAddr, err := net.ResolveTCPAddr(\"tcp\", nodeRPC)\n\tif err != nil {\n\t\tlog.Fatal(\"listen error:\", err)\n\t}\n\tl, e := net.ListenTCP(consts.TransProtocol, rpcAddr)\n\tif e != nil {\n\t\tlog.Fatal(\"listen error:\", e)\n\t}\n\tfor i := 0; i >= 0; i++ {\n\t\tconn, _ := l.AcceptTCP()\n\t\tcolorprint.Alert(\"=========================================================================================\")\n\t\tcolorprint.Debug(\"REQ \" + strconv.Itoa(i) + \": ESTABLISHING RPC REQUEST CONNECTION WITH \" + conn.LocalAddr().String())\n\t\tgo rpc.ServeConn(conn)\n\t\tcolorprint.Blue(\"REQ \" + strconv.Itoa(i) + \": Request Served\")\n\t\tcolorprint.Alert(\"=========================================================================================\")\n\t\tdefer conn.Close()\n\t}\n\tl.Close()\n\n\t// rpcServ := new(FTService)\n\t// rpc.Register(rpcServ)\n\t// rpcAddr, err := net.ResolveTCPAddr(\"tcp\", nodeRPC)\n\t// if err != nil {\n\t// \tlog.Fatal(\"listen error:\", err)\n\t// }\n\t// l, e := net.ListenTCP(consts.TransProtocol, rpcAddr)\n\t// if e != nil {\n\t// \tlog.Fatal(\"listen error:\", e)\n\t// }\n\t// for i := 0; i >= 0; i++ {\n\t// \tconn, _ := l.AcceptTCP()\n\t// \tcolorprint.Alert(\"=========================================================================================\")\n\t// \tcolorprint.Debug(\"REQ \" + strconv.Itoa(i) + \": ESTABLISHING RPC REQUEST CONNECTION WITH \" + conn.LocalAddr().String())\n\t// \trpc.ServeConn(conn)\n\t// \tcolorprint.Blue(\"REQ \" + strconv.Itoa(i) + \": Request Served\")\n\t// \tcolorprint.Alert(\"=========================================================================================\")\n\t// \t//defer conn.Close()\n\t// }\n\t// l.Close()\n\n}", "func connectRPC(hostPort, tlsCertPath, macaroonPath string) (*grpc.ClientConn,\n\terror) {\n\n\tcertBytes, err := ioutil.ReadFile(tlsCertPath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error reading TLS cert file %v: %v\",\n\t\t\ttlsCertPath, err)\n\t}\n\n\tcp := x509.NewCertPool()\n\tif 
!cp.AppendCertsFromPEM(certBytes) {\n\t\treturn nil, fmt.Errorf(\"credentials: failed to append \" +\n\t\t\t\"certificate\")\n\t}\n\n\tmacBytes, err := ioutil.ReadFile(macaroonPath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error reading macaroon file %v: %v\",\n\t\t\tmacaroonPath, err)\n\t}\n\tmac := &macaroon.Macaroon{}\n\tif err := mac.UnmarshalBinary(macBytes); err != nil {\n\t\treturn nil, fmt.Errorf(\"error decoding macaroon: %v\", err)\n\t}\n\n\tmacCred, err := macaroons.NewMacaroonCredential(mac)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error creating creds: %v\", err)\n\t}\n\n\topts := []grpc.DialOption{\n\t\tgrpc.WithTransportCredentials(credentials.NewClientTLSFromCert(\n\t\t\tcp, \"\",\n\t\t)),\n\t\tgrpc.WithPerRPCCredentials(macCred),\n\t}\n\tconn, err := grpc.Dial(hostPort, opts...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to connect to RPC server: %v\",\n\t\t\terr)\n\t}\n\n\treturn conn, nil\n}", "func (b *Backend) RPC(choice uint64, body []byte, v interface{}) error {\n\tconn, err := b.Dial()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer conn.Close()\n\n\tchoiceBuf := make([]byte, binary.MaxVarintLen64)\n\tbinary.PutUvarint(choiceBuf, choice)\n\t_, err = conn.conn.Write(choiceBuf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbodyLenBuf := make([]byte, binary.MaxVarintLen64)\n\tbinary.PutUvarint(bodyLenBuf, uint64(len(body)))\n\n\t_, err = conn.conn.Write(bodyLenBuf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = conn.conn.Write(body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trespLenBuf := make([]byte, binary.MaxVarintLen64)\n\t_, err = conn.conn.Read(respLenBuf)\n\tif err != nil {\n\t\treturn err\n\t}\n\trespLen, _ := binary.Uvarint(respLenBuf)\n\trespBuf := make([]byte, respLen)\n\t_, err = conn.conn.Read(respBuf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal(respBuf, v)\n\n\treturn err\n}", "func execNewClientConn(_ int, p *gop.Context) {\n\targs := p.GetArgs(2)\n\tret := httputil.NewClientConn(args[0].(net.Conn), args[1].(*bufio.Reader))\n\tp.Ret(2, ret)\n}", "func NewBackend(url string) Backend {\n\tcli, err := ethclient.Dial(url)\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n\treturn &backend{connect: cli}\n}", "func NewDirectHTTPRPCClient(c *Client, clientCodecFunc ClientCodecFunc, network, address string, path string, timeout time.Duration) (*rpc.Client, error) {\n\tif path == \"\" {\n\t\tpath = rpc.DefaultRPCPath\n\t}\n\n\tvar conn net.Conn\n\tvar tlsConn *tls.Conn\n\tvar err error\n\n\tif c != nil && c.TLSConfig != nil {\n\t\tdialer := &net.Dialer{\n\t\t\tTimeout: timeout,\n\t\t}\n\t\ttlsConn, err = tls.DialWithDialer(dialer, \"tcp\", address, c.TLSConfig)\n\t\t//or conn:= tls.Client(netConn, &config)\n\n\t\tconn = net.Conn(tlsConn)\n\t} else {\n\t\tconn, err = net.DialTimeout(\"tcp\", address, timeout)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tio.WriteString(conn, \"CONNECT \"+path+\" HTTP/1.0\\n\\n\")\n\n\t// Require successful HTTP response\n\t// before switching to RPC protocol.\n\tresp, err := http.ReadResponse(bufio.NewReader(conn), &http.Request{Method: \"CONNECT\"})\n\tif err == nil && resp.Status == connected {\n\t\tif c == nil || c.PluginContainer == nil {\n\t\t\treturn rpc.NewClientWithCodec(clientCodecFunc(conn)), nil\n\t\t}\n\t\twrapper := newClientCodecWrapper(c.PluginContainer, clientCodecFunc(conn), conn)\n\t\twrapper.Timeout = c.Timeout\n\t\twrapper.ReadTimeout = c.ReadTimeout\n\t\twrapper.WriteTimeout = c.WriteTimeout\n\n\t\treturn rpc.NewClientWithCodec(wrapper), 
nil\n\t}\n\tif err == nil {\n\t\terr = errors.New(\"unexpected HTTP response: \" + resp.Status)\n\t}\n\tconn.Close()\n\treturn nil, &net.OpError{\n\t\tOp: \"dial-http\",\n\t\tNet: network + \" \" + address,\n\t\tAddr: nil,\n\t\tErr: err,\n\t}\n}", "func (c *Client) Coinbase() (*AddressResponse, error) {\n\trequest := c.newRequest(EthCoinbase)\n\n\tresponse := &AddressResponse{}\n\n\treturn response, c.send(request, response)\n}", "func (r *Reconciler) Connect() error {\n\n\tr.CheckServiceStatus(r.ServiceMgmt, &r.NooBaa.Status.Services.ServiceMgmt, \"mgmt-https\")\n\tr.CheckServiceStatus(r.ServiceS3, &r.NooBaa.Status.Services.ServiceS3, \"s3-https\")\n\n\tif len(r.NooBaa.Status.Services.ServiceMgmt.NodePorts) == 0 {\n\t\treturn fmt.Errorf(\"core pod port not ready yet\")\n\t}\n\n\tnodePort := r.NooBaa.Status.Services.ServiceMgmt.NodePorts[0]\n\tnodeIP := nodePort[strings.Index(nodePort, \"://\")+3 : strings.LastIndex(nodePort, \":\")]\n\n\tr.NBClient = nb.NewClient(&nb.APIRouterNodePort{\n\t\tServiceMgmt: r.ServiceMgmt,\n\t\tNodeIP: nodeIP,\n\t})\n\n\tr.NBClient.SetAuthToken(r.SecretOp.StringData[\"auth_token\"])\n\n\t// Check that the server is indeed serving the API already\n\t// we use the read_auth call here because it's an API that always answers\n\t// even when auth_token is empty.\n\t_, err := r.NBClient.ReadAuthAPI()\n\treturn err\n\n\t// if len(r.NooBaa.Status.Services.ServiceMgmt.PodPorts) != 0 {\n\t// \tpodPort := r.NooBaa.Status.Services.ServiceMgmt.PodPorts[0]\n\t// \tpodIP := podPort[strings.Index(podPort, \"://\")+3 : strings.LastIndex(podPort, \":\")]\n\t// \tr.NBClient = nb.NewClient(&nb.APIRouterPodPort{\n\t// \t\tServiceMgmt: r.ServiceMgmt,\n\t// \t\tPodIP: podIP,\n\t// \t})\n\t// \tr.NBClient.SetAuthToken(r.SecretOp.StringData[\"auth_token\"])\n\t// \treturn nil\n\t// }\n\n}", "func execNewProxyClientConn(_ int, p *gop.Context) {\n\targs := p.GetArgs(2)\n\tret := httputil.NewProxyClientConn(args[0].(net.Conn), args[1].(*bufio.Reader))\n\tp.Ret(2, ret)\n}", "func CallRPC(rpcString string, args interface{}, reply interface{}, chordNodePtr *ChordNodePtr) error {\n\n\t// Just to test that my function signature syntax is correct:\n\tservice := chordNodePtr.IpAddress + \":\" + chordNodePtr.Port\n\tvar client *rpc.Client\n\tvar err error\n\tcallFailed := false\n\n\tclient = connections[service]\n\tif client != nil {\n\t\terr = client.Call(rpcString, args, reply)\n\t\tif err != nil {\n\t\t\tcallFailed = true\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tif client == nil || callFailed {\n\n\t\tclient, err = jsonrpc.Dial(\"tcp\", service)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Only maintain a persistent connection if the node we're contacting is\n\t\t// in our finger table, or if it's our predecessor.\n\n\t\tif isFingerOrPredecessor(chordNodePtr) || aFingerOrPredecessorIsNil() {\n\t\t\tconnections[service] = client\n\t\t} else {\n\t\t\tdefer client.Close()\n\t\t}\n\t}\n\n\terr = client.Call(rpcString, args, reply)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func bitcoinCall(method string, params []interface{}, reply interface{}) error {\n\tif !globalBitcoinData.initialised {\n\t\tfault.Panic(\"bitcoin not initialised\")\n\t}\n\n\tglobalBitcoinData.id += 1\n\n\targuments := bitcoinArguments{\n\t\tId: globalBitcoinData.id,\n\t\tMethod: method,\n\t\tParams: params,\n\t}\n\tresponse := bitcoinReply{\n\t\tResult: reply,\n\t}\n\terr := bitcoinRPC(&arguments, &response)\n\tif nil != err {\n\t\treturn err\n\t}\n\n\tif nil != response.Error {\n\t\ts := 
response.Error.Message\n\t\treturn fault.ProcessError(\"Bitcoin RPC error: \" + s)\n\t}\n\treturn nil\n}", "func dialRpcClient(servAddr string, portNo uint16) (*rpc.Client, net.Conn) {\n\tvar client *rpc.Client\n\tvar conn net.Conn\n\tvar err error\n\tlog.Infof(\"Connecting to RPC server: %s:%d\", servAddr, portNo)\n\n\t// Retry connecting for 5sec and then give up\n\tfor i := 0; i < 5; i++ {\n\t\t// Connect to the server\n\t\tconn, err = net.Dial(\"tcp\", fmt.Sprintf(\"%s:%d\", servAddr, portNo))\n\t\tif err == nil {\n\t\t\tlog.Infof(\"Connected to RPC server: %s:%d\", servAddr, portNo)\n\n\t\t\t// Create an RPC client\n\t\t\tclient = jsonrpc.NewClient(conn)\n\n\t\t\tbreak\n\t\t}\n\n\t\tlog.Warnf(\"Error %v connecting to %s:%s. Retrying..\", err, servAddr, portNo)\n\t\t// Sleep for a second and retry again\n\t\ttime.Sleep(1 * time.Second)\n\t}\n\n\t// If we failed to connect, report error\n\tif client == nil {\n\t\tlog.Errorf(\"Failed to connect to Rpc server %s:%d\", servAddr, portNo)\n\t\treturn nil, nil\n\t}\n\n\treturn client, conn\n}", "func (m *RPCModule) connect(host string, port int, user, pass string) util.Map {\n\n\t// Create a client\n\tc := client.NewClient(&types2.Options{\n\t\tHost: host,\n\t\tPort: port,\n\t\tUser: user,\n\t\tPassword: pass,\n\t})\n\n\t// Create a RPC context\n\tctx := m.ClientContextMaker(c)\n\n\t// Add call function for raw calls\n\tctx.Objects[\"call\"] = ctx.call\n\n\t// Attempt to query the methods from the RPC server.\n\t// Panics if not successful.\n\tmethods := ctx.call(\"rpc_methods\")\n\n\t// Build RPC namespaces and add methods into them\n\tfor _, method := range methods[\"methods\"].([]interface{}) {\n\t\to := objx.New(method)\n\t\tnamespace := o.Get(\"namespace\").String()\n\t\tname := o.Get(\"name\").String()\n\t\tnsObj, ok := ctx.Objects[namespace]\n\t\tif !ok {\n\t\t\tnsObj = make(map[string]interface{})\n\t\t\tctx.Objects[namespace] = nsObj\n\t\t}\n\t\tnsObj.(map[string]interface{})[name] = func(params ...interface{}) interface{} {\n\t\t\treturn ctx.call(fmt.Sprintf(\"%s_%s\", namespace, name), params...)\n\t\t}\n\t}\n\n\treturn ctx.Objects\n}", "func connectByHTTPProxy(pageItem *page.Page, inReq *request.Request) (*http.Response, error) {\r\n\trequest, _ := http.NewRequest(\"GET\", inReq.GetURL(), nil)\r\n\tproxy, err := url.Parse(inReq.GetProxyHost())\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\tclient := &http.Client{\r\n\t\tTransport: &http.Transport{\r\n\t\t\tProxy: http.ProxyURL(proxy),\r\n\t\t},\r\n\t}\r\n\tresp, err := client.Do(request)\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\treturn resp, nil\r\n\r\n}", "func bitcoinCall(method string, params []interface{}, reply interface{}) error {\n\tif !globalData.initialised {\n\t\tfault.Panic(\"bitcoin not initialised\")\n\t}\n\n\tglobalData.id += 1\n\n\targuments := bitcoinArguments{\n\t\tId: globalData.id,\n\t\tMethod: method,\n\t\tParams: params,\n\t}\n\tresponse := bitcoinReply{\n\t\tResult: reply,\n\t}\n\tglobalData.log.Debugf(\"rpc call with: %v\", arguments)\n\terr := bitcoinRPC(&arguments, &response)\n\tif nil != err {\n\t\tglobalData.log.Tracef(\"rpc returned error: %v\", err)\n\t\treturn err\n\t}\n\n\tif nil != response.Error {\n\t\ts := response.Error.Message\n\t\treturn fault.ProcessError(\"Bitcoin RPC error: \" + s)\n\t}\n\treturn nil\n}", "func New(addr string) (*Client, error) {\n\tc, err := rpc.DialHTTP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Client{c: c}, nil\n}", "func init() {\n\tSBC = data.NewBlockChain()\n\tid, _ 
:= strconv.ParseInt(os.Args[1], 10, 32)\n\tPeers = data.NewPeerList( /*Register()*/ int32(id), 32) // Uses port number as ID since TA server is down\n\tprivateKey, publicKey = client.GenerateKeyPair()\n\tifStarted = false\n\tmpt.Initial()\n\tclientBalanceMap = make(map[string]int32)\n\tpendingTransaction = make(map[string]string)\n\ttransactionMpt.Initial()\n\tclientBalanceMap[string(client.PublicKeyToBytes(publicKey))] = 1000\n\thighestblockTransaction = 0\n}", "func NewRpcConn(addr string) *RpcConn {\n\treturn &RpcConn{\n\t\tEndpoint: addr,\n\t\tlogger: pegalog.GetLogger(),\n\t\tcstate: ConnStateInit,\n\t\treadTimeout: ConnReadTimeout,\n\t\twriteTimeout: ConnWriteTimeout,\n\t}\n}", "func run(ctx context.Context) error {\n\t// Load configuration and parse command line. This function also\n\t// initializes logging and configures it accordingly.\n\ttcfg, _, err := loadConfig(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcfg = tcfg\n\tdefer func() {\n\t\tif logRotator != nil {\n\t\t\tlogRotator.Close()\n\t\t}\n\t}()\n\n\t// Show version at startup.\n\tlog.Infof(\"Version %s (Go version %s)\", version.String(), runtime.Version())\n\n\tif done(ctx) {\n\t\treturn ctx.Err()\n\t}\n\n\t// Connect to the wallet RPC service\n\twalletClient, err := startRPCClient(ctx)\n\tif err != nil {\n\t\tlog.Errorf(\"Unable to connect to the wallet service: %v\", err)\n\t\treturn err\n\t}\n\tdefer walletClient.Close()\n\n\tif done(ctx) {\n\t\treturn ctx.Err()\n\t}\n\n\twalletCfg := wallet.Config{\n\t\tAccount: cfg.Account,\n\t\tAccountName: cfg.AccountName,\n\t\tChainParams: activeNet.Params,\n\t\tWalletConnection: walletClient,\n\t\tWalletPassword: cfg.WalletPassword,\n\t}\n\n\t// Create a wallet communication object\n\tw, err := wallet.New(ctx, &walletCfg)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to communicate with the wallet: %v\", err)\n\t\treturn err\n\t}\n\n\tif done(ctx) {\n\t\treturn ctx.Err()\n\t}\n\n\ttumblerCfg := tumbler.Config{\n\t\tChainParams: activeNet.Params,\n\t\tEpochDuration: cfg.EpochDuration,\n\t\tEpochRenewal: cfg.EpochRenewal,\n\t\tPuzzleDifficulty: cfg.PuzzleDifficulty,\n\t\tWallet: w,\n\t}\n\n\t// Create and start the RPC server to serve client connections.\n\ttumblerServer, err := startRPCServer()\n\tif err != nil {\n\t\tlog.Errorf(\"Unable to create a Tumbler server: %v\", err)\n\t\treturn err\n\t}\n\n\ttb := tumbler.NewTumbler(&tumblerCfg)\n\n\tif tumblerServer != nil {\n\t\t// Start tumbler gRPC services.\n\t\trpcserver.StartTumblerService(tumblerServer, tb)\n\t\tdefer func() {\n\t\t\tlog.Warn(\"Stopping gRPC server...\")\n\t\t\ttumblerServer.Stop()\n\t\t\tlog.Info(\"gRPC server shutdown\")\n\t\t}()\n\t}\n\n\t// Fire up the TumbleBit server\n\terr = tb.Run(ctx)\n\tswitch err {\n\tcase nil:\n\t\tlog.Info(\"TumbleBit service stopped\")\n\tcase context.Canceled:\n\t\tlog.Error(\"TumbleBit service cancelled\")\n\tdefault:\n\t\tlog.Errorf(\"Failed to setup a TumbleBit service: %v\", err)\n\t\treturn err\n\t}\n\n\t// Wait until shutdown is signaled before returning and running deferred\n\t// shutdown tasks.\n\t<-ctx.Done()\n\treturn ctx.Err()\n}", "func (b *BitcoinClient) createRequest(rpcBody *RPCBody) (*http.Request, error) {\n\tbody, err := b.createBody(rpcBody)\n\tif err != nil {\n\t\tlog.Println(ErrCreatingBody)\n\t\treturn nil, ErrCreatingBody\n\t}\n\n\treq, err := http.NewRequest(\"POST\", b.BitcoinNodeAddr, body)\n\tif err != nil {\n\t\tlog.Println(ErrCreatingRequest)\n\t\treturn nil, ErrCreatingRequest\n\t}\n\n\treq.Header.Set(\"Content-Type\", 
\"application/json\")\n\treq.SetBasicAuth(username, password)\n\n\treturn req, nil\n}", "func (rpc *RpcClient) newHTTPClient() (*http.Client, error) {\n\t// Configure proxy if needed.\n\tvar dial func(network, addr string) (net.Conn, error)\n\tif rpc.Cfg.OptionConfig.Proxy != \"\" {\n\t\tproxy := &socks.Proxy{\n\t\t\tAddr: rpc.Cfg.OptionConfig.Proxy,\n\t\t\tUsername: rpc.Cfg.OptionConfig.ProxyUser,\n\t\t\tPassword: rpc.Cfg.OptionConfig.ProxyPass,\n\t\t}\n\t\tdial = func(network, addr string) (net.Conn, error) {\n\t\t\tc, err := proxy.Dial(network, addr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn c, nil\n\t\t}\n\t}\n\n\t// Configure TLS if needed.\n\tvar tlsConfig *tls.Config\n\tif !rpc.Cfg.SoloConfig.NoTLS && rpc.Cfg.SoloConfig.RPCCert != \"\" {\n\t\tpem, err := ioutil.ReadFile(rpc.Cfg.SoloConfig.RPCCert)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpool := x509.NewCertPool()\n\t\tpool.AppendCertsFromPEM(pem)\n\t\ttlsConfig = &tls.Config{\n\t\t\tRootCAs: pool,\n\t\t\tInsecureSkipVerify: rpc.Cfg.SoloConfig.NoTLS,\n\t\t}\n\t} else {\n\t\ttlsConfig = &tls.Config{\n\t\t\tInsecureSkipVerify: rpc.Cfg.SoloConfig.NoTLS,\n\t\t}\n\t}\n\n\t// Create and return the new HTTP client potentially configured with a\n\t// proxy and TLS.\n\tclient := http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tDial: dial,\n\t\t\tTLSClientConfig: tlsConfig,\n\t\t\tDialContext: (&net.Dialer{\n\t\t\t\tTimeout: time.Duration(rpc.Cfg.OptionConfig.Timeout) * time.Second,\n\t\t\t\tKeepAlive: time.Duration(rpc.Cfg.OptionConfig.Timeout) * time.Second,\n\t\t\t\tDualStack: true,\n\t\t\t}).DialContext,\n\t\t},\n\t}\n\treturn &client, nil\n}", "func rpc_call(reqMethod string, reqParam interface{}, ip string, port int) Node {\n\n\ttempClient, _ := jsonrpc.Dial(serverInfo1.Protocol, ip+\":\"+strconv.Itoa(port))\n\tdefer tempClient.Close()\n\tvar resp Node\n\terr := tempClient.Call(\"DICT3.\"+reqMethod, reqParam, &resp)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn Node{}\n\t}\n\treturn resp\n}", "func newMinecraftConn(base net.Conn, proxy *Proxy, playerConn bool) (conn *minecraftConn) {\n\tin := proto.ServerBound // reads from client are server bound (proxy <- client)\n\tout := proto.ClientBound // writes to client are client bound (proxy -> client)\n\tlogName := \"client\"\n\tif !playerConn { // if a backend server connection\n\t\tin = proto.ClientBound // reads from backend are client bound (proxy <- backend)\n\t\tout = proto.ServerBound // writes to backend are server bound (proxy -> backend)\n\t\tlogName = \"server\"\n\t}\n\n\tlog := proxy.log.WithName(logName)\n\twriteBuf := bufio.NewWriter(base)\n\treadBuf := bufio.NewReader(base)\n\treturn &minecraftConn{\n\t\tproxy: proxy,\n\t\tlog: log,\n\t\tc: base,\n\t\tclosed: make(chan struct{}),\n\t\twriteBuf: writeBuf,\n\t\treadBuf: readBuf,\n\t\tencoder: codec.NewEncoder(writeBuf, out),\n\t\tdecoder: codec.NewDecoder(readBuf, in, log.WithName(\"decoder\")),\n\t\tstate: state.Handshake,\n\t\tprotocol: version.Minecraft_1_7_2.Protocol,\n\t\tconnType: undeterminedConnectionType,\n\t}\n}", "func Dial(rawurl string) (*Client, error) {\n\tc, err := rpc.Dial(rawurl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn NewClient(c), nil\n}", "func createConnection() (*grpc.ClientConn, error) {\n\tconn, err := grpc.Dial(\":1981\", grpc.WithInsecure())\n\tif err != nil {\n\t\tlog.Fatalf(\"did not connect: %s\", err)\n\t}\n\treturn conn, err\n}", "func newPeerClientConnection(block bool) (*grpc.ClientConn, error) {\n\treturn 
newPeerClientConnectionWithAddress(block, viper.GetString(\"service.cliaddress\"))\n}", "func (t HTTP) Post(url string, payload []byte) (resp *http.Response, err error) {\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}\n\thttp := &http.Client{Transport: tr}\n\tresp, err = http.Post(url, \"json-rpc\", strings.NewReader(string(payload)))\n\treturn resp, err\n}", "func (dynamoClient *RPCClient) RpcConnect() error {\n\tif dynamoClient.rpcConn != nil {\n\t\treturn nil\n\t}\n\n\tvar e error\n\tdynamoClient.rpcConn, e = rpc.DialHTTP(\"tcp\", dynamoClient.ServerAddr)\n\tif e != nil {\n\t\tdynamoClient.rpcConn = nil\n\t}\n\n\treturn e\n}", "func NewClient(addr string) *RPCClient {\n\tclient, err := jsonrpc.Dial(\"tcp\", addr)\n\tif err != nil {\n\t\tlog.Fatal(\"dialing:\", err)\n\t}\n\treturn newFromRPCClient(client)\n}", "func NewRPCConnection(config *rpcclient.ConnConfig, maxConnRetries int, ntfnHandlers *rpcclient.NotificationHandlers) *rpcclient.Client {\n\tvar client *rpcclient.Client\n\tvar err error\n\n\tfor i := 0; i < maxConnRetries; i++ {\n\t\tclient, err = rpcclient.New(config, ntfnHandlers)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"err: \" + err.Error())\n\t\t\ttime.Sleep(time.Duration(math.Log(float64(i+3))) * 50 * time.Millisecond)\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\tif client == nil {\n\t\tintegration.ReportTestSetupMalfunction(fmt.Errorf(\"client connection timedout\"))\n\t}\n\treturn client\n}", "func (client *ClientRPC) connect() bool {\n\tconnection, err := net.DialTimeout(\"tcp\", \"localhost:7398\", time.Duration(10)*time.Second)\n\tif err != nil {\n\t\tlog.Printf(\"Unable to connect to relay server at localhost: %v\", err)\n\t\treturn false\n\t}\n\tclient.relay = jsonrpc.NewClient(connection)\n\tlog.Println(\"Connected to relay server\")\n\treturn true\n}", "func Main(c *nine.Config, activeNet *nine.Params, path string) error {\n\t// fmt.Println(\"wallet Main\")\n\tcfg = c\n\tActiveNet = activeNet\n\tif ActiveNet.Name == \"testnet\" {\n\t\tfork.IsTestnet = true\n\t}\n\tif cfg.Profile != nil {\n\t\tgo func() {\n\t\t\tlistenAddr :=\n\t\t\t\tnet.JoinHostPort(\"127.0.0.1\", fmt.Sprint(*cfg.Profile))\n\t\t\tlog <- cl.Info{\"profile server listening on\", listenAddr}\n\t\t\tprofileRedirect := http.RedirectHandler(\"/debug/pprof\",\n\t\t\t\thttp.StatusSeeOther)\n\t\t\thttp.Handle(\"/\", profileRedirect)\n\t\t\tlog <- cl.Error{http.ListenAndServe(listenAddr, nil)}\n\t\t}()\n\t}\n\t// dbDir := NetworkDir(path, activeNet.Params)\n\tlog <- cl.Debug{\"dbDir\", path, *cfg.DataDir, *cfg.DataDir, activeNet.Params.Name}\n\tloader := wallet.NewLoader(activeNet.Params, path, 250)\n\t// Create and start HTTP server to serve wallet client connections.\n\t// This will be updated with the wallet and chain server RPC client\n\t// created below after each is created.\n\tlog <- cl.Trc(\"startRPCServers loader\")\n\trpcs, legacyRPCServer, err := startRPCServers(loader)\n\tif err != nil {\n\t\tlog <- cl.Error{\n\t\t\t\"unable to create RPC servers:\", err,\n\t\t}\n\t\treturn err\n\t}\n\t// Create and start chain RPC client so it's ready to connect to\n\t// the wallet when loaded later.\n\tif !*cfg.NoInitialLoad {\n\t\tlog <- cl.Trc(\"starting rpcClientConnectLoop\")\n\t\tgo rpcClientConnectLoop(legacyRPCServer, loader)\n\t}\n\tloader.RunAfterLoad(func(w *wallet.Wallet) {\n\t\tlog <- cl.Trc(\"starting startWalletRPCServices\")\n\t\tstartWalletRPCServices(w, rpcs, legacyRPCServer)\n\t})\n\tif !*cfg.NoInitialLoad {\n\t\tlog <- cl.Debug{\"loading 
database\"}\n\t\t// Load the wallet database. It must have been created already\n\t\t// or this will return an appropriate error.\n\t\tif cfg.WalletPass != nil {\n\t\t\t_, err = loader.OpenExistingWallet([]byte(*cfg.WalletPass), true)\n\t\t} else {\n\t\t\t_, err = loader.OpenExistingWallet([]byte{}, true)\n\t\t}\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tlog <- cl.Error{err}\n\t\t\treturn err\n\t\t}\n\t}\n\tlog <- cl.Trc(\"adding interrupt handler to unload wallet\")\n\t// Add interrupt handlers to shutdown the various process components\n\t// before exiting. Interrupt handlers run in LIFO order, so the wallet\n\t// (which should be closed last) is added first.\n\tinterrupt.AddHandler(func() {\n\t\terr := loader.UnloadWallet()\n\t\tif err != nil && err != wallet.ErrNotLoaded {\n\t\t\tlog <- cl.Error{\n\t\t\t\t\"failed to close wallet:\", err,\n\t\t\t}\n\t\t}\n\t})\n\tif rpcs != nil {\n\t\tlog <- cl.Trc(\"starting rpc server\")\n\t\tinterrupt.AddHandler(func() {\n\t\t\t// TODO: Does this need to wait for the grpc server to\n\t\t\t// finish up any requests?\n\t\t\tlog <- cl.Wrn(\"stopping RPC server...\")\n\t\t\trpcs.Stop()\n\t\t\tlog <- cl.Inf(\"RPC server shutdown\")\n\t\t})\n\t}\n\tif legacyRPCServer != nil {\n\t\tinterrupt.AddHandler(func() {\n\t\t\tlog <- cl.Wrn(\"stopping legacy RPC server...\")\n\t\t\tlegacyRPCServer.Stop()\n\t\t\tlog <- cl.Inf(\"legacy RPC server shutdown\")\n\t\t})\n\t\tgo func() {\n\t\t\t<-legacyRPCServer.RequestProcessShutdown()\n\t\t\tinterrupt.Request()\n\t\t}()\n\t}\n\t<-interrupt.HandlersDone\n\tlog <- cl.Inf(\"shutdown complete\")\n\treturn nil\n}", "func BinanceApiProxy(w http.ResponseWriter, r *http.Request) {\n\ttarget := \"https://api.binance.com\"\n\trequest, err := http.NewRequest(r.Method, target, nil)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tlog.Printf(\"error: failed to create request: %v\\n\", err)\n\t\treturn\n\t}\n\trequest.URL.Path = r.URL.Path\n\trequest.URL.RawQuery = r.URL.RawQuery\n\n\tfor key, val := range r.Header {\n\t\tswitch strings.ToLower(key) {\n\t\tcase \"x-mbx-apikey\":\n\t\t\trequest.Header[key] = val\n\t\tdefault:\n\t\t}\n\t}\n\n\tresponse, err := http.DefaultClient.Do(request)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tlog.Printf(\"error: failed to send request: %v\\n\", err)\n\t\treturn\n\t}\n\n\tfor key, val := range response.Header {\n\t\tw.Header()[key] = val\n\t}\n\tw.WriteHeader(response.StatusCode)\n\tio.Copy(w, response.Body)\n}", "func NewFakeRpcConn(reader io.Reader, writer io.Writer) *RpcConn {\n\tconn := NewRpcConn(\"\")\n\tconn.setReady(reader, writer)\n\treturn conn\n}", "func main() {\n\tlog.SetFlags(log.LstdFlags | log.Lmicroseconds)\n\tlog.Printf(\"My peers are %v\", os.Getenv(\"PEERS\"))\n\tlog.Printf(\"traffic is %v\", os.Getenv(\"TRAFFIC\"))\n\tpeers := []*node.Peer{}\n\tfor _, s := range strings.Split(os.Getenv(\"PEERS\"), \" \") {\n\t\tp := &node.Peer{\n\t\t\tHost: fmt.Sprintf(\"node-%s\", s),\n\t\t\tPort: s}\n\t\tpeers = append(peers, p)\n\t}\n\n\n\tvar traffic = false\n\tif os.Getenv(\"TRAFFIC\") == \"1\" {\n\t\ttraffic = true\n\t}\n\n\tclientNode = client.NewClient(fmt.Sprintf(\"node-%s\", os.Getenv(\"PORT\")), os.Getenv(\"PORT\"), peers, uiChannel, nodeChannel, traffic)\n\n\terr := clientNode.SetupRPC()\n\tif err != nil {\n\t\tlog.Fatal(\"RPC setup error:\", err)\n\t}\n\terr = clientNode.Peer()\n\tif err != nil {\n\t\tlog.Fatal(\"Peering error:\", err)\n\t}\n\n\tfs := http.FileServer(http.Dir(\"../public\"))\n\thttp.Handle(\"/\", 
fs)\n\n\thttp.HandleFunc(\"/ws\", handleConnections)\n\thttp.HandleFunc(\"/disconnect\", handleDisconnect)\n\thttp.HandleFunc(\"/connect\", handleConnect)\n\thttp.HandleFunc(\"/getID\", handleGetID)\n\tgo handleMessages()\n\n\tgo func() {\n\t\terr := http.ListenAndServe(HttpPort, nil)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"ListenAndServe: \", err)\n\t\t}\n\t}()\n\n\tif traffic == true{\n\t\tclientNode.Start()\n\t}\n\n\tfor {\n\t\ttime.Sleep(time.Hour)\n\t}\n}", "func newClient(cfg Config) *Client {\n\treturn &Client{bitclient: bitclient.NewBitClient(cfg.Bitbucket.Host, cfg.Bitbucket.User, cfg.Bitbucket.Password)}\n}", "func makeLink(t *testing.T) (client *Client) {\n\t// start a server\n\tserver := NewServer()\n\terr := server.Register(new(StreamingArith))\n\tif err != nil {\n\t\tt.Fatal(\"Register failed\", err)\n\t}\n\n\t// listen and handle queries\n\tvar l net.Listener\n\tl, serverAddr = listenTCP()\n\tlog.Println(\"Test RPC server listening on\", serverAddr)\n\tgo server.Accept(l)\n\n\t// dial the client\n\tclient, err = Dial(\"tcp\", serverAddr)\n\tif err != nil {\n\t\tt.Fatal(\"dialing\", err)\n\t}\n\n\treturn\n}", "func newClient(address string) (c *client, err error) {\n\tif address, err = Host(address); err != nil {\n\t\treturn\n\t}\n\tc = new(client)\n\tif c.client, err = rpc.Dial(\"tcp\", address); err != nil {\n\t\treturn nil, err\n\t}\n\tc.address = address\n\treturn\n}", "func NewClient(connectionString string, baselineEncoding encoding.Encoding) (Client, error) {\n\tparts := strings.Split(connectionString, \"://\")\n\tif len(parts) < 2 {\n\t\treturn nil, errors.New(\"Connection string not in the correct format, must be transport://server:port\")\n\t}\n\n\ttransport := parts[0]\n\tid := uuid.NewV4().String()\n\n\tswitch transport {\n\tcase \"redis\":\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"prefix\": \"tcf\",\n\t\t}).Info(\"Using Redis back-end\")\n\t\tc := &RedisClient{\n\t\t\tURL: connectionString,\n\t\t\tid: id,\n\t\t}\n\t\tc.SetEncoding(baselineEncoding)\n\t\tc.Init(nil)\n\t\treturn c, nil\n\tcase \"beacon\":\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"prefix\": \"tcf\",\n\t\t}).Info(\"Using Beacon back-end\")\n\n\t\tURL, err := url.Parse(connectionString)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tparts := strings.Split(URL.Host, \":\")\n\t\tif len(parts) < 2 {\n\t\t\treturn nil, errors.New(\"No port specified\")\n\t\t}\n\n\t\tinterval := URL.Query().Get(\"interval\")\n\t\tif interval == \"\" {\n\t\t\tinterval = \"10\"\n\t\t}\n\n\t\tasInt, convErr := strconv.Atoi(interval)\n\t\tif convErr != nil {\n\t\t\treturn nil, convErr\n\t\t}\n\n\t\tportAsInt, portConvErr := strconv.Atoi(parts[1])\n\t\tif portConvErr != nil {\n\t\t\treturn nil, portConvErr\n\t\t}\n\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"prefix\": \"tcf\",\n\t\t}).Debugf(\"Port is: %v\\n\", parts[1])\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"prefix\": \"tcf\",\n\t\t}).Debugf(\"Interval is: %v\\n\", asInt)\n\n\t\tc := &BeaconClient{\n\t\t\tPort: portAsInt,\n\t\t\tInterval: asInt,\n\t\t\tid: id,\n\t\t\tUseMiniPayload: true,\n\t\t}\n\t\tc.SetEncoding(baselineEncoding)\n\t\tc.Init(nil)\n\t\treturn c, nil\n\n\tcase \"mangos\":\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"prefix\": \"tcf\",\n\t\t}).Info(\"Using Mangos back-end\")\n\n\t\tURL, err := url.Parse(connectionString)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tparts := strings.Split(URL.Host, \":\")\n\t\tif len(parts) < 2 {\n\t\t\treturn nil, errors.New(\"No port specified\")\n\t\t}\n\t\turl := \"tcp://\" + 
URL.Host\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"prefix\": \"tcf\",\n\t\t}).Info(\"Connecting to: \", url)\n\n\t\tdisablePublisher := URL.Query().Get(\"disable_publisher\")\n\n\t\tc := &MangosClient{\n\t\t\tURL: url,\n\t\t\tdisablePublisher: disablePublisher != \"\",\n\t\t\tid: id,\n\t\t}\n\n\t\tc.SetEncoding(baselineEncoding)\n\t\tif initErr := c.Init(nil); initErr != nil {\n\t\t\treturn nil, initErr\n\t\t}\n\n\t\treturn c, nil\n\tdefault:\n\t\treturn nil, errors.New(\"No valid transport set.\")\n\t}\n}", "func main() {\n\t//\n\t//fileName := \"index\"\n\t//\n\t//port, _ := PickUnusedPort()\n\t//ui.ShowUI(port, fileName)\n\t// 4780\n\t//startProxy := ui.StartProxy()\n\t//log.Printf(\"%+v\\n\", startProxy)\n\n\tproxy := ui.ReqForProxy(\"127.0.0.1:4780\")\n\tlog.Printf(\"%+v\\n\", proxy)\n\n}", "func rpc_Go(method string, args Triplet, resp *Response, ip string, port int, cs chan *rpc.Call) interface{} {\n\ttempClient, err := jsonrpc.Dial(serverInfo1.Protocol, ip+\":\"+strconv.Itoa(port))\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\t(*resp).client = tempClient\n\ttempClient.Go(\"DICT3.\"+method, args, resp, cs)\n\treturn nil\n}", "func ConnectAndCall(srv_addr string, srv_port string, method string, args interface{}, reply interface{}) error {\n\tclient, err := rpc.DialHTTP(\"tcp\", srv_addr+\":\"+srv_port)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = client.Call(method, args, reply) // make rpc call\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil // no error\n}", "func main() {\n\tcalculix := serverCalculix.NewCalculix()\n\terr := rpc.Register(calculix)\n\tif err != nil {\n\t\tfmt.Println(\"Cannot register the calculix\")\n\t\treturn\n\t}\n\trpc.HandleHTTP()\n\tl, e := net.Listen(\"tcp\", \":1234\")\n\tif e != nil {\n\t\tlog.Fatal(\"listen error:\", e)\n\t}\n\terr = http.Serve(l, nil)\n\tif err != nil {\n\t\tfmt.Println(\"Cannot serve the calculix\")\n\t\treturn\n\t}\n}", "func connect(cfg *config.Config, log logger.Logger) (*ftm.Client, *eth.Client, error) {\n\t// log what we do\n\tlog.Debugf(\"connecting blockchain node at %s\", cfg.Opera.ApiNodeUrl)\n\n\t// try to establish a connection\n\tclient, err := ftm.Dial(cfg.Opera.ApiNodeUrl)\n\tif err != nil {\n\t\tlog.Critical(err)\n\t\treturn nil, nil, err\n\t}\n\n\t// try to establish a for smart contract interaction\n\tcon, err := eth.Dial(cfg.Opera.ApiNodeUrl)\n\tif err != nil {\n\t\tlog.Critical(err)\n\t\treturn nil, nil, err\n\t}\n\n\t// log\n\tlog.Notice(\"node connection open\")\n\treturn client, con, nil\n}", "func (bf *WebSocketClient) Connect() error {\n\turl := url2.URL{Scheme: \"wss\", Host: url, Path: \"/json-rpc\"}\n\tcon, _, err := websocket.DefaultDialer.Dial(url.String(), nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbf.Con = con\n\treturn nil\n}", "func (r *RPC) Send(ctx context.Context, serviceName, method, uri string, header map[string]string, body io.Reader, timeout time.Duration) (ret *Response, err error) {\n\tnode := &registry.ServiceNode{}\n\n\tvar buf []byte\n\tif body != nil {\n\t\tbuf, _ := ioutil.ReadAll(body)\n\t\tbody = ioutil.NopCloser(bytes.NewBuffer(buf))\n\t}\n\n\tret = &Response{}\n\n\tdefer func() {\n\t\tif r.logger == nil {\n\t\t\treturn\n\t\t}\n\t\tfields := r.logger.Fields(ctx, serviceName, method, uri, header, buf, timeout, node.Host, node.Port, ret.Response, err)\n\t\tif err == nil {\n\t\t\tr.logger.Info(\"http rpc\", fields...)\n\t\t\treturn\n\t\t}\n\t\tr.logger.Error(\"http rpc errpr\", fields...)\n\t}()\n\n\tnode, err = r.loadBalance(serviceName)\n\tif err != nil 
{\n\t\treturn\n\t}\n\n\tvar req *http.Request\n\n\tclient := &http.Client{\n\t\tTransport: http.DefaultTransport,\n\t\tTimeout: timeout,\n\t}\n\n\t//构建req\n\treq, err = http.NewRequestWithContext(ctx, method, fmt.Sprintf(\"http://%s:%d%s\", node.Host, node.Port, uri), body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t//设置请求header\n\tfor k, v := range header {\n\t\treq.Header.Add(k, v)\n\t}\n\n\tif ctx.Err() != nil {\n\t\treturn nil, err\n\t}\n\n\t//注入Jaeger\n\tlogID := req.Header.Get(logging.LogHeader)\n\tjaeger_http.InjectHTTP(ctx, req, logID)\n\n\t//发送请求\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tret.HTTPCode = resp.StatusCode\n\tif resp.StatusCode != http.StatusOK {\n\t\terr = fmt.Errorf(\"http code is %d\", resp.StatusCode)\n\t\treturn\n\t}\n\n\tif b != nil {\n\t\tret.Response = string(b)\n\t}\n\n\treturn\n}", "func Test100_01SetupApi(t *testing.T) {\n\n\tt.Run(\"ConnectToBackend\", func(t *testing.T) {\n\t\tvar err error\n\t\tt.Logf(\"Listen address: %v\", accaGrpcAddr)\n\t\tConn, err = grpc.Dial(accaGrpcAddr, grpc.WithInsecure(), grpc.WithBlock())\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t})\n\n}", "func Connect(url string) (Client, error) {\n\tlog.Printf(\"Connecting to %v...\", url)\n\n\tctx, cancel := context.WithTimeout(context.Background(), config.Default().DialTimeout)\n\tdefer cancel()\n\n\tc, err := gethrpc.DialContext(ctx, url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcc := client{*c, url}\n\treturn &cc, nil\n}", "func call(rpcname string, args interface{}, reply interface{}) bool {\n\tc, err := rpc.DialHTTP(\"tcp\", \"127.0.0.1\"+\":1234\")\n\t// sockname := masterSock()\n\t// c, err := rpc.DialHTTP(\"unix\", sockname)\n\tif err != nil {\n\t\tlog.Fatal(\"dialing:\", err)\n\t}\n\tdefer c.Close()\n\n\terr = c.Call(rpcname, args, reply)\n\tif err == nil {\n\t\treturn true\n\t}\n\n\tlog.Println(err)\n\treturn false\n}", "func connectNodeRPC(host, user, pass, cert string,\n\tnotifications *rpcclient.NotificationHandlers) (*rpcclient.Client, error) {\n\n\tdcrdCerts, err := ioutil.ReadFile(cert)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"TLS certificate read error: %v\", err)\n\t}\n\n\tconfig := &rpcclient.ConnConfig{\n\t\tHost: host,\n\t\tEndpoint: \"ws\", // websocket\n\t\tUser: user,\n\t\tPass: pass,\n\t\tCertificates: dcrdCerts,\n\t}\n\n\tdcrdClient, err := rpcclient.New(config, notifications)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to start dcrd RPC client: %v\", err)\n\t}\n\n\treturn dcrdClient, nil\n}", "func main() {\n\tport := flag.String(\"port\", \"5000\", \"port num\")\n\tflag.Parse()\n\n\tconn, err := grpc.Dial(\"127.0.0.1:\"+*port, grpc.WithInsecure())\n\tif err != nil {\n\t\tlog.Fatal(\"did not connect: %v\", err)\n\t}\n\tdefer conn.Close()\n\tc := pb.NewCoinCalcClient(conn)\n\n\treq := &pb.CoinListRequest{Page: 2}\n\tr, err := c.GetCoins(context.Background(), req)\n\tfor idx, coin := range r.Coins {\n\t\tfmt.Println(\"idx: coin: \", idx, coin)\n\t}\n\n\tsymbols := []string{\"BTC\", \"ETH\"}\n\treq2 := &pb.PriceRequest{Symbols: symbols}\n\tr2, _ := c.GetCoinPrices(context.Background(), req2)\n\tfor idx, coin2 := range r2.Coins {\n\t\tfmt.Println(\"idx: coin: \", idx, coin2)\n\t}\n\n\treq3 := &pb.GetUserCoinRequest{User: \"2FE957DF-8017-4E10-8F1B-D69E1A2BD032\"}\n\tr3, _ := c.GetUserCoins(context.Background(), req3)\n\tfor idx, coin3 := range r3.Ucs {\n\t\tfmt.Println(\"idx: coin: \", idx, 
coin3.Coin)\n\t}\n\n\tsearchReq := &pb.SearchCoinRequest{Name: \"B\"}\n\tr4, _ := c.SearchCoin(context.Background(), searchReq)\n\tfmt.Print(r4)\n\n\tfor idx, coin4 := range r4.Coins {\n\t\tfmt.Print(\"idx: coin:\", idx, coin4)\n\t}\n}", "func (rpc *RpcClient) newHTTPClient() (*http.Client, error) {\n\t// Configure proxy if needed.\n\tvar dial func(network, addr string) (net.Conn, error)\n\n\t// Configure TLS if needed.\n\tvar tlsConfig *tls.Config\n\n\t// Create and return the new HTTP client potentially configured with a\n\t// proxy and TLS.\n\tclient := http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tDial: dial,\n\t\t\tTLSClientConfig: tlsConfig,\n\t\t\tDialContext: (&net.Dialer{\n\t\t\t\tTimeout: 5 * time.Second,\n\t\t\t\tKeepAlive: 5 * time.Second,\n\t\t\t\tDualStack: true,\n\t\t\t}).DialContext,\n\t\t},\n\t}\n\treturn &client, nil\n}", "func OpenConnection(host string, port int) client.RpcMethod {\n\treq := &client.TemplateRpcMethod{}\n\t_ = json.Unmarshal([]byte(\"{\\\"MethodName\\\":\\\"OpenConnection\\\",\\\"Desc\\\":\\\"Opens a connection to another peer\\\",\\\"Method\\\":\\\"POST\\\",\\\"Url\\\":\\\"open_connection\\\",\\\"JsonTemplate\\\":\\\"{\\\\\\\"host\\\\\\\": \\\\\\\"\\\\\\\", \\\\\\\"port\\\\\\\": 0}\\\",\\\"ValInfo\\\":[{\\\"Name\\\":\\\"host\\\",\\\"Desc\\\":\\\"ip or dns name of the peer\\\",\\\"Type\\\":24,\\\"Default\\\":\\\"\\\",\\\"Path\\\":\\\"host\\\",\\\"Data\\\":null},{\\\"Name\\\":\\\"port\\\",\\\"Desc\\\":\\\"port of the peer\\\",\\\"Type\\\":2,\\\"Default\\\":0,\\\"Path\\\":\\\"port\\\",\\\"Data\\\":null}]}\"), req)\n\treq.ValInfo[0].Data = &host\n\treq.ValInfo[1].Data = &port\n\treturn req\n}", "func New(host string, user string, pass string, tls bool) (*Bus, error) {\n\tconnCfg := &rpcclient.ConnConfig{\n\t\tHost: host,\n\t\tUser: user,\n\t\tPass: pass,\n\t\tHTTPPostMode: true,\n\t\tDisableTLS: !tls,\n\t}\n\t// The notification parameter is nil since notifications are not\n\t// supported in HTTP POST mode.\n\tclient, err := rpcclient.New(connCfg, nil)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"host\": host,\n\t\t\t\"user\": user,\n\t\t\t\"TLS\": tls,\n\t\t}).Error(\"Failed to initialize RPC client\")\n\t\treturn nil, err\n\t}\n\n\tinfo, err := client.GetBlockChainInfo()\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"host\": host,\n\t\t\t\"user\": user,\n\t\t\t\"TLS\": tls,\n\t\t}).Error(\"Failed to connect to RPC server\")\n\t\treturn nil, err\n\t}\n\n\ttxIndex := isTxIndexEnabled(client)\n\n\tlog.WithFields(log.Fields{\n\t\t\"chain\": info.Chain,\n\t\t\"pruned\": info.Pruned,\n\t\t\"txindex\": txIndex,\n\t}).Info(\"RPC connection established\")\n\n\tif !txIndex {\n\t\tlog.Warn(\"May have unexpected errors without txindex\")\n\t}\n\n\treturn &Bus{\n\t\tClient: client,\n\t\tPruned: info.Pruned,\n\t\tChain: info.Chain,\n\t\tTxIndex: txIndex,\n\t\tCurrency: getCurrencyFromChain(info.Chain),\n\t\tCache: nil, // Disabled by default\n\t}, nil\n}", "func handleRpcConnection() {\n\n\tfortuneServerRPC := new(FortuneServerRPC)\n\trpc.Register(fortuneServerRPC)\n\n\ttcpAddress, err := net.ResolveTCPAddr(\"tcp\", fserverTcpG)\n\thandleError(err)\n\n\t// Listen for Tcp connections\n\tln, err := net.ListenTCP(\"tcp\", tcpAddress)\n\thandleError(err)\n\n\tfor {\n\n\t\tconn, err := ln.AcceptTCP()\n\t\thandleError(err)\n\t\tgo rpc.ServeConn(conn)\n\t}\n\n\tln.Close()\n}", "func New(url string) Client {\n\treturn &clientImpl{\n\t\tRPCClient: jsonrpc.NewRPCClient(url),\n\t\tLogger: slf4go.Get(\"eth-rpc-client\"),\n\t}\n}", "func 
Make(peers []string, me int, rpcs *rpc.Server) *Paxos {\n px := &Paxos{}\n px.peers = peers\n px.me = me\n\n // Your initialization code here.\n px.history = make(map[int]Proposal)\n px.decisions = make(map[int]int)\n for i := 0; i < len(px.peers); i++ {\n px.decisions[i] = -1\n }\n px.maxSeq = -1\n px.majorSize = len(px.peers)/2 + 1\n\n if rpcs != nil {\n // caller will create socket &c\n rpcs.Register(px)\n } else {\n rpcs = rpc.NewServer()\n rpcs.Register(px)\n\n // prepare to receive connections from clients.\n // change \"unix\" to \"tcp\" to use over a network.\n os.Remove(peers[me]) // only needed for \"unix\"\n l, e := net.Listen(\"unix\", peers[me]);\n if e != nil {\n log.Fatal(\"listen error: \", e);\n }\n px.l = l\n\n // please do not change any of the following code,\n // or do anything to subvert it.\n\n // create a thread to accept RPC connections\n go func() {\n for px.dead == false {\n conn, err := px.l.Accept()\n if err == nil && px.dead == false {\n if px.unreliable && (rand.Int63() % 1000) < 100 {\n // discard the request.\n conn.Close()\n } else if px.unreliable && (rand.Int63() % 1000) < 200 {\n // process the request but force discard of reply.\n c1 := conn.(*net.UnixConn)\n f, _ := c1.File()\n err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)\n if err != nil {\n fmt.Printf(\"shutdown: %v\\n\", err)\n }\n px.rpcCount++\n go rpcs.ServeConn(conn)\n } else {\n px.rpcCount++\n go rpcs.ServeConn(conn)\n }\n } else if err == nil {\n conn.Close()\n }\n if err != nil && px.dead == false {\n fmt.Printf(\"Paxos(%v) accept: %v\\n\", me, err.Error())\n }\n }\n }()\n }\n\n\n return px\n}", "func NewHTTPClient(conn net.Conn, opt *codec.Option) (*Client, error) {\n\t_, _ = io.WriteString(conn, fmt.Sprintf(\"CONNECT %s HTTP/1.0\\n\\n\", defaultHandlePath))\n\n\tres, err := http.ReadResponse(bufio.NewReader(conn), &http.Request{Method: \"CONNECT\"})\n\tif err == nil && res.Status == \"200 Connected to Gingle RPC\" {\n\t\treturn NewRPCClient(conn, opt)\n\t}\n\n\tif err == nil {\n\t\terr = fmt.Errorf(\"client: failed to new http client, err: unexpected http response\")\n\t}\n\treturn nil, err\n}", "func call(rpcname string, args interface{}, reply interface{}) bool {\n\t// c, err := rpc.DialHTTP(\"tcp\", \"127.0.0.1\"+\":1234\")\n\tsockname := masterSock()\n\tc, err := rpc.DialHTTP(\"unix\", sockname)\n\tif err != nil {\n\t\tlog.Fatal(\"dialing:\", err)\n\t}\n\tdefer c.Close()\n\t//远程调用Master.Example(args, reply)\n\terr = c.Call(rpcname, args, reply)\n\tif err == nil {\n\t\treturn true\n\t}\n\tfmt.Println(err)\n\treturn false\n}", "func wsHttpConnect(proxy, url_ string) (io.ReadWriteCloser, error) {\n\tlog.Printf(\"[%s] proxy =\", __FILE__, proxy)\n\tproxy_tcp_conn, err := net.Dial(\"tcp\", proxy)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Printf(\"[%s] proxy_tcp_conn =\", __FILE__, proxy_tcp_conn)\n\tlog.Printf(\"[%s] url_ =\", __FILE__, url_)\n\n\tturl, err := url.Parse(url_)\n\tif err != nil {\n\t\tlog.Printf(\"[%s] Parse : \", __FILE__, err)\n\t\treturn nil, err\n\t}\n\n\tlog.Printf(\"[%s] proxy turl.Host =\", __FILE__, string(turl.Host))\n\n\treq := http.Request{\n\t\tMethod: \"CONNECT\",\n\t\tURL: &url.URL{},\n\t\tHost: turl.Host,\n\t}\n\n\tproxy_http_conn := httputil.NewProxyClientConn(proxy_tcp_conn, nil)\n\t//cc := http.NewClientConn(proxy_tcp_conn, nil)\n\n\tlog.Printf(\"[%s] proxy_http_conn =\", __FILE__, proxy_http_conn)\n\n\tresp, err := proxy_http_conn.Do(&req)\n\tif err != nil && err != httputil.ErrPersistEOF {\n\t\tlog.Printf(\"[%s] ErrPersistEOF : \", 
__FILE__, err)\n\t\treturn nil, err\n\t}\n\tlog.Printf(\"[%s] proxy_http_conn<resp> =\", __FILE__, (resp))\n\n\trwc, _ := proxy_http_conn.Hijack()\n\n\treturn rwc, nil\n\n}", "func sendCommand(command CommandRequest) *CommandResponse {\n conn, err := net.Dial(\"tcp\", \"127.0.0.1:5000\")\n if err != nil {\n LOG[ERROR].Println(StatusText(StatusConnectionError), err, \"retrying...\")\n // Sleep to allow some time for new master startup\n time.Sleep(5 * time.Second)\n conn, err = net.Dial(\"tcp\", \"127.0.0.1:5000\")\n }\n if err != nil {\n LOG[ERROR].Println(StatusText(StatusConnectionError), err)\n return nil\n }\n defer conn.Close()\n\n encoder := gob.NewEncoder(conn)\n err = encoder.Encode(command)\n if err != nil {\n LOG[ERROR].Println(StatusText(StatusEncodeError), err)\n return nil\n }\n\n var response CommandResponse\n decoder := gob.NewDecoder(conn)\n err = decoder.Decode(&response)\n if err != nil {\n LOG[ERROR].Println(StatusText(StatusDecodeError), err)\n return nil\n }\n return &response\n}", "func getClient(url string, groupID uint) (*client.Client) {\n\t// RPC API\n\tc, err := client.Dial(url, groupID) // change to your RPC and groupID\n\tif err != nil {\n fmt.Println(\"can not dial to the RPC API, please check the config file gobcos_config.yaml: \", err)\n os.Exit(1)\n\t}\n\treturn c\n}", "func call(rpcname string, args interface{}, reply interface{}) bool {\n\t// c, err := rpc.DialHTTP(\"tcp\", \"127.0.0.1\"+\":1234\")\n\tsockname := masterSock()\n\tc, err := rpc.DialHTTP(\"unix\", sockname)\n\tif err != nil {\n\t\tlog.Fatal(\"dialing:\", err)\n\t}\n\tdefer c.Close()\n\n\terr = c.Call(rpcname, args, reply)\n\tif err == nil {\n\t\treturn true\n\t}\n\n\tfmt.Println(err)\n\treturn false\n}", "func call(rpcname string, args interface{}, reply interface{}) bool {\n\t// c, err := rpc.DialHTTP(\"tcp\", \"127.0.0.1\"+\":1234\")\n\tsockname := masterSock()\n\tc, err := rpc.DialHTTP(\"unix\", sockname)\n\tif err != nil {\n\t\tlog.Fatal(\"dialing:\", err)\n\t}\n\tdefer c.Close()\n\n\terr = c.Call(rpcname, args, reply)\n\tif err == nil {\n\t\treturn true\n\t}\n\n\tfmt.Println(err)\n\treturn false\n}", "func call(rpcname string, args interface{}, reply interface{}) bool {\n\t// c, err := rpc.DialHTTP(\"tcp\", \"127.0.0.1\"+\":1234\")\n\tsockname := masterSock()\n\tc, err := rpc.DialHTTP(\"unix\", sockname)\n\tif err != nil {\n\t\tlog.Fatal(\"dialing:\", err)\n\t}\n\tdefer c.Close()\n\n\terr = c.Call(rpcname, args, reply)\n\tif err == nil {\n\t\treturn true\n\t}\n\n\tfmt.Println(err)\n\treturn false\n}", "func call(rpcname string, args interface{}, reply interface{}) bool {\n\t// c, err := rpc.DialHTTP(\"tcp\", \"127.0.0.1\"+\":1234\")\n\tsockname := masterSock()\n\tc, err := rpc.DialHTTP(\"unix\", sockname)\n\tif err != nil {\n\t\tlog.Fatal(\"dialing:\", err)\n\t}\n\tdefer c.Close()\n\n\terr = c.Call(rpcname, args, reply)\n\tif err == nil {\n\t\treturn true\n\t}\n\n\tfmt.Println(err)\n\treturn false\n}", "func call(rpcname string, args interface{}, reply interface{}) bool {\n\t// c, err := rpc.DialHTTP(\"tcp\", \"127.0.0.1\"+\":1234\")\n\tsockname := masterSock()\n\tc, err := rpc.DialHTTP(\"unix\", sockname)\n\tif err != nil {\n\t\tlog.Fatal(\"dialing:\", err)\n\t}\n\tdefer c.Close()\n\n\terr = c.Call(rpcname, args, reply)\n\tif err == nil {\n\t\treturn true\n\t}\n\n\tfmt.Println(err)\n\treturn false\n}", "func call(rpcname string, args interface{}, reply interface{}) bool {\n\t// c, err := rpc.DialHTTP(\"tcp\", \"127.0.0.1\"+\":1234\")\n\tsockname := masterSock()\n\tc, err := 
rpc.DialHTTP(\"unix\", sockname)\n\tif err != nil {\n\t\tlog.Fatal(\"dialing:\", err)\n\t}\n\tdefer c.Close()\n\n\terr = c.Call(rpcname, args, reply)\n\tif err == nil {\n\t\treturn true\n\t}\n\n\tfmt.Println(err)\n\treturn false\n}", "func call(rpcname string, args interface{}, reply interface{}) bool {\n\t// c, err := rpc.DialHTTP(\"tcp\", \"127.0.0.1\"+\":1234\")\n\tsockname := masterSock()\n\tc, err := rpc.DialHTTP(\"unix\", sockname)\n\tif err != nil {\n\t\tlog.Fatal(\"dialing:\", err)\n\t}\n\tdefer c.Close()\n\n\terr = c.Call(rpcname, args, reply)\n\tif err == nil {\n\t\treturn true\n\t}\n\n\tfmt.Println(err)\n\treturn false\n}", "func call(rpcname string, args interface{}, reply interface{}) bool {\n\t// c, err := rpc.DialHTTP(\"tcp\", \"127.0.0.1\"+\":1234\")\n\tsockname := masterSock()\n\tc, err := rpc.DialHTTP(\"unix\", sockname)\n\tif err != nil {\n\t\tlog.Fatal(\"dialing:\", err)\n\t}\n\tdefer c.Close()\n\n\terr = c.Call(rpcname, args, reply)\n\tif err == nil {\n\t\treturn true\n\t}\n\n\tfmt.Println(err)\n\treturn false\n}", "func call(rpcname string, args interface{}, reply interface{}) bool {\n\t// c, err := rpc.DialHTTP(\"tcp\", \"127.0.0.1\"+\":1234\")\n\tsockname := masterSock()\n\tc, err := rpc.DialHTTP(\"unix\", sockname)\n\tif err != nil {\n\t\tlog.Fatal(\"dialing:\", err)\n\t}\n\tdefer c.Close()\n\n\terr = c.Call(rpcname, args, reply)\n\tif err == nil {\n\t\treturn true\n\t}\n\n\tfmt.Println(err)\n\treturn false\n}", "func call(rpcname string, args interface{}, reply interface{}) bool {\n\t// c, err := rpc.DialHTTP(\"tcp\", \"127.0.0.1\"+\":1234\")\n\tsockname := masterSock()\n\tc, err := rpc.DialHTTP(\"unix\", sockname)\n\tif err != nil {\n\t\tlog.Fatal(\"dialing:\", err)\n\t}\n\tdefer c.Close()\n\n\terr = c.Call(rpcname, args, reply)\n\tif err == nil {\n\t\treturn true\n\t}\n\n\tfmt.Println(err)\n\treturn false\n}", "func call(rpcname string, args interface{}, reply interface{}) bool {\n\t// c, err := rpc.DialHTTP(\"tcp\", \"127.0.0.1\"+\":1234\")\n\tsockname := masterSock()\n\tc, err := rpc.DialHTTP(\"unix\", sockname)\n\tif err != nil {\n\t\tlog.Fatal(\"dialing:\", err)\n\t}\n\tdefer c.Close()\n\n\terr = c.Call(rpcname, args, reply)\n\tif err == nil {\n\t\treturn true\n\t}\n\n\tfmt.Println(err)\n\treturn false\n}" ]
[ "0.63428277", "0.622701", "0.62245643", "0.59835815", "0.57889086", "0.5719119", "0.56793374", "0.5646093", "0.5581715", "0.5572185", "0.5528617", "0.55063194", "0.5499948", "0.5494585", "0.54556996", "0.5449738", "0.5431415", "0.54304606", "0.54102886", "0.5409134", "0.5391432", "0.5282433", "0.5271965", "0.52564347", "0.52452296", "0.5222412", "0.52102154", "0.52011806", "0.5198238", "0.51976866", "0.5190038", "0.518433", "0.5166703", "0.51583695", "0.51566374", "0.5149011", "0.5147749", "0.51420355", "0.5139781", "0.5135504", "0.5128876", "0.51202023", "0.51133686", "0.51065004", "0.51035815", "0.5098184", "0.5058975", "0.50551593", "0.5051846", "0.5036499", "0.5033031", "0.5030904", "0.50303274", "0.5022563", "0.4996274", "0.49923658", "0.49921665", "0.49888876", "0.498508", "0.49725065", "0.4971204", "0.4967266", "0.49640667", "0.4963628", "0.49599028", "0.49594566", "0.49564534", "0.494689", "0.49407104", "0.49281198", "0.49223605", "0.49220222", "0.4915831", "0.49153984", "0.49011138", "0.4892331", "0.48878136", "0.4887763", "0.487248", "0.48676533", "0.4865552", "0.48629138", "0.48579684", "0.48578596", "0.4838653", "0.48367602", "0.4832339", "0.48305488", "0.48287225", "0.48256323", "0.48216853", "0.48216853", "0.48216853", "0.48216853", "0.48216853", "0.48216853", "0.48216853", "0.48216853", "0.48216853", "0.48216853", "0.48216853" ]
0.0
-1
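Several of the negative snippets in the record above share the same standard-library net/rpc dial-and-call idiom (rpc.DialHTTP("unix", sockname) followed by c.Call(rpcname, args, reply)). The following is a minimal, self-contained sketch of that idiom; the Echo service, its method, and the socket path are assumptions chosen for illustration and do not come from any of the records.

// rpcdemo.go — compact sketch of the net/rpc pattern seen in the snippets above.
// The Echo service, its method, and the socket path are illustrative assumptions.
package main

import (
	"fmt"
	"log"
	"net"
	"net/http"
	"net/rpc"
	"os"
)

type Echo struct{}

// Reply copies the request string into the reply. net/rpc requires this
// exact method shape: (args T, reply *U) error, with exported names.
func (e *Echo) Reply(args string, reply *string) error {
	*reply = args
	return nil
}

func main() {
	sockname := "/tmp/rpcdemo.sock" // assumed socket path

	// Server side: register the service and serve RPC over HTTP on a Unix socket.
	if err := rpc.Register(new(Echo)); err != nil {
		log.Fatal("register error:", err)
	}
	rpc.HandleHTTP()
	os.Remove(sockname) // only needed for "unix" sockets, as in the snippets above
	l, err := net.Listen("unix", sockname)
	if err != nil {
		log.Fatal("listen error:", err)
	}
	go http.Serve(l, nil)

	// Client side: dial the same socket and make a synchronous call,
	// mirroring the call() helpers in the negatives above.
	c, err := rpc.DialHTTP("unix", sockname)
	if err != nil {
		log.Fatal("dialing:", err)
	}
	defer c.Close()

	var reply string
	if err := c.Call("Echo.Reply", "ping", &reply); err != nil {
		log.Fatal("call error:", err)
	}
	fmt.Println(reply)
}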
TODO: GetNewAddress does map to the `getnewaddress` RPC call; for now rpcclient doesn't have such a golang wrapper func.
func (client *BtcClient) GetNewAddress(account string) (string, error) {
	if len(account) == 0 {
		account = DEFAULT_ACCOUNT
	}

	address, err := client.rpcClient.GetNewAddress(account)
	if err != nil {
		return "", err
	}

	return address.String(), nil
}
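For context, a brief, self-contained sketch of how a wrapper like the one above might be wired up and called against btcd's rpcclient package follows. The BtcClient construction, the DEFAULT_ACCOUNT value, and the connection parameters are assumptions for illustration only; they are not part of this record.

// Sketch only: assumes github.com/btcsuite/btcd/rpcclient is available and that
// BtcClient simply wraps an *rpcclient.Client as in the record above.
package main

import (
	"fmt"
	"log"

	"github.com/btcsuite/btcd/rpcclient"
)

const DEFAULT_ACCOUNT = "default" // assumed value; not taken from the record

type BtcClient struct {
	rpcClient *rpcclient.Client
}

// GetNewAddress mirrors the wrapper above: fall back to a default account,
// delegate to rpcclient's GetNewAddress, and return the address as a string.
func (client *BtcClient) GetNewAddress(account string) (string, error) {
	if len(account) == 0 {
		account = DEFAULT_ACCOUNT
	}
	address, err := client.rpcClient.GetNewAddress(account)
	if err != nil {
		return "", err
	}
	return address.String(), nil
}

func main() {
	// Placeholder connection settings; HTTP POST mode is enough for a one-shot call.
	connCfg := &rpcclient.ConnConfig{
		Host:         "localhost:8332",
		User:         "rpcuser",
		Pass:         "rpcpass",
		HTTPPostMode: true,
		DisableTLS:   true,
	}
	rpcClient, err := rpcclient.New(connCfg, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer rpcClient.Shutdown()

	client := &BtcClient{rpcClient: rpcClient}
	addr, err := client.GetNewAddress("")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("new address:", addr)
}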
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (b *Bitcoind) GetNewAddress(account ...string) (addr string, err error) {\n\t// 0 or 1 account\n\tif len(account) > 1 {\n\t\terr = errors.New(\"Bad parameters for GetNewAddress: you can set 0 or 1 account\")\n\t\treturn\n\t}\n\n\tr, err := b.client.call(\"getnewaddress\", account)\n\tif err = handleError(err, &r); err != nil {\n\t\treturn\n\t}\n\n\terr = json.Unmarshal(r.Result, &addr)\n\treturn\n}", "func (i *Instance) GetNewAddress(_ trinary.Trytes, _ api.GetNewAddressOptions) (trinary.Hashes, error) {\n\treturn i.addressResultValue, i.addressResultError\n}", "func (serv *ExchangeServer) GetNewAddress(cp string) string {\n\tserv.wltMtx.Lock()\n\tdefer serv.wltMtx.Unlock()\n\taddrEntry, err := serv.wallets.NewAddresses(cp, 1)\n\tif err != nil {\n\t\tpanic(\"server get new address failed\")\n\t}\n\treturn addrEntry[0].Address\n}", "func (lu *litUiClient) NewAddress() (string, error) {\n\n\t// cointype of 0 means default, not mainnet.\n\t// this is ugly but does prevent mainnet use for now.\n\n\tvar cointype, numadrs uint32\n\n\t// if no arguments given, generate 1 new address.\n\t// if no cointype given, assume type 1 (testnet)\n\n\tnumadrs = 1\n\n\treply := new(litrpc.AddressReply)\n\n\targs := new(litrpc.AddressArgs)\n\targs.CoinType = cointype\n\targs.NumToMake = numadrs\n\n\tfmt.Printf(\"adr cointye: %d num:%d\\n\", args.CoinType, args.NumToMake)\n\terr := lu.rpccon.Call(\"LitRPC.Address\", args, reply)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tresponse := reply.WitAddresses[0]\n\t//\tfmt.Fprintf(color.Output, \"new adr(s): %s\\nold: %s\\n\",\n\t//\t\tlnutil.Address(reply.WitAddresses), lnutil.Address(reply.LegacyAddresses))\n\treturn response, nil // reply.WitAddresses[]\n\n}", "func (p *Poloniex) GenerateNewAddress(ctx context.Context, curr string) (string, error) {\n\ttype Response struct {\n\t\tSuccess int\n\t\tError string\n\t\tResponse string\n\t}\n\tresp := Response{}\n\tvalues := url.Values{}\n\tvalues.Set(\"currency\", curr)\n\n\terr := p.SendAuthenticatedHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, poloniexGenerateNewAddress, values, &resp)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif resp.Error != \"\" {\n\t\treturn \"\", errors.New(resp.Error)\n\t}\n\n\treturn resp.Response, nil\n}", "func (c *Client) GetNewAddressT(account string) (string, error) {\n\treturn c.GetNewAddressAsync(account).ReceiveT()\n}", "func (h *HitBTC) GenerateNewAddress(ctx context.Context, currency string) (DepositCryptoAddresses, error) {\n\tresp := DepositCryptoAddresses{}\n\terr := h.SendAuthenticatedHTTPRequest(ctx, exchange.RestSpot, http.MethodPost,\n\t\tapiV2CryptoAddress+\"/\"+currency,\n\t\turl.Values{},\n\t\totherRequests,\n\t\t&resp)\n\n\treturn resp, err\n}", "func (c *Client) GetNewAddressAndKey() (string, string, error) {\n\treturn c.GetNewAddressAndKeyAsync().Receive()\n}", "func (c *Constructor) newAddress(ctx context.Context) (string, error) {\n\tkp, err := keys.GenerateKeypair(c.curveType)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"%w unable to generate keypair\", err)\n\t}\n\n\taddress, _, err := c.helper.Derive(\n\t\tctx,\n\t\tc.network,\n\t\tkp.PublicKey,\n\t\tnil,\n\t)\n\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"%w: unable to derive address\", err)\n\t}\n\n\terr = c.helper.StoreKey(ctx, address, kp)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"%w: unable to store address\", err)\n\t}\n\n\tif err := c.handler.AddressCreated(ctx, address); err != nil {\n\t\treturn \"\", fmt.Errorf(\"%w: could not handle address creation\", 
err)\n\t}\n\n\treturn address, nil\n}", "func NewAddresser() Addresser { return &addresser{execute: rtnlExecute} }", "func (dcr *ExchangeWallet) NewAddress() (string, error) {\n\treturn dcr.DepositAddress()\n}", "func (f *FFS) NewAddr(ctx context.Context, name string, options ...NewAddressOption) (string, error) {\n\tr := &rpc.NewAddrRequest{Name: name}\n\tfor _, opt := range options {\n\t\topt(r)\n\t}\n\tresp, err := f.client.NewAddr(ctx, r)\n\treturn resp.Addr, err\n}", "func (walletAPI *WalletAPI) WalletNewAddress(protocol address.Protocol) (address.Address, error) {\n\treturn wallet.NewAddress(walletAPI.wallet.Wallet, protocol)\n}", "func newPeerAddr(ip net.IP, curTime time.Time) *peerAddr {\n\n\t// Create the new peer address.\n\tp := &peerAddr{\n\t\tip: ip,\n\t\tlastPing: ring.New(6),\n\t}\n\n\t// Record the current ping.\n\tp.lastPing.Value = curTime\n\n\treturn p\n}", "func NewAddress(street string) *Address {\n // Just return a dummy for STUB\n return &Address{}\n}", "func CreateAddress() *addresspb.Address {\n\ta := addresspb.Address{\n\t\tCorrespondanceAddr: &addresspb.Location{\n\t\t\tLocation: \"loc 1\",\n\t\t\tCity: &addresspb.City{\n\t\t\t\tName: \"Mumbai\",\n\t\t\t\tZipCode: \"400005\",\n\t\t\t\tRegion: addresspb.Division_WEST,\n\t\t\t},\n\t\t},\n\n\t\tAdditionalAddr: []*addresspb.Location{\n\t\t\t{\n\t\t\t\tLocation: \"loc 2\",\n\t\t\t\tCity: &addresspb.City{\n\t\t\t\t\tName: \"Srinagar\",\n\t\t\t\t\tZipCode: \"190001\",\n\t\t\t\t\tRegion: addresspb.Division_NORTH,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tLocation: \"loc 3\",\n\t\t\t\tCity: &addresspb.City{\n\t\t\t\t\tName: \"Imphal\",\n\t\t\t\t\tZipCode: \"795001\",\n\t\t\t\t\tRegion: addresspb.Division_EAST,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tLocation: \"loc 4\",\n\t\t\t\tCity: &addresspb.City{\n\t\t\t\t\tName: \"Mysore\",\n\t\t\t\t\tZipCode: \"570001\",\n\t\t\t\t\tRegion: addresspb.Division_SOUTH,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\treturn &a\n}", "func (eth *Eth) NewAddress(set bool) string {\n\tnewpair := crypto.GenerateNewKeyPair()\n\taddr := ethutil.Bytes2Hex(newpair.Address())\n\tring := eth.keyManager.KeyRing()\n\tring.AddKeyPair(newpair)\n\tif set {\n\t\teth.SetAddressN(ring.Len() - 1)\n\t}\n\treturn addr\n}", "func (b *Backend) NewAddress() wallet.Address {\n\taddr := Address{}\n\treturn &addr\n}", "func (wallet *Wallet) NewReceiveAddress() address.Address {\n\treturn wallet.addressManager.NewAddress()\n}", "func newNetAddress(addr net.Addr, services wire.ServiceFlag) (*wire.NetAddress, error) {\n\t// addr will be a net.TCPAddr when not using a proxy.\n\tif tcpAddr, ok := addr.(*net.TCPAddr); ok {\n\t\tip := tcpAddr.IP\n\t\tport := uint16(tcpAddr.Port)\n\t\tna := wire.NewNetAddressIPPort(ip, port, services)\n\t\treturn na, nil\n\t}\n\n\t// addr will be a socks.ProxiedAddr when using a proxy.\n\tif proxiedAddr, ok := addr.(*socks.ProxiedAddr); ok {\n\t\tip := net.ParseIP(proxiedAddr.Host)\n\t\tif ip == nil {\n\t\t\tip = net.ParseIP(\"0.0.0.0\")\n\t\t}\n\t\tport := uint16(proxiedAddr.Port)\n\t\tna := wire.NewNetAddressIPPort(ip, port, services)\n\t\treturn na, nil\n\t}\n\n\t// For the most part, addr should be one of the two above cases, but\n\t// to be safe, fall back to trying to parse the information from the\n\t// address string as a last resort.\n\thost, portStr, err := net.SplitHostPort(addr.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tip := net.ParseIP(host)\n\tport, err := strconv.ParseUint(portStr, 10, 16)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tna := 
wire.NewNetAddressIPPort(ip, uint16(port), services)\n\treturn na, nil\n}", "func (addressManager *AddressManager) NewAddress() address.Address {\n\treturn addressManager.Address(addressManager.lastAddressIndex + 1)\n}", "func (mc *MoacChain) createChainAddress(tradePassword string) (addr string, err error) {\n\n\tdefer func() {\n\t\tif re := recover(); re != nil {\n\t\t\terr = re.(error)\n\t\t}\n\t}()\n\n\terr = rpcClient.Call(&addr, \"personal_newAccount\", tradePassword)\n\n\treturn addr, err\n}", "func (a *Account) NewAddress() (btcutil.Address, error) {\n\t// Get current block's height and hash.\n\trpcc, err := accessClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbs, err := rpcc.BlockStamp()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Get next address from wallet.\n\taddr, err := a.KeyStore.NextChainedAddress(&bs, cfg.KeypoolSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Immediately write updated wallet to disk.\n\tAcctMgr.ds.ScheduleWalletWrite(a)\n\tif err := AcctMgr.ds.FlushAccount(a); err != nil {\n\t\treturn nil, fmt.Errorf(\"account write failed: %v\", err)\n\t}\n\n\t// Mark this new address as belonging to this account.\n\tAcctMgr.MarkAddressForAccount(addr, a)\n\n\t// Request updates from btcd for new transactions sent to this address.\n\tif err := rpcc.NotifyReceived([]btcutil.Address{addr}); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn addr, nil\n}", "func TestGenerateAddress(t *testing.T) {\n\tif _, err := GenerateAddress(); err != nil {\n\t\tt.Fatalf(\"Failed to generate new address: %v\", err)\n\t}\n}", "func (h *Harness) NewAddress() (btcaddr.Address, error) {\n\treturn h.wallet.NewAddress()\n}", "func (lu *litUiClient) Address() (string, error) {\n\n\t// cointype of 0 means default, not mainnet.\n\t// this is ugly but does prevent mainnet use for now.\n\n\tvar cointype, numadrs uint32\n\n\t// if no arguments given, generate 1 new address.\n\t// if no cointype given, assume type 1 (testnet)\n\n\tnumadrs = 0\n\n\treply := new(litrpc.AddressReply)\n\n\targs := new(litrpc.AddressArgs)\n\targs.CoinType = cointype\n\targs.NumToMake = numadrs\n\n\tfmt.Printf(\"adr cointye: %d num:%d\\n\", args.CoinType, args.NumToMake)\n\terr := lu.rpccon.Call(\"LitRPC.Address\", args, reply)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tresponse := reply.WitAddresses[len(reply.WitAddresses)-1]\n\treturn response, nil\n}", "func (t *SimpleChaincode) modifyAddress(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\tvar dni, address string // Entities\n\tvar err error\n\n\tif len(args) != 2 {\n\t\treturn shim.Error(\"Incorrect number of arguments. 
Expecting 3\")\n\t}\n\n\tdni = args[0]\n\taddress = args[1]\n\n\tvar personObject Person;\n\tpersonStored, err := stub.GetState(dni)\n\tif err != nil {\n\t\treturn shim.Error(\"Failed to get state\")\n\t}\n\terr = json.Unmarshal(personStored, &personObject)\n\t\n\tfmt.Println(\"Modifying person DNI \" + dni + \" with Name \" + personObject.Name)\n\tpersonObject.Address = address\n\n\tdata, err := json.Marshal(personObject)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\terr = stub.PutState(dni, data)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\treturn shim.Success(nil)\n}", "func (m *MockFullNode) WalletNewAddress(arg0 context.Context, arg1 byte) (address.Address, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"WalletNewAddress\", arg0, arg1)\n\tret0, _ := ret[0].(address.Address)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func newClient(address string) (c *client, err error) {\n\tif address, err = Host(address); err != nil {\n\t\treturn\n\t}\n\tc = new(client)\n\tif c.client, err = rpc.Dial(\"tcp\", address); err != nil {\n\t\treturn nil, err\n\t}\n\tc.address = address\n\treturn\n}", "func (a *Account) NewAddress() (btcutil.Address, error) {\n\t// Get current block's height and hash.\n\tbs, err := GetCurBlock()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Get next address from wallet.\n\taddr, err := a.Wallet.NextChainedAddress(&bs, cfg.KeypoolSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Immediately write updated wallet to disk.\n\tAcctMgr.ds.ScheduleWalletWrite(a)\n\tif err := AcctMgr.ds.FlushAccount(a); err != nil {\n\t\treturn nil, fmt.Errorf(\"account write failed: %v\", err)\n\t}\n\n\t// Mark this new address as belonging to this account.\n\tAcctMgr.MarkAddressForAccount(addr, a)\n\n\t// Request updates from btcd for new transactions sent to this address.\n\ta.ReqNewTxsForAddress(addr)\n\n\treturn addr, nil\n}", "func NewAddr(address, network string) net.Addr {\n\treturn &addr{address: address, network: network}\n}", "func NewAddr(address, network string) net.Addr {\n\treturn &addr{address: address, network: network}\n}", "func (a *addrBook) GetAddress() *KnownAddress {\n\t// Protect concurrent access.\n\ta.mtx.Lock()\n\tdefer a.mtx.Unlock()\n\n\tif a.numAddresses() == 0 {\n\t\treturn nil\n\t}\n\n\t// Use a 50% chance for choosing between tried and new table entries.\n\tif a.nTried > 0 && (a.nNew == 0 || a.rand.Intn(2) == 0) {\n\t\t// Tried entry.\n\t\tlarge := 1 << 30\n\t\tfactor := 1.0\n\t\tfor {\n\t\t\t// pick a random bucket.\n\t\t\tbucket := a.rand.Intn(len(a.addrTried))\n\t\t\tif len(a.addrTried[bucket]) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Pick a random entry in it\n\t\t\tvar ka *KnownAddress\n\t\t\tnth := a.rand.Intn(len(a.addrTried[bucket]))\n\t\t\tfor _, value := range a.addrTried[bucket] {\n\t\t\t\tif nth == 0 {\n\t\t\t\t\tka = value\n\t\t\t\t}\n\t\t\t\tnth--\n\t\t\t}\n\n\t\t\trandval := a.rand.Intn(large)\n\t\t\tif float64(randval) < (factor * ka.chance() * float64(large)) {\n\t\t\t\ta.logger.Debug(\"Selected %v from tried bucket\", ka.na.String())\n\t\t\t\treturn ka\n\t\t\t}\n\t\t\tfactor *= 1.2\n\t\t}\n\t} else {\n\t\t// new node.\n\t\t// XXX use a closure/function to avoid repeating this.\n\t\tlarge := 1 << 30\n\t\tfactor := 1.0\n\t\tfor {\n\t\t\t// Pick a random bucket.\n\t\t\tbucket := a.rand.Intn(len(a.addrNew))\n\t\t\tif len(a.addrNew[bucket]) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Then, a random entry in it.\n\t\t\tvar ka *KnownAddress\n\t\t\tnth := 
a.rand.Intn(len(a.addrNew[bucket]))\n\t\t\tfor _, value := range a.addrNew[bucket] {\n\t\t\t\tif nth == 0 {\n\t\t\t\t\tka = value\n\t\t\t\t}\n\t\t\t\tnth--\n\t\t\t}\n\t\t\trandval := a.rand.Intn(large)\n\t\t\tif float64(randval) < (factor * ka.chance() * float64(large)) {\n\t\t\t\ta.logger.Debug(\"Selected %v from new bucket\", ka.na.String())\n\t\t\t\treturn ka\n\t\t\t}\n\t\t\tfactor *= 1.2\n\t\t}\n\t}\n}", "func (c *Client) GetNewAddressAndKeyAsync() FutureGetNewAddressAndKeyResult {\n\tcmd := tcoinjson.NewGetNewAddressAndKeyCmd()\n\treturn c.sendCmd(cmd)\n}", "func newAddress(pos int, address []string, value int) string {\n\tvar temp [4]int64\n\n\tfor i := 0; i < 4; i++ {\n\t\tnumber, _ := strconv.ParseInt(address[i], 2, 64)\n\t\ttemp[i] = number\n\t}\n\n\ttemp[pos] = int64(value)\n\n\treturn fmt.Sprint(temp[0], \".\", temp[1], \".\", temp[2], \".\", temp[3])\n}", "func WithMakeDefault(makeDefault bool) NewAddressOption {\n\treturn func(r *rpc.NewAddrRequest) {\n\t\tr.MakeDefault = makeDefault\n\t}\n}", "func newAddressMutation(c config, op Op, opts ...addressOption) *AddressMutation {\n\tm := &AddressMutation{\n\t\tconfig: c,\n\t\top: op,\n\t\ttyp: TypeAddress,\n\t\tclearedFields: make(map[string]struct{}),\n\t}\n\tfor _, opt := range opts {\n\t\topt(m)\n\t}\n\treturn m\n}", "func ResolveNewAddresses(ctx context.Context, dns *dnsclient.Client, data *ResolverData) map[string]*am.ScanGroupAddress {\n\tnewRecords := make(map[string]*am.ScanGroupAddress, 0)\n\n\tnumHosts := len(data.NewAddresses)\n\trps := data.RequestsPerSecond\n\tif numHosts < rps {\n\t\trps = numHosts\n\t}\n\tpool := workerpool.New(rps)\n\n\tout := make(chan *results, numHosts) // how many results we expect\n\n\ttask := func(ctx context.Context, host string, out chan<- *results) func() {\n\t\treturn func() {\n\t\t\tif ctx.Err() != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// check if our group has been paused/deleted prior to continuing.\n\t\t\tgroup, err := data.Cache.GetGroupByIDs(data.Address.OrgID, data.Address.GroupID)\n\t\t\tif err == nil {\n\t\t\t\tif group.Paused || group.Deleted {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Ctx(ctx).Warn().Err(err).Msg(\"failed to get group from cache during resolve, continuing\")\n\t\t\t}\n\t\t\tr, err := dns.ResolveName(ctx, host)\n\t\t\tif err != nil {\n\t\t\t\tlog.Ctx(ctx).Error().Err(err).Msg(\"error\")\n\t\t\t}\n\t\t\tout <- &results{Hostname: host, R: r, Err: err}\n\t\t}\n\t}\n\n\t// submit all hosts to our worker pool\n\tfor newHost := range data.NewAddresses {\n\t\th := newHost\n\t\tpool.Submit(task(ctx, h, out))\n\t}\n\n\tpool.StopWait()\n\tclose(out)\n\n\tlog.Ctx(ctx).Info().Msg(\"all tasks completed\")\n\n\tfor result := range out {\n\t\tif result.Err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, rr := range result.R {\n\t\t\tfor _, ip := range rr.IPs {\n\t\t\t\tnewAddress := NewAddressFromDNS(data.Address, ip, result.Hostname, data.DiscoveryMethod, uint(rr.RecordType))\n\t\t\t\tif newAddress == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tnewAddress.ConfidenceScore = CalculateConfidence(ctx, data.Address, newAddress)\n\t\t\t\tnewRecords[newAddress.AddressHash] = newAddress\n\t\t\t\t//log.Ctx(ctx).Info().Str(\"hostname\", result.Hostname).Str(\"ip_address\", ip).Str(\"newAddress.AddressHash\", newAddress.AddressHash).Str(\"hash\", convert.HashAddress(ip, result.Hostname)).Msg(\"found new record\")\n\t\t\t}\n\t\t}\n\t}\n\tlog.Ctx(ctx).Info().Int(\"record_count\", len(newRecords)).Msg(\"found new records\")\n\treturn newRecords\n}", "func RawChangeAddress(c 
*rpcd.Client, currency string) (diviutil.Address, error) {\n\tnetwork := RetrieveNetwork(currency)\n\n\tparams := []json.RawMessage{[]byte(`\"legacy\"`)}\n\trawResp, err := c.RawRequest(\"getrawchangeaddress\", params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar addrStr string\n\terr = json.Unmarshal(rawResp, &addrStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\taddr, err := diviutil.DecodeAddress(addrStr, network)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !addr.IsForNet(network) {\n\t\treturn nil, fmt.Errorf(\"address %v is not intended for use on %v\", addrStr, network.Name)\n\t}\n\n\tif _, ok := addr.(*diviutil.AddressPubKeyHash); !ok {\n\t\treturn nil, fmt.Errorf(\"getrawchangeaddress: address %v is not P2PKH\", addr)\n\t}\n\n\treturn addr, nil\n}", "func (s IpNetwork_getRemoteHost_Params) NewAddress() (IpAddress, error) {\n\tss, err := NewIpAddress(s.Struct.Segment())\n\tif err != nil {\n\t\treturn IpAddress{}, err\n\t}\n\terr = s.Struct.SetPtr(0, ss.Struct.ToPtr())\n\treturn ss, err\n}", "func CreateQueryCustomerAddressListRequest() (request *QueryCustomerAddressListRequest) {\nrequest = &QueryCustomerAddressListRequest{\nRpcRequest: &requests.RpcRequest{},\n}\nrequest.InitWithApiInfo(\"BssOpenApi\", \"2017-12-14\", \"QueryCustomerAddressList\", \"\", \"\")\nreturn\n}", "func generateNewAccount() string {\n\taccount := crypto.GenerateAccount()\n\tpassphrase, err := mnemonic.FromPrivateKey(account.PrivateKey)\n\tif err != nil {\n\t\tfmt.Printf(\"Error creating new account: %s\\n\", err)\n\t} else {\n\t\tfmt.Printf(\"Created new account: %s\\n\", account.Address)\n\t\tfmt.Printf(\"Generated mnemonic: \\\"%s\\\"\\n\", passphrase)\n\t}\n\treturn account.Address.String()\n}", "func (r *RPCKeyRing) NewAddress(addrType lnwallet.AddressType, change bool,\n\taccount string) (btcutil.Address, error) {\n\n\tctxt, cancel := context.WithTimeout(context.Background(), r.rpcTimeout)\n\tdefer cancel()\n\n\trpcAddrType := walletrpc.AddressType_WITNESS_PUBKEY_HASH\n\tif addrType == lnwallet.NestedWitnessPubKey {\n\t\trpcAddrType = walletrpc.AddressType_NESTED_WITNESS_PUBKEY_HASH\n\t}\n\n\tremoteAddr, err := r.walletClient.NextAddr(ctxt, &walletrpc.AddrRequest{\n\t\tAccount: account,\n\t\tType: rpcAddrType,\n\t\tChange: change,\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error deriving address on remote \"+\n\t\t\t\"signer instance: %v\", err)\n\t}\n\n\tlocalAddr, err := r.WalletController.NewAddress(\n\t\taddrType, change, account,\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error deriving address on local \"+\n\t\t\t\"wallet instance: %v\", err)\n\t}\n\n\t// We need to make sure we've derived the same address on the remote\n\t// signing machine, otherwise we don't know whether we're at the same\n\t// address index (and therefore the same wallet state in general).\n\tif localAddr.String() != remoteAddr.Addr {\n\t\treturn nil, fmt.Errorf(\"error deriving address on remote \"+\n\t\t\t\"signing instance, got different address (%s) than \"+\n\t\t\t\"on local wallet instance (%s)\", remoteAddr.Addr,\n\t\t\tlocalAddr.String())\n\t}\n\n\treturn localAddr, nil\n}", "func (a *Account) NewChangeAddress() (btcutil.Address, error) {\n\t// Get current block's height and hash.\n\trpcc, err := accessClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbs, err := rpcc.BlockStamp()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Get next chained change address from wallet.\n\taddr, err := a.KeyStore.ChangeAddress(&bs, cfg.KeypoolSize)\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\n\t// Immediately write updated wallet to disk.\n\tAcctMgr.ds.ScheduleWalletWrite(a)\n\tif err := AcctMgr.ds.FlushAccount(a); err != nil {\n\t\treturn nil, fmt.Errorf(\"account write failed: %v\", err)\n\t}\n\n\t// Mark this new address as belonging to this account.\n\tAcctMgr.MarkAddressForAccount(addr, a)\n\n\t// Request updates from btcd for new transactions sent to this address.\n\tif err := rpcc.NotifyReceived([]btcutil.Address{addr}); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn addr, nil\n}", "func NewAddress() platformservices.Address {\n\n\tvar lat = 37.7917146\n\tvar lng = -122.397054\n\n\treturn platformservices.Address{\n\t\tAddressType: platformservices.AddressTypeLegal,\n\t\tStreetAddress: \"100 Main Street\",\n\t\tCity: \"San Francisco\",\n\t\tState: \"CA\",\n\t\tCountry: \"US\",\n\t\tPostalCode: \"94100\",\n\t\tLatitude: &lat,\n\t\tLongitude: &lng,\n\t}\n}", "func CreateQueryCustomerAddressListResponse() (response *QueryCustomerAddressListResponse) {\nresponse = &QueryCustomerAddressListResponse{\nBaseResponse: &responses.BaseResponse{},\n}\nreturn\n}", "func (s *SkyСoinService) GenerateAddr(pubStr string) (maddr *AddressResponse, err error) {\n\tmaddr = &AddressResponse{}\n\n\tpubKey, err := cipher.PubKeyFromHex(pubStr)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\taddress := cipher.AddressFromPubKey(pubKey)\n\n\tmaddr.Address = address.String()\n\treturn maddr, nil\n}", "func main() {\n\tvar template string\n\tvar network string\n\tvar generate bool\n\tvar showVer bool\n\tflag.StringVar(&template, \"t\", \"\",\"template\")\n\tflag.StringVar(&network, \"n\",defaultNetwork ,\"network [mainnet|testnet|0.9testnet|mixnet|privnet]\")\n\tflag.BoolVar(&generate, \"new\", false, \"generate new address\")\n\tflag.BoolVar(&showVer, \"version\", false, \"show version\")\n\tflag.Parse()\n\tif showVer {\n\t\tversion();\n\t\tos.Exit(0);\n\t}\n\tp, err := getParams(network);\n\texitIfErr(err)\n\tif template == \"\" {\n\t\ttemplate = genTemplateByParams(p,network)\n\t}\n\taddr, err := getAddr(template,p,generate)\n\texitIfErr(err)\n\tfmt.Printf(\" network = %s \\n\", network)\n\tfmt.Printf(\"template = %s \\n\", template)\n\tfmt.Printf(\" addr = %v \\n\", string(addr));\n}", "func (a *Client) PostReturnAddressesGet(params *PostReturnAddressesGetParams, authInfo runtime.ClientAuthInfoWriter) (*PostReturnAddressesGetOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewPostReturnAddressesGetParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"PostReturnAddressesGet\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/post/return-addresses\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &PostReturnAddressesGetReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*PostReturnAddressesGetOK), nil\n\n}", "func makeAddress(keyPair *keypair.KeyPair, testnet bool) *account.Account {\n\n\treturn &account.Account{\n\t\tAccountInterface: &account.ED25519Account{\n\t\t\tTest: testnet,\n\t\t\tPublicKey: keyPair.PublicKey[:],\n\t\t},\n\t}\n}", "func (ns *NetworkServer) newDevAddr(context.Context, *ttnpb.EndDevice) types.DevAddr {\n\tvar devAddr types.DevAddr\n\trandom.Read(devAddr[:])\n\tprefix := 
ns.devAddrPrefixes[random.Intn(len(ns.devAddrPrefixes))]\n\treturn devAddr.WithPrefix(prefix)\n}", "func CreateAddress(address models.RequestAddress) (bool, models.ResponseAddress, error) {\n\n\t//Create request\n\trequest := models.Request{}\n\trequest.AddBody(address)\n\trequest.SetUri(\"https://api.easypost.com/v2/addresses\")\n\trequest.SetMethod(\"POST\")\n\n\t//Send request\n\tresponseBody, err := SendRequest(request)\n\n\t//Initialize response address\n\tresponseAddress := models.ResponseAddress{}\n\n\terr = json.Unmarshal(responseBody, &responseAddress)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn false, responseAddress, fmt.Errorf(\"Unrecognized response from easypost %s\", err.Error())\n\t}\n\n\treturn responseAddress.Verifications.Delivery.Success, responseAddress, err\n}", "func NewAddress(path, addr string, net Network, change, addrIndex uint32) *Address {\n\treturn &Address{path: path, addr: addr, net: net, change: change, addrIndex: addrIndex}\n}", "func syncNewPeer(nodeMessage NodeMessage) NodeMessage {\n\tlog.Printf(\"Received new peer %v:%v\", nodeMessage.IpAddress, nodeMessage.Port)\n\tnewPeer := repository.Peer {\n\t\tIpAddress: nodeMessage.IpAddress,\n\t\tPort: nodeMessage.Port,\n\t}\n\tsavePeer(newPeer)\n\treturn nodeMessage\n}", "func (mock *ClientConnMock) NewAddressCalls() []struct {\n\tAddresses []resolver.Address\n} {\n\tvar calls []struct {\n\t\tAddresses []resolver.Address\n\t}\n\tmock.lockNewAddress.RLock()\n\tcalls = mock.calls.NewAddress\n\tmock.lockNewAddress.RUnlock()\n\treturn calls\n}", "func newClient(addr string) (discovery.DiscoveryClient, *grpc.ClientConn, error) {\n\tconn, err := grpc.Dial(addr, grpc.WithInsecure())\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn discovery.NewDiscoveryClient(conn), conn, nil\n}", "func (w *Wallet) NewAddress(account uint32,\n\tscope waddrmgr.KeyScope) (btcutil.Address, er.R) {\n\n\tchainClient, err := w.requireChainClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar (\n\t\taddr btcutil.Address\n\t\tprops *waddrmgr.AccountProperties\n\t)\n\terr = walletdb.Update(w.db, func(tx walletdb.ReadWriteTx) er.R {\n\t\taddrmgrNs := tx.ReadWriteBucket(waddrmgrNamespaceKey)\n\t\tvar err er.R\n\t\taddr, props, err = w.newAddress(addrmgrNs, account, scope)\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Notify the rpc server about the newly created address.\n\terr = chainClient.NotifyReceived([]btcutil.Address{addr})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tw.NtfnServer.notifyAccountProperties(props)\n\n\treturn addr, nil\n}", "func CreateAddress(amount int) (string, []Wallet) {\n\n\twallets := []Wallet{}\n\tfor i := 0; i < amount; i++ {\n\t\twif, _ := network.CreatePrivateKey()\n\t\taddress, _ := network.GetAddress(wif)\n\t\tvar wallet = Wallet{ADDRESS: address.EncodeAddress(), PRIVKEY: wif.String()}\n\t\twallets = append(wallets, wallet)\n\t}\n\n\tjson := ConvertToJSON(&wallets)\n\n\tlog.Println(\"Generated\", amount, \"addresses\")\n\n\treturn json, wallets\n\n}", "func (a *Account) NewChangeAddress() (btcutil.Address, error) {\n\t// Get current block's height and hash.\n\tbs, err := GetCurBlock()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Get next chained change address from wallet.\n\taddr, err := a.Wallet.ChangeAddress(&bs, cfg.KeypoolSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Immediately write updated wallet to disk.\n\tAcctMgr.ds.ScheduleWalletWrite(a)\n\tif err := AcctMgr.ds.FlushAccount(a); err != nil {\n\t\treturn nil, 
fmt.Errorf(\"account write failed: %v\", err)\n\t}\n\n\t// Mark this new address as belonging to this account.\n\tAcctMgr.MarkAddressForAccount(addr, a)\n\n\t// Request updates from btcd for new transactions sent to this address.\n\ta.ReqNewTxsForAddress(addr)\n\n\treturn addr, nil\n}", "func (a *addrBook) NeedNewAddresses() bool {\n\ta.mtx.Lock()\n\tif a.nNew < a.nTried && a.nNew < needAddressThreshold/2 {\n\t\ta.mtx.Unlock()\n\t\treturn true\n\t}\n\ta.mtx.Unlock()\n\n\treturn false\n}", "func (a *addrBook) updateAddress(netAddr, srcAddr *node.Info) {\n\n\tif a.IsLocalAddress(netAddr) {\n\t\ta.logger.Debug(\"skipping adding a local address %v\", netAddr.String())\n\t\treturn\n\t}\n\t//Filter out non-routable addresses. Note that non-routable\n\t//also includes invalid and localNode addresses.\n\tif !IsRoutable(netAddr.IP) && IsRoutable(srcAddr.IP) {\n\t\ta.logger.Debug(\"skipping adding non routable address%v\", netAddr.String())\n\t\t// XXX: this makes tests work with unroutable addresses(loopback)\n\t\treturn\n\t}\n\n\tka := a.lookup(netAddr.PublicKey())\n\tif ka != nil {\n\t\t// TODO: only update addresses periodically.\n\t\t// Update the last seen time and services.\n\t\t// note that to prevent causing excess garbage on getaddr\n\t\t// messages the netaddresses in addrmaanger are *immutable*,\n\t\t// if we need to change them then we replace the pointer with a\n\t\t// new copy so that we don't have to copy every na for getaddr.\n\t\tka.lastSeen = time.Now()\n\n\t\t// If already in tried, we have nothing to do here.\n\t\tif ka.tried {\n\t\t\treturn\n\t\t}\n\n\t\t// Already at our max?\n\t\tif ka.refs == newBucketsPerAddress {\n\t\t\treturn\n\t\t}\n\n\t\t// The more entries we have, the less likely we are to add more.\n\t\t// likelihood is 2N.\n\t\t//factor := int32(2 * ka.refs)\n\t\t//if a.rand.Int31n(factor) != 0 {\n\t\treturn\n\t\t//}\n\t}\n\t// Make a copy of the net address to avoid races since it is\n\t// updated elsewhere in the addrmanager code and would otherwise\n\t// change the actual netaddress on the peer.\n\tka = &KnownAddress{na: netAddr, srcAddr: srcAddr, lastSeen: time.Now()}\n\ta.addrIndex[netAddr.ID] = ka\n\ta.nNew++\n\t// XXX time penalty?\n\n\tbucket := a.getNewBucket(netAddr.IP, srcAddr.IP)\n\n\t// Already exists?\n\tif _, ok := a.addrNew[bucket][netAddr.ID]; ok {\n\t\treturn\n\t}\n\n\t// Enforce max addresses.\n\tif len(a.addrNew[bucket]) > newBucketSize {\n\t\ta.logger.Debug(\"new bucket is full, expiring old\")\n\t\ta.expireNew(bucket)\n\t}\n\n\t// Add to new bucket.\n\tka.refs++\n\ta.addrNew[bucket][netAddr.ID] = ka\n\n\ta.logger.Debug(\"Added new address %s for a total of %d addresses\", netAddr.String(), a.nTried+a.nNew)\n}", "func (r FutureGetNewAddressAndKeyResult) Receive() (string, string, error) {\n\tres, err := receiveFuture(r)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\ttype AddrKey struct {\n\t\tAddress string\n\t\tSecret string\n\t}\n\t// Unmarshal result as a AddrKey.\n\tvar addrKey AddrKey\n\terr = json.Unmarshal(res, &addrKey)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\treturn addrKey.Address, addrKey.Secret, nil\n}", "func createAddress(creator *Account) Word256 {\n\tnonce := creator.Nonce\n\tcreator.Nonce += 1\n\ttemp := make([]byte, 32+8)\n\tcopy(temp, creator.Address[:])\n\tPutUint64BE(temp[32:], nonce)\n\treturn LeftPadWord256(sha3.Sha3(temp)[:20])\n}", "func NewAddress(pk ctypes.PubKey) *Address {\n\ta := Address{pk}\n\treturn &a\n}", "func (x *fastReflection_AddressBytesToStringResponse) New() protoreflect.Message 
{\n\treturn new(fastReflection_AddressBytesToStringResponse)\n}", "func (trcn *TestRetrievalClientNode) GetKnownAddresses(ctx context.Context, p retrievalmarket.RetrievalPeer, tok shared.TipSetToken) ([]ma.Multiaddr, error) {\n\ttrcn.receivedKnownAddresses[p] = struct{}{}\n\taddrs, ok := trcn.knownAddreses[p]\n\tif !ok {\n\t\treturn nil, errors.New(\"Provider not found\")\n\t}\n\treturn addrs, nil\n}", "func ValidateAddress(property db.Property, apiKey string) (*LobAddress, error) {\n\n\taddressRequestURL := fmt.Sprintf(\"%sv1/us_verifications/\", lobEndpoint)\n\trequestBody, err := json.Marshal(map[string]string{\n\t\t\"primary_line\": property.AddressOne,\n\t\t\"city\": property.City,\n\t\t\"state\": property.State,\n\t\t\"zip_code\": property.ZipCode,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"POST\", addressRequestURL, bytes.NewBuffer(requestBody))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.SetBasicAuth(apiKey, \"\")\n\n\tclient := &http.Client{}\n resp, err := client.Do(req)\n\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar lobResponse LobResponse \n\terr = json.Unmarshal(body, &lobResponse)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar addressOne, addressTwo, city, state, zip string\n\tif lobResponse.PrimaryLine != \"\" {\n\t\taddressOne = lobResponse.PrimaryLine\n\t} else {\n\t\taddressOne = property.AddressOne\n\t}\n\n\tif lobResponse.SecondaryLine != \"\" {\n\t\taddressTwo = lobResponse.SecondaryLine\n\t} else {\n\t\taddressTwo = property.AddressTwo\n\t}\n\n\tif lobResponse.Components != nil {\n\t\tif lobResponse.Components.City != \"\" {\n\t\t\tcity = lobResponse.Components.City\n\t\t} else {\n\t\t\tcity = property.City\n\t\t}\n\t\tif lobResponse.Components.State != \"\" {\n\t\t\tstate = lobResponse.Components.State\n\t\t} else {\n\t\t\tstate = property.State\n\t\t}\n\t\tif lobResponse.Components.ZipCode != \"\" && lobResponse.Components.ZipCodePlusFour != \"\" {\n\t\t\tzip = lobResponse.Components.ZipCode + \" \" + lobResponse.Components.ZipCodePlusFour\n\t\t} else {\n\t\t\tzip = property.ZipCode\n\t\t}\n\t}\n\n\treturn &LobAddress{\n\t\tAddressLineOne: addressOne,\n\t\tAddressLineTwo: addressTwo,\n\t\tAddressCity: city,\n\t\tAddressState: state,\n\t\tAddressZip: zip,\n\t}, nil\n\t\n}", "func newPeerClientConnection(block bool) (*grpc.ClientConn, error) {\n\treturn newPeerClientConnectionWithAddress(block, viper.GetString(\"service.cliaddress\"))\n}", "func NewFromString(addr string) (*Address, error) {\n\tlegaddr, err := legacy.Decode(addr)\n\tif err == nil {\n\t\taddr, err := NewFromLegacy(legaddr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn addr, nil\n\t}\n\n\tcashaddr, err := cashaddress.Decode(addr, cashaddress.MainNet)\n\tif err == nil {\n\t\taddr, err := NewFromCashAddress(cashaddr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn addr, nil\n\t}\n\n\treturn nil, errors.New(\"unable to decode address\")\n}", "func GeocodeAnAddress() {\n\tgeocodingService := geocoding.NewGeocodingService(\"YOUR_MICRO_TOKEN_HERE\")\n\trsp, _ := geocodingService.Lookup(geocoding.LookupRequest{\n\t\tAddress: \"10 russell st\",\n\t\tCity: \"london\",\n\t\tCountry: \"uk\",\n\t\tPostcode: \"wc2b\",\n\t})\n\tfmt.Println(rsp)\n}", "func (s) TestPickFirst_NewAddressWhileBlocking(t *testing.T) {\n\tcc, r, backends := setupPickFirst(t, 2)\n\taddrs := stubBackendsToResolverAddrs(backends)\n\tr.UpdateState(resolver.State{Addresses: 
addrs})\n\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tif err := checkPickFirst(ctx, cc, addrs[0].Addr); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Send a resolver update with no addresses. This should push the channel into\n\t// TransientFailure.\n\tr.UpdateState(resolver.State{})\n\tfor state := cc.GetState(); state != connectivity.TransientFailure; state = cc.GetState() {\n\t\tif !cc.WaitForStateChange(ctx, state) {\n\t\t\tt.Fatalf(\"timeout waiting for state change. got %v; want %v\", state, connectivity.TransientFailure)\n\t\t}\n\t}\n\n\tdoneCh := make(chan struct{})\n\tclient := testpb.NewTestServiceClient(cc)\n\tgo func() {\n\t\t// The channel is currently in TransientFailure and this RPC will block\n\t\t// until the channel becomes Ready, which will only happen when we push a\n\t\t// resolver update with a valid backend address.\n\t\tif _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {\n\t\t\tt.Errorf(\"EmptyCall() = %v, want <nil>\", err)\n\t\t}\n\t\tclose(doneCh)\n\t}()\n\n\t// Make sure that there is one pending RPC on the ClientConn before attempting\n\t// to push new addresses through the name resolver. If we don't do this, the\n\t// resolver update can happen before the above goroutine gets to make the RPC.\n\tfor {\n\t\tif err := ctx.Err(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\ttcs, _ := channelz.GetTopChannels(0, 0)\n\t\tif len(tcs) != 1 {\n\t\t\tt.Fatalf(\"there should only be one top channel, not %d\", len(tcs))\n\t\t}\n\t\tstarted := tcs[0].ChannelData.CallsStarted\n\t\tcompleted := tcs[0].ChannelData.CallsSucceeded + tcs[0].ChannelData.CallsFailed\n\t\tif (started - completed) == 1 {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(defaultTestShortTimeout)\n\t}\n\n\t// Send a resolver update with a valid backend to push the channel to Ready\n\t// and unblock the above RPC.\n\tr.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: backends[0].Address}}})\n\n\tselect {\n\tcase <-ctx.Done():\n\t\tt.Fatal(\"Timeout when waiting for blocked RPC to complete\")\n\tcase <-doneCh:\n\t}\n}", "func createRPCClient(serverIPPort string) *rpc.Client {\n\t// parse given string address\n\traddr, err := net.ResolveTCPAddr(\"tcp\", serverIPPort)\n\tif err != nil {\n\t\tlogger.Println(err)\n return nil\n\t}\n\t// dial rpc address\n\tconn, err := net.DialTCP(\"tcp\", nil, raddr)\n\tif err != nil {\n\t\tlogger.Println(err)\n return nil\n\t}\n\t// instantiate rpc client\n\tclient := rpc.NewClient(conn)\n\n\treturn client\n}", "func NewAddress(i, ii int) *Address {\n\tn := strconv.Itoa(i)\n\treturn &Address{\n\t\tId: time.Now().UnixNano(),\n\t\tStreet: \"10\" + n + \" Somewhere Lane\",\n\t\tCity: \"Awesome City \" + n,\n\t\tState: func() string {\n\t\t\tif i%2 == 0 {\n\t\t\t\treturn \"PA\"\n\t\t\t}\n\t\t\treturn \"CA\"\n\t\t}(),\n\t\tZip: ii,\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\tgo setupAddressObserver(mgr, C)\n\treturn &ReconcileActiveMQArtemisAddress{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func genAddress(ip string, port int) string {\n\treturn fmt.Sprintf(\"%s:%d\", ip, port)\n}", "func (a *StartupConfigurationApiService) GenerateNewNodeId(ctx _context.Context) ApiGenerateNewNodeIdRequest {\n\treturn ApiGenerateNewNodeIdRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}", "func updateAddr(newaddr string) error {\n\trec, err := getRec()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error getting DNS record: %v\", err)\n\t}\n\tif 
rec.Content == newaddr {\n\t\tlog.Printf(\"DNS record matches current IP\\n\")\n\t\treturn nil\n\t}\n\targs := url.Values{}\n\targs.Set(\"a\", \"rec_edit\")\n\targs.Set(\"tkn\", TKN)\n\targs.Set(\"email\", EMAIL)\n\targs.Set(\"z\", ZONE)\n\targs.Set(\"type\", \"A\")\n\targs.Set(\"id\", rec.Rec_id)\n\targs.Set(\"name\", rec.Name)\n\targs.Set(\"content\", newaddr)\n\targs.Set(\"ttl\", \"1\") // 1=Automatic, otherwise set between 120 and 4,294,967,295 seconds\n\targs.Set(\"service_mode\", \"1\") // 1 = orange cloud, 0 = grey cloud\n\tresp, err := http.PostForm(APIURL, args)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error posting request: %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\tdec := json.NewDecoder(resp.Body)\n\t// not exactly right, but ApiRecLoadAll will get us the result and msg\n\tvar m ApiRecLoadAll\n\terr = dec.Decode(&m)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error decoding response: %v\", err)\n\t}\n\tif m.Result != \"success\" {\n\t\treturn fmt.Errorf(\"API call returned error: %v\", m.Msg)\n\t}\n\tlog.Printf(\"Successfully updated DNS record.\\n\")\n\treturn nil\n}", "func (o *PartnerCustomerCreateRequest) GetAddressOk() (*PartnerEndCustomerAddress, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Address, true\n}", "func (client *PublicIPAddressesClient) getCreateRequest(ctx context.Context, resourceGroupName string, publicIPAddressName string, options *PublicIPAddressesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif publicIPAddressName == \"\" {\n\t\treturn nil, errors.New(\"parameter publicIPAddressName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{publicIpAddressName}\", url.PathEscape(publicIPAddressName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func newManagedAddressWithoutPrivKey(m *Manager, account uint32, pubKey chainec.PublicKey, compressed bool) (*managedAddress, error) {\n\t// Create a pay-to-pubkey-hash address from the public key.\n\tvar pubKeyHash []byte\n\tif compressed {\n\t\tpubKeyHash = abcutil.Hash160(pubKey.SerializeCompressed())\n\t} else {\n\t\tpubKeyHash = abcutil.Hash160(pubKey.SerializeUncompressed())\n\t}\n\taddress, err := abcutil.NewAddressPubKeyHash(pubKeyHash, m.chainParams,\n\t\tchainec.ECTypeSecp256k1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &managedAddress{\n\t\tmanager: m,\n\t\taddress: address,\n\t\taccount: account,\n\t\timported: false,\n\t\tinternal: false,\n\t\tmultisig: false,\n\t\tcompressed: compressed,\n\t\tpubKey: pubKey,\n\t}, nil\n}", "func (a *addrBook) updateAddress(addr, src 
*addrInfo) {\n\tif !IsRoutable(addr.IP) && IsRoutable(src.IP) {\n\t\ta.logger.Debug(\"skipped non routable address received from routable ip\",\n\t\t\tlog.String(\"received\", addr.IP.String()),\n\t\t\tlog.String(\"from\", src.IP.String()),\n\t\t)\n\t\treturn\n\t}\n\tka := a.lookup(addr.ID)\n\tif ka != nil {\n\t\t// TODO: only update addresses periodically.\n\t\t// Update the last seen time and services.\n\t\t// note that to prevent causing excess garbage on getaddr\n\t\t// messages the netaddresses in addrmaanger are *immutable*,\n\t\t// if we need to change them then we replace the pointer with a\n\t\t// new copy so that we don't have to copy every na for getaddr.\n\t\tka.LastSeen = time.Now()\n\n\t\t// If already in tried, we have nothing to do here.\n\t\tif ka.tried {\n\t\t\treturn\n\t\t}\n\n\t\t// Already at our max?\n\t\tif ka.refs == newBucketsPerAddress {\n\t\t\treturn\n\t\t}\n\n\t\t// The more entries we have, the less likely we are to add more.\n\t\t// likelihood is 2N.\n\t\t// factor := int32(2 * ka.refs)\n\t\t// if a.rand.Int31n(factor) != 0 {\n\t\treturn\n\t\t//}\n\t}\n\t// Make a copy of the net address to avoid races since it is\n\t// updated elsewhere in the addrmanager code and would otherwise\n\t// change the actual netaddress on the peer.\n\tka = &knownAddress{Addr: addr, SrcAddr: src, LastSeen: time.Now()}\n\ta.addrIndex[addr.ID] = ka\n\ta.nNew++\n\t// XXX time penalty?\n\n\tbucket := a.getNewBucket(addr.IP, src.IP)\n\n\t// Already exists?\n\tif _, ok := a.addrNew[bucket][addr.ID]; ok {\n\t\treturn\n\t}\n\n\t// Enforce max addresses.\n\tif len(a.addrNew[bucket]) > newBucketSize {\n\t\ta.logger.Debug(\"new bucket is full, expiring old\")\n\t\ta.expireNew(bucket)\n\t}\n\n\t// Add to new bucket.\n\tka.refs++\n\ta.addrNew[bucket][addr.ID] = ka\n\n\ta.logger.Debug(\"added new address %s for a total of %d addresses\", addr.RawAddr, a.nTried+a.nNew)\n}", "func (handler *Handler) CreateAddress(ctx context.Context, in *candyland_grpc.CreateAddressRequest) (*candyland_grpc.CreateAddressReply, error) {\n\terr := handler.CreateAddressUsecase.Create(in.UserId, in.CountryId, in.StateId, in.CityId, in.StreetId, in.Number, in.Complement.String())\n\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn nil, err\n\t}\n\n\tres := &candyland_grpc.CreateAddressReply{\n\t\tWasCreated: true,\n\t}\n\n\treturn res, nil\n}", "func (c *Client) newRequest(method, path string) (*request, error) {\n\tbase, _ := url.Parse(c.config.Address)\n\tu, err := url.Parse(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr := &request{\n\t\tconfig: &c.config,\n\t\tmethod: method,\n\t\turl: &url.URL{\n\t\t\tScheme: base.Scheme,\n\t\t\tUser: base.User,\n\t\t\tHost: base.Host,\n\t\t\tPath: u.Path,\n\t\t\tRawPath: u.RawPath,\n\t\t},\n\t\theader: make(http.Header),\n\t\tparams: make(map[string][]string),\n\t}\n\tif c.config.Region != \"\" {\n\t\tr.params.Set(\"region\", c.config.Region)\n\t}\n\tif c.config.Namespace != \"\" {\n\t\tr.params.Set(\"namespace\", c.config.Namespace)\n\t}\n\tif c.config.WaitTime != 0 {\n\t\tr.params.Set(\"wait\", durToMsec(r.config.WaitTime))\n\t}\n\tif c.config.SecretID != \"\" {\n\t\tr.token = r.config.SecretID\n\t}\n\n\t// Add in the query parameters, if any\n\tfor key, values := range u.Query() {\n\t\tfor _, value := range values {\n\t\t\tr.params.Add(key, value)\n\t\t}\n\t}\n\n\tfor key, values := range c.config.Headers {\n\t\tr.header[key] = values\n\t}\n\n\treturn r, nil\n}", "func (x *fastReflection_MsgSetWithdrawAddressResponse) New() protoreflect.Message {\n\treturn 
new(fastReflection_MsgSetWithdrawAddressResponse)\n}", "func (as *AddressService) Create(name string) (*Address, error) {\n\tif isEmptyStr(as.assetCode) {\n\t\treturn nil, errAssetCode\n\t}\n\n\tvar (\n\t\taddresses []*Address\n\t\tbody struct {\n\t\t\tAddress struct {\n\t\t\t\tName string `json:\"name\"`\n\t\t\t} `json:\"address\"`\n\t\t}\n\t)\n\tbody.Address.Name = name\n\n\tif err := as.client.Post(buildString(\"address/deposit/\", as.assetCode), &body, &addresses); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn addresses[0], nil\n}", "func (n *PeernotifyNode) getNextAddress() []byte {\n\treturn []byte{}\n}", "func NewAddress(address common.Address, backend bind.ContractBackend) (*Address, error) {\n\tcontract, err := bindAddress(address, backend, backend, backend)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Address{AddressCaller: AddressCaller{contract: contract}, AddressTransactor: AddressTransactor{contract: contract}, AddressFilterer: AddressFilterer{contract: contract}}, nil\n}", "func NewAddress(address common.Address, backend bind.ContractBackend) (*Address, error) {\n\tcontract, err := bindAddress(address, backend, backend, backend)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Address{AddressCaller: AddressCaller{contract: contract}, AddressTransactor: AddressTransactor{contract: contract}, AddressFilterer: AddressFilterer{contract: contract}}, nil\n}", "func NewAddress(address common.Address, backend bind.ContractBackend) (*Address, error) {\n\tcontract, err := bindAddress(address, backend, backend, backend)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Address{AddressCaller: AddressCaller{contract: contract}, AddressTransactor: AddressTransactor{contract: contract}, AddressFilterer: AddressFilterer{contract: contract}}, nil\n}", "func NewAddress(address common.Address, backend bind.ContractBackend) (*Address, error) {\n\tcontract, err := bindAddress(address, backend, backend, backend)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Address{AddressCaller: AddressCaller{contract: contract}, AddressTransactor: AddressTransactor{contract: contract}, AddressFilterer: AddressFilterer{contract: contract}}, nil\n}", "func NewAddress(address common.Address, backend bind.ContractBackend) (*Address, error) {\n\tcontract, err := bindAddress(address, backend, backend, backend)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Address{AddressCaller: AddressCaller{contract: contract}, AddressTransactor: AddressTransactor{contract: contract}, AddressFilterer: AddressFilterer{contract: contract}}, nil\n}", "func (s *Server) handleGetAddrCmd(p Peer) error {\n\taddrs := s.discovery.GoodPeers()\n\tif len(addrs) > payload.MaxAddrsCount {\n\t\taddrs = addrs[:payload.MaxAddrsCount]\n\t}\n\talist := payload.NewAddressList(len(addrs))\n\tts := time.Now()\n\tfor i, addr := range addrs {\n\t\t// we know it's a good address, so it can't fail\n\t\tnetaddr, _ := net.ResolveTCPAddr(\"tcp\", addr.Address)\n\t\talist.Addrs[i] = payload.NewAddressAndTime(netaddr, ts, addr.Capabilities)\n\t}\n\treturn p.EnqueueP2PMessage(NewMessage(CMDAddr, alist))\n}", "func (_Crowdsale *CrowdsaleCaller) RoyaltyCrowdsaleAddress(opts *bind.CallOpts) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _Crowdsale.contract.Call(opts, out, \"royaltyCrowdsaleAddress\")\n\treturn *ret0, err\n}", "func getAddress(mn string) string {\n\tsk, err := mnemonic.ToPrivateKey(mn)\n\tif err != nil {\n\t\tfmt.Printf(\"error recovering account: 
%s\\n\", err)\n\t\treturn \"\"\n\t}\n\tpk := sk.Public()\n\tvar a types.Address\n\tcpk := pk.(ed25519.PublicKey)\n\tcopy(a[:], cpk[:])\n\tfmt.Printf(\"Address: %s\\n\", a.String())\n\taddress := a.String()\n\treturn address\n}", "func createListeningAddress(network, address string) string {\n\treturn strings.Join([]string{network, address}, \"/\")\n}", "func newCreateClientTunnelReply(cidr *net.IPNet, tid int64) (f linkFrame) {\n f.method = CLIENT_TUN_NEW\n f.param = map[string]interface{} { K_CIDR : cidr.String() }\n f.response = tid\n return\n}", "func rstPopulateContract(w http.ResponseWriter, r *http.Request) {\n\n\terr := r.ParseMultipartForm(32 << 20)\n\tif err != nil {\n\t\tfmt.Printf(\"No change data: Parsing multipart form: %v\\n\", err.Error())\n\t\treturn\n\t}\n\n\t//isCurl = r.MultipartForm.Value[\"Curl\"]\n\tif len(r.MultipartForm.Value[\"ContrAddr\"]) == 0 {\n\t\thttp.Error(w, GeneralError{\"No contrAddr is provided\"}.Error(),\n\t\t\thttp.StatusInternalServerError)\n\t\treturn\n\t}\n\tcontrAddrStr := r.MultipartForm.Value[\"ContrAddr\"][0]\n\tif common.IsHexAddress(contrAddrStr) == false {\n\t\thttp.Error(w, GeneralError{\"Contract address is incorrect\"}.Error(),\n\t\t\thttp.StatusInternalServerError)\n\t\treturn\n\t}\n\tcontrAddr := common.HexToAddress(contrAddrStr)\n\n\t/*\n\t parentAddr := common.Address{}\n\t if len(r.MultipartForm.Value[\"ParentAddr\"])!=0 {\n\t parentAddrStr := r.MultipartForm.Value[\"ParentAddr\"][0]\n\t if (common.IsHexAddress(parentAddrStr) == false) {\n\t http.Error(w, GeneralError{\"Parent contract address is incorrect\"}.Error(),\n\t http.StatusInternalServerError)\n\t return\n\t }\n\t parentAddr = common.HexToAddress(parentAddrStr)\n\t }\n\t*/\n\n\tnewUserAddr := common.Address{}\n\tif len(r.MultipartForm.Value[\"NewUserAddr\"]) != 0 {\n\t\tuserAddrStr := r.MultipartForm.Value[\"NewUserAddr\"][0]\n\t\tif common.IsHexAddress(userAddrStr) == false {\n\t\t\thttp.Error(w, GeneralError{\"New User address is incorrect\"}.Error(),\n\t\t\t\thttp.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tnewUserAddr = common.HexToAddress(userAddrStr)\n\t} /*else {\n\t\thttp.Error(w, \"New User address is not available in params\", http.StatusInternalServerError)\n\t\treturn\n\t}*/\n\n\tcurUserAddr := common.Address{}\n\tif len(r.MultipartForm.Value[\"CurrentUserAddr\"]) != 0 {\n\t\tuserAddrStr := r.MultipartForm.Value[\"CurrentUserAddr\"][0]\n\t\tif common.IsHexAddress(userAddrStr) == false {\n\t\t\thttp.Error(w, GeneralError{\"Current User address is incorrect\"}.Error(),\n\t\t\t\thttp.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tcurUserAddr = common.HexToAddress(userAddrStr)\n\t} /*else {\n\t\thttp.Error(w, \"Current User address is not available in params\", http.StatusInternalServerError)\n\t\treturn\n\t}*/\n\n\thashCert, _, dataCert, cerr := UploadFile(w, r, \"UplFiles\", true)\n\tif cerr.errCode != 0 {\n\t\tfmt.Printf(fmt.Sprintf(\"Populate Uploadfile: %v\\n\", cerr.Error()))\n\t\thttp.Error(w, cerr.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\t/*\n\t dataCert, err := GenerateCert(contrAddr, parentAddr, true, \"Mother Nature CA\")\n\t if err != nil {\n\t http.Error(w, err.Error(), http.StatusInternalServerError)\n\t return\n\t }\n\n\t hashCert, err := CalcHash(dataCert)\n\t if err != nil {\n\t http.Error(w, err.Error(), http.StatusInternalServerError)\n\t return\n\t }\n\t*/\n\n\tclient, err := ethclient.Dial(gConfig.IPCpath)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// 
Instantiate the contract, the address is taken from eth at the moment of contract initiation\n\t// kyc, err := NewLuxUni_KYC(common.HexToAddress(gContractHash), backends.NewRPCBackend(conn))\n\tpkiContract, err := NewLuxUni_PKI(contrAddr, client)\n\tif err != nil {\n\t\thttp.Error(w, GeneralError{\n\t\t\tfmt.Sprintf(\"Failed to instantiate a smart contract: %v\", err)}.Error(),\n\t\t\thttp.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// Logging into Ethereum as a user\n\tif (curUserAddr == common.Address{}) {\n\t\tfmt.Printf(\"Attention! Populate contract: user address is zero, default config account is used\\n\")\n\t\tcurUserAddr = common.HexToAddress(gConfig.AccountAddr)\n\t}\n\tkeyFile, err := FindKeyFile(curUserAddr)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"Failed to find key file for account %v. %v \",\n\t\t\tcurUserAddr.String(), err), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tkey, e := ioutil.ReadFile(gConfig.KeyDir + keyFile)\n\tif e != nil {\n\t\tfmt.Printf(\"Key File error: %v\\n\", e)\n\t\tos.Exit(1)\n\t}\n\tfmt.Printf(\"Found Ethereum Key File \\n\")\n\n\tauth, err := bind.NewTransactor(strings.NewReader(string(key)), gConfig.Pswd)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to create authorized transactor: %v\", err)\n\t}\n\n\tsess := &LuxUni_PKISession{\n\t\tContract: pkiContract,\n\t\tCallOpts: bind.CallOpts{\n\t\t\tPending: true,\n\t\t},\n\t\tTransactOpts: bind.TransactOpts{},\n\t}\n\tsess.TransactOpts = *auth\n\tsess.TransactOpts.GasLimit = big.NewInt(50000000)\n\n\t_, err = sess.PopulateCertificate(dataCert)\n\tif err != nil {\n\t\tfmt.Printf(fmt.Sprintf(\"Failed to populate blockchain: %v.\\n\", err))\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tif (newUserAddr != common.Address{}) {\n\t\t_, err := sess.SetOwner(newUserAddr)\n\t\tif err != nil {\n\t\t\tfmt.Printf(fmt.Sprintf(\"Failed to update owner addr: %v.\\n\", err))\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tnewOwner, err := sess.GetOwner()\n\t\tif err != nil {\n\t\t\tfmt.Printf(fmt.Sprintf(\"Failed to check new owner addr: %v.\\n\", err))\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tif newOwner != newUserAddr {\n\t\t\thttp.Error(w, fmt.Sprintf(\"OwnerAddr (%v) does not equal to newUserAddr (%v) despite SetOwner - probably lack of permissions\",\n\t\t\t\tnewOwner.String(), newUserAddr.String()), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t} /*else {\n\t\thttp.Error(w, \"New User addr is null\", http.StatusInternalServerError)\n\t\treturn\n\t}*/\n\n\t//fmt.Printf(\"Debug Hash Populate: %s, arr:%v \\n\", hex.EncodeToString(hashCert), []byte(hex.EncodeToString(hashCert)))\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(hex.EncodeToString(hashCert)))\n}", "func (x *fastReflection_AddressBytesToStringRequest) New() protoreflect.Message {\n\treturn new(fastReflection_AddressBytesToStringRequest)\n}", "func (w *Wallet) NewAddress(s *aklib.DBConfig, pwd []byte, isPublic bool) (*address.Address, error) {\n\tadrmap := w.AddressPublic\n\tvar idx uint32\n\tif !isPublic {\n\t\tadrmap = w.AddressChange\n\t\tidx = 1\n\t}\n\tmaster, err := address.DecryptSeed(w.EncSeed, pwd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tseed := address.HDseed(master, idx, uint32(len(adrmap)))\n\ta, err := address.New(s.Config, seed)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tadr := &Address{\n\t\tAddress: a,\n\t\tAdrstr: a.Address58(s.Config),\n\t\tNo: 
len(adrmap),\n\t}\n\tif err := w.PutAddress(s, pwd, adr, true); err != nil {\n\t\treturn nil, err\n\t}\n\tadrmap[a.Address58(s.Config)] = struct{}{}\n\treturn a, w.put(s)\n}", "func NewRandomAddress(rng *rand.Rand) common.Address {\n\treturn common.Address(wtest.NewRandomAddress(rng))\n}" ]
[ "0.7285334", "0.7254116", "0.7206184", "0.7199894", "0.71601254", "0.7149224", "0.66494334", "0.6274284", "0.6202237", "0.61646694", "0.608202", "0.5939763", "0.58696765", "0.58615035", "0.5792989", "0.57605034", "0.57210624", "0.5708498", "0.5630859", "0.56129843", "0.5611596", "0.55982834", "0.55370736", "0.5478587", "0.5469931", "0.5468681", "0.54640573", "0.5407431", "0.5398442", "0.5398356", "0.535769", "0.535769", "0.5343541", "0.5343425", "0.53365636", "0.5327208", "0.532574", "0.5323628", "0.53235143", "0.53174925", "0.52913904", "0.5274156", "0.5272926", "0.52478945", "0.5229344", "0.52268475", "0.5213539", "0.52111834", "0.5188356", "0.5184529", "0.51829004", "0.5177956", "0.5177697", "0.5167108", "0.51534355", "0.5148527", "0.5141576", "0.5141161", "0.5120872", "0.51040703", "0.50927824", "0.5091678", "0.5088829", "0.5086931", "0.5078229", "0.50773436", "0.5076747", "0.50729203", "0.50695753", "0.5064026", "0.5063408", "0.50626427", "0.50517046", "0.5051578", "0.5050484", "0.5046947", "0.5030114", "0.5023051", "0.5017013", "0.50061476", "0.4995924", "0.49949446", "0.49847135", "0.49833596", "0.4978958", "0.49699348", "0.49678037", "0.49678037", "0.49678037", "0.49678037", "0.49678037", "0.49613404", "0.4954236", "0.49471286", "0.49470606", "0.4946713", "0.49451956", "0.49444267", "0.4933029", "0.4929508" ]
0.77015626
0
TODO check validity of account and have sufficient balance
func (client *BtcClient) SendFrom(account, address string, amount float64) (string, error) {
	decoded, err := decodeAddress(address, btcEnv)
	if err != nil {
		return "", err
	}
	btcAmount, err := convertToBtcAmount(amount)
	if err != nil {
		return "", err
	}
	hash, err := client.rpcClient.SendFrom(account, decoded, btcAmount)
	if err != nil {
		return "", err
	}
	return hash.String(), nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func getAccountBal(num hedera.AccountID) float64{\n\taccountID := num\n\tclient, err := hedera.Dial(server)\n\tif err != nil{\n\t\tpanic(err)\n\t}\n\n\toperatorAccountID := hedera.AccountID{Account: 1001}\n\n\toperatorSecret,err := hedera.SecretKeyFromString(secret)\n\tif err != nil{\n\t\tpanic(err)\n\t}\n\tclient.SetNode(hedera.AccountID{Account: 3})\n\tclient.SetOperator(operatorAccountID, func() hedera.SecretKey {\n\t\treturn operatorSecret\n\t})\n\t\n\tdefer client.Close()\n\n\tbalance, err := client.Account(accountID).Balance().Get()\n\tif err != nil{\n\t\tpanic(err)\n\t}\n\thbars := float64(balance)/100000000\n\treturn hbars\n}", "func (acc Account) Balance() float64 {\n\treturn float64(acc.AvailableBalance / 100)\n}", "func (t *TaskChaincode) getBalance(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\t// 0\n\t// \"$account\"\n\tif len(args) != 1 {\n\t\treturn shim.Error(\"Incorrect number of arguments. Expecting 1\")\n\t}\n\n\tfmt.Println(\"cacluate begins!\");\n\tif len(args[0]) <= 0 {\n\t\treturn shim.Error(\"1st argument must be a non-empty string\")\n\t}\n\n\taccount := args[0]\n\n\tqueryString := fmt.Sprintf(\"{\\\"selector\\\":{\\\"objectType\\\":\\\"PayTX\\\",\\\"payer\\\":\\\"%s\\\"}}\", account)\n\tqueryResults, err := getResultForQueryString(stub, queryString)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\tvar payerTXs []payTX\n\terr = json.Unmarshal(queryResults, &payerTXs)\n\tif err != nil {\n\t\tshim.Error(err.Error())\n\t}\n\n\t//fmt.Println(len(payTXs))\n\tvar i int\n\toutcomeVal := 0.0\n for i=0;i<len(payerTXs);i=i+1 {\n\t\tpayerTX := payerTXs[i]\n\t\toutcomeVal = outcomeVal + payerTX.Value\n\t}\n //fmt.Println(outcomeVal)\n\n\tqueryString = fmt.Sprintf(\"{\\\"selector\\\":{\\\"objectType\\\":\\\"PayTX\\\",\\\"payee\\\":\\\"%s\\\"}}\", account)\n\tqueryResults, err = getResultForQueryString(stub, queryString)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\tvar payeeTXs []payTX\n\terr = json.Unmarshal(queryResults, &payeeTXs)\n\tif err != nil {\n\t\tshim.Error(err.Error())\n\t}\n\n\tincomeVal := 0.0\n for i=0;i<len(payeeTXs);i=i+1 {\n\t\tpayeeTX := payeeTXs[i]\n\t\tincomeVal = incomeVal + payeeTX.Value\n\t}\n //fmt.Println(incomeVal)\n\n\tbalance := incomeVal - outcomeVal\n\t//fmt.Println(balance)\n balanceStr := strconv.FormatFloat(balance, 'f', 6, 64)\n\n return shim.Success([]byte(balanceStr))\n}", "func (a Account) Balance() string {\n\treturn a.client.Request(\"GET\", \"api/accounts/balance\", \"\")\n}", "func (t *ManageAccount) updateAccountBalance(stub shim.ChaincodeStubInterface, args []string) ([]byte, error) {\n\tvar jsonResp string\n\tvar err error\n\n\t//set amountPaid\n\tamountPaid := args[2]\n\n\t// input sanitation\n\tif len(args) != 4 {\n\t\terrMsg := \"{ \\\"message\\\" : \\\"Incorrect number of arguments. 
Expecting \\\"Customer Account Id, Service Provider Account Id, Amount paid\\\" and \\\" operation\\\" as an argument.\\\", \\\"code\\\" : \\\"503\\\"}\"\n\t\terr = stub.SetEvent(\"errEvent\", []byte(errMsg))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfmt.Println(errMsg)\t\n\t\treturn nil, errors.New(errMsg)\n\t}\n\n\tfmt.Println(\"Updating the account balance of\"+ args[0] + \" and \" + args[1])\n\t// convert string to float\n\t_amountPaid, _ := strconv.ParseFloat(amountPaid, 64)\n\toperation := args[3]\n\taccount := Account{}\n\tfor i := 0; i < 2; i++ {\n\t\taccountAsBytes, err := stub.GetState(args[i])\t\t\t\t\t\t\t\t\t//get the var from chaincode state\n\t\tif err != nil {\n\t\t\tjsonResp = \"{\\\"Error\\\":\\\"Failed to get state for \" + args[i] + \"\\\"}\"\n\t\t\treturn nil, errors.New(jsonResp)\n\t\t}\n\t\tjson.Unmarshal(accountAsBytes, &account)\n\t\tif account.AccountOwnerId == args[i]{\n\t\t\tif account.AccountName == \"Customer\" {\n\t\t\t\tfmt.Println(\"Customer Account found with account Owner Id : \" + args[i])\n\t\t\t\tfmt.Println(account);\n\t\t\t\tif operation == \"Initial\" || operation == \"Final\" {\n\t\t\t\t\taccount.AccountBalance = account.AccountBalance - _amountPaid\n\t\t\t\t}else{\n\t\t\t\t\taccount.AccountBalance = account.AccountBalance + _amountPaid\n\t\t\t\t}\n\t\t\t} else if account.AccountName == \"Service Provider\" {\n\t\t\t\tfmt.Println(\"Service Provider Account found with account Owner Id : \" + args[i])\n\t\t\t\tfmt.Println(account);\n\t\t\t\tif operation == \"Final\" || operation == \"Initial\"{\n\t\t\t\t\taccount.AccountBalance = account.AccountBalance + _amountPaid\n\t\t\t\t}else {\n\t\t\t\t\taccount.AccountBalance = account.AccountBalance - _amountPaid\n\t\t\t\t}\n\t\t\t}\n\t\t}else {\n\t\t\terrMsg := \"{ \\\"message\\\" : \\\"\"+ args[i]+ \" Not Found.\\\", \\\"code\\\" : \\\"503\\\"}\"\n\t\t\terr = stub.SetEvent(\"errEvent\", []byte(errMsg))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tfmt.Println(errMsg); \n\t\t}\n\t\t\n\t\t//build the Payment json string\n\t\taccountJson := &Account{account.AccountOwnerId,account.AccountName,account.AccountBalance}\n\t\t// convert *Account to []byte\n\t\taccountJsonasBytes, err := json.Marshal(accountJson)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t//store account Owner Id as key\n\t\terr = stub.PutState(account.AccountOwnerId, accountJsonasBytes)\t\t\t\t\t\t\t\t\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t// event message to set on successful account updation\n\t\ttosend := \"{ \\\"Account Owner Id\\\" : \\\"\"+account.AccountOwnerId+\"\\\", \\\"message\\\" : \\\"Account updated succcessfully\\\", \\\"code\\\" : \\\"200\\\"}\"\n\t\terr = stub.SetEvent(\"evtsender\", []byte(tosend))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfmt.Println(tosend); \t\n\t}\n\tfmt.Println(\"Account balance Updated Successfully.\")\n\treturn nil, nil\n}", "func (theAccount Account) Balance() int {\n\treturn theAccount.balance\n}", "func (a Account) Balance() (balance int64, ok bool) {\n\tif a.defunct == 1 {\n\t\treturn 0, false\n\t}\n\treturn a.amt, true\n}", "func (a *Account) Balance() (balance int64, ok bool) {\n\t// a.mutex.Lock() // Aquire the lock on the shared resource ... 
the balance\n\t// defer a.mutex.Unlock() // Release lock once surrounding function has been executed\n\tif !a.open { // If the bank account isnt open return false\n\t\treturn 0, false\n\t}\n\treturn a.balance, true\n}", "func QueryAccount() {\n\n\taccountInfo, err := binanceSrv.Account(binance.AccountRequest{\n\t\tRecvWindow: 5 * time.Second,\n\t\tTimestamp: time.Now(),\n\t})\n\tif err != nil {\n\t\tlevel.Error(logger).Log(\"QueryAccount - fail! Err=\", err)\n\t\treturn\n\t}\n\n\tlookForNew:\n\tfor _, balance := range accountInfo.Balances {\n\n\t\tif balance.Asset == \"BTC\" || balance.Asset == \"ETH\" || balance.Free+balance.Locked == 0{\n\t\t\tcontinue\n\t\t}\n\n\t\tfmt.Printf(\"QueryAccount - %s balance=%f. Free=%f, Locked=%f\\n\", balance.Asset,\n\t\t\tbalance.Free+balance.Locked, balance.Free, balance.Locked)\n\n\t\tasset := balance.Asset + \"BTC\"\n\n\t\t// get latest price\n\t\thighestBid := getHighestBid(asset)\n\t\tif highestBid.Time.Add(time.Second * 60).Before(time.Now()) {\n\t\t\tfmt.Println(\"Warning! QueryAccount - getHighestBid got old data. fail to manage its project\", asset)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, knownProject := range ActiveProjectList {\n\n\t\t\t// Existing Known Project?\n\t\t\tif knownProject.Symbol == asset {\n\n\t\t\t\tif !FloatEquals(knownProject.AccBalanceBase, balance.Free+balance.Locked) {\n\n\t\t\t\t\tfmt.Printf(\"QueryAccount - Info: found new balance for %s. new=%f, old=%f\\n\",\n\t\t\t\t\t\tknownProject.Symbol, balance.Free+balance.Locked,\n\t\t\t\t\t\tknownProject.AccBalanceBase)\n\n\t\t\t\t\tknownProject.AccBalanceBase = balance.Free+balance.Locked\n\t\t\t\t\tknownProject.AccBalanceLocked = balance.Locked\n\n\t\t\t\t\tif !UpdateProjectAccBalanceBase(knownProject){\n\t\t\t\t\t\tfmt.Printf(\"QueryAccount - Update Project %s AccBalanceBase Fail!\\n\",\n\t\t\t\t\t\t\tknownProject.Symbol)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tcontinue lookForNew\n\t\t\t}\n\t\t}\n\n\n\t\thistoryRemain := GetHistoryRemain(asset)\n\n\t\t// ignore trivial balance\n\t\tif highestBid.Price * (balance.Free+balance.Locked) < MinOrderTotal {\n\n\t\t\t// update trivial balance into history_remain table\n\t\t\tif !FloatEquals(historyRemain.Amount, balance.Free+balance.Locked){\n\t\t\t\tUpdateHistoryRemain(asset, balance.Free+balance.Locked)\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\t// Must Be a New Project!\n\t\tProjectImport(balance, historyRemain)\n\t}\n}", "func (a *Account) Balance() (models.Amount, error) {\n\tbalance, err := a.repo.Rpc.AccountBalance(a.acc.Address)\n\treturn *balance, err\n}", "func (c *rpcclient) accountBalance(ctx context.Context, assetID uint32, addr common.Address) (bal *big.Int, err error) {\n\treturn bal, c.withClient(func(ec *ethConn) error {\n\t\tif ec.txPoolSupported {\n\t\t\tbal, err = c.smartBalance(ctx, ec, assetID, addr)\n\t\t} else {\n\t\t\tbal, err = c.dumbBalance(ctx, ec, assetID, addr)\n\t\t}\n\t\treturn err\n\t})\n\n}", "func (acc *Account) Balance() (balance int64, ok bool) {\n\tacc.RLock()\n\tdefer acc.RUnlock()\n\n\t//if the account is invalid, just return now\n\tif acc.invalid {\n\t\treturn\n\t}\n\n\tok = true\n\tbalance = acc.balance\n\n\treturn\n}", "func (a *Ethereum) Balance(addr string) (string, error) {\n\tvar (\n\t\tctx, _ = context.WithDeadline(context.Background(), time.Now().Add(time.Second*30))\n\t)\n\tvar address = common.HexToAddress(addr)\n\tvar bignum, err = ethclient.NewClient(a.rpcclient).BalanceAt(ctx, address, nil)\n\tif err != nil {\n\t\tlog.Printf(\"aqua: %v\", err)\n\t\treturn \"ERR\", err\n\t}\n\tdots := 
new(big.Float).Quo(new(big.Float).SetInt(bignum), big.NewFloat(OneEther))\n\treturn fmt.Sprintf(\"%.8f\", dots), nil\n}", "func (wsm *impl) Balance() (ret float64, err error) {\n\tvar obj *websmsTypes.BalanceRequest\n\tvar rsp *websmsTypes.BalanceResponse\n\tvar req request.Interface\n\n\t// Авторизация\n\tobj = &websmsTypes.BalanceRequest{\n\t\tAuth: websmsTypes.Auth{\n\t\t\tUsername: wsm.cfg.Username,\n\t\t\tPassword: wsm.cfg.Password,\n\t\t},\n\t}\n\t// Запрос\n\treq = singletonTransport.RequestGet().\n\t\tUserAgent(userAgent).\n\t\tMethod(singletonTransport.Method().Post()).\n\t\tContentType(mime.ApplicationXMLCharsetUTF8).\n\t\tDataXML(obj).\n\t\tURL(apiXMLURI)\n\tdefer singletonTransport.RequestPut(req)\n\tsingletonTransport.Do(req)\n\t// Результат\n\trsp = new(websmsTypes.BalanceResponse)\n\tif err = wsm.SendResponse(req, rsp); err != nil {\n\t\treturn\n\t}\n\t// Обработка результата\n\tret, err = strconv.ParseFloat(strings.Replace(rsp.Value, \",\", \".\", -1), 64)\n\tif err != nil {\n\t\terr = fmt.Errorf(rsp.Value)\n\t}\n\n\treturn\n}", "func printBalance(account horizon.Account) {\n\taddress := account.AccountID\n\tbalance := getBalance(account)\n\n\tfmt.Println(\"Balances for account:\", address)\n\tfmt.Println(\"XLM Balance:\", balance)\n}", "func (e *Huobi) GetAccount() interface{} {\n\taccounts, err := services.GetAccounts()\n\tif err != nil {\n\t\te.logger.Log(constant.ERROR, \"\", 0.0, 0.0, \"GetAccount() error, \", err)\n\t\treturn false\n\t}\n\tif accounts.Status != \"ok\" {\n\t\te.logger.Log(constant.ERROR, \"\", 0.0, 0.0, \"GetAccount() error, \", accounts.ErrMsg)\n\t\treturn false\n\t}\n\taccountID := int64(-1)\n\tcount := len(accounts.Data)\n\tfor i := 0; i < count; i++ {\n\t\tactData := accounts.Data[i]\n\t\tif actData.State == \"working\" && actData.Type == \"spot\" {\n\t\t\taccountID = actData.ID\n\t\t\tbreak\n\t\t}\n\t}\n\tif accountID == -1 {\n\t\te.logger.Log(constant.ERROR, \"\", 0.0, 0.0, \"GetAccount() error, \", \"all account locked\")\n\t\treturn false\n\t}\n\tbalance, err := services.GetAccountBalance(strconv.FormatInt(accountID, 10))\n\tif err != nil {\n\t\te.logger.Log(constant.ERROR, \"\", 0.0, 0.0, \"GetAccount() error, \", err)\n\t\treturn false\n\t}\n\tif balance.Status != \"ok\" {\n\t\te.logger.Log(constant.ERROR, \"\", 0.0, 0.0, \"GetAccount() error, \", balance.ErrMsg)\n\t\treturn false\n\t}\n\tresult := make(map[string]float64)\n\tcount = len(balance.Data.List)\n\tfor i := 0; i < count; i++ {\n\t\tsubAcc := balance.Data.List[i]\n\t\tif subAcc.Type == \"trade\" {\n\t\t\tresult[strings.ToUpper(subAcc.Currency)] = conver.Float64Must(subAcc.Balance)\n\t\t} else if subAcc.Type == \"frozen\" {\n\t\t\tresult[\"Frozen\"+strings.ToUpper(subAcc.Currency)] = conver.Float64Must(subAcc.Balance)\n\t\t}\n\t}\n\t//...\n\tconfig.ACCOUNT_ID = strconv.FormatInt(accountID, 10)\n\t//...\n\treturn result\n}", "func main() {\n\taccount := accounts.NewAccount(\"hskimim\")\n\taccount.Deposit(10)\n\terr := account.Withdraw(100)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tfmt.Println(account)\n}", "func Withdraw(card types.Card, amount types.Money) types.Card {\n \n if (card.Active) && (card.Balance >= amount) && (amount > 0) && (amount <= 2_000_000) {\n\t\tcard.Balance = card.Balance - amount \n }\n\n return card\n}", "func (a *Account) Balance() numeric.Numeric {\n\treturn a.AccountTransactionList[len(a.AccountTransactionList)-1].Balance\n}", "func (c BaseController) Balance(store weave.KVStore, src weave.Address) (coin.Coins, error) {\n\tstate, err := 
c.bucket.Get(store, src)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"cannot get account state\")\n\t}\n\tif state == nil {\n\t\treturn nil, errors.Wrap(errors.ErrNotFound, \"no account\")\n\t}\n\treturn AsCoins(state), nil\n}", "func (c *Constructor) balance(\n\tctx context.Context,\n\taddress string,\n) (*big.Int, *types.CoinIdentifier, error) {\n\taccountIdentifier := &types.AccountIdentifier{Address: address}\n\n\tswitch c.accountingModel {\n\tcase configuration.AccountModel:\n\t\tbal, err := c.helper.AccountBalance(ctx, accountIdentifier, c.currency)\n\n\t\treturn bal, nil, err\n\tcase configuration.UtxoModel:\n\t\treturn c.helper.CoinBalance(ctx, accountIdentifier, c.currency)\n\t}\n\n\treturn nil, nil, fmt.Errorf(\"unable to find balance for %s\", address)\n}", "func (c *Client) balance(ctx context.Context, opts []string) (*Response, error) {\n\taccountsRes, err := c.wc.Accounts(ctx, &pb.AccountsRequest{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error fetching accounts. err: %s\", err.Error())\n\t}\n\n\tbalances := make([][]interface{}, len(accountsRes.Accounts))\n\tfor i, v := range accountsRes.Accounts {\n\t\tbalanceReq := &pb.BalanceRequest{\n\t\t\tAccountNumber: v.AccountNumber,\n\t\t\tRequiredConfirmations: 0,\n\t\t}\n\n\t\tbalanceRes, err := c.wc.Balance(ctx, balanceReq)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error fetching balance for account: %d. err: %s\", v.AccountNumber, err.Error())\n\t\t}\n\n\t\tbalances[i] = []interface{}{\n\t\t\tv.AccountName,\n\t\t\tdcrutil.Amount(balanceRes.Total),\n\t\t\tdcrutil.Amount(balanceRes.Spendable),\n\t\t\tdcrutil.Amount(balanceRes.LockedByTickets),\n\t\t\tdcrutil.Amount(balanceRes.VotingAuthority),\n\t\t\tdcrutil.Amount(balanceRes.Unconfirmed),\n\t\t}\n\t}\n\n\tbalanceColumns := []string{\n\t\t\"Account\",\n\t\t\"Total\",\n\t\t\"Spendable\",\n\t\t\"Locked By Tickets\",\n\t\t\"Voting Authority\",\n\t\t\"Unconfirmed\",\n\t}\n\n\tres := &Response{\n\t\tColumns: balanceColumns,\n\t\tResult: balances,\n\t}\n\n\treturn res, nil\n}", "func (a *Account) Balance() (balance int64, ok bool) {\n\tif a.isClosed {\n\t\treturn 0, false\n\t}\n\treturn a.sold, true\n}", "func (a *Account) Withdraw (amount int) error {\n\tif a.balance < amount {\n\t\treturn errNomoney\n\t}\n\ta.balance -= amount\n\treturn nil\n}", "func (t *SimpleChaincode) getBalance(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\tvar sourceAccountName, destinationAccountName string // Entities\n\tvar err error\n\n\tif len(args) != 2 {\n\t\treturn shim.Error(\"Incorrect number of arguments. 
Expecting name of the person to query\")\n\t}\n\n\tsourceAccountName = args[0]\n\tdestinationAccountName = args[1]\n\n\t// Get the state from the ledger\n\taccountValBytes, err := stub.GetState(sourceAccountName + \".\" + destinationAccountName)\n\tif err != nil {\n\t\tjsonResp := \"{\\\"Error\\\":\\\"Failed to get state for \" + sourceAccountName + \".\" + destinationAccountName + \"\\\"}\"\n\t\treturn shim.Error(jsonResp)\n\t}\n\n\tif accountValBytes == nil {\n\t\taccountValBytes = []byte(\"0\")\n\t}\n\n\tjsonResp := \"{\\\"Source\\\":\\\"\" + sourceAccountName + \"\\\",\" +\n\t\t\"\\\"Destination\\\":\\\"\" + destinationAccountName + \"\\\",\\\"\" +\n\t\t\"Amount\\\":\\\"\" + string(accountValBytes) + \"\\\"}\"\n\tfmt.Printf(\"Query Response:%s\\n\", jsonResp)\n\treturn shim.Success(accountValBytes)\n}", "func (w *Wallet) Balance() Bitcoin {\n\t/*Same here, we use a pointer because we want to see\n\tthe balance of the original wallet that we have in memory and not a copy\n\t*/\n\treturn w.balance //this still returns correctly without having to type (*w)\n\n}", "func TransferBalance(c router.Context) (interface{}, error) {\n\t// get the data from the request and parse it as structure\n\tdata := c.Param(`data`).(SendBalance)\n\n\t// Validate the inputed data\n\terr := data.Validate()\n\tif err != nil {\n\t\tif _, ok := err.(validation.InternalError); ok {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, status.ErrStatusUnprocessableEntity.WithValidationError(err.(validation.Errors))\n\t}\n\n\t// check receiver data\n\tqueryRecevierString := fmt.Sprintf(\"{\\\"selector\\\": {\\\"user_addresses\\\": {\\\"$elemMatch\\\": {\\\"value\\\": \\\"%s\\\"}},\\\"doc_type\\\":\\\"%s\\\"}}\", data.To, utils.DocTypeUser)\n\treceiverData, _, err5 := utils.Get(c, queryRecevierString, fmt.Sprintf(\"Receiver %s does not exist!\", data.To))\n\tif err5 != nil {\n\t\treturn nil, err5\n\t}\n\n\treceiver := User{}\n\terr = json.Unmarshal(receiverData, &receiver)\n\tif err != nil {\n\t\treturn nil, status.ErrInternal.WithError(err)\n\t}\n\n\t// check sender data\n\tquerySenderString := fmt.Sprintf(\"{\\\"selector\\\":{\\\"_id\\\":\\\"%s\\\",\\\"doc_type\\\":\\\"%s\\\"}}\", data.From, utils.DocTypeUser)\n\tsenderData, _, err6 := utils.Get(c, querySenderString, fmt.Sprintf(\"You account %s does not exist!\", data.From))\n\tif err6 != nil {\n\t\treturn nil, err6\n\t}\n\tsender := User{}\n\terr = json.Unmarshal(senderData, &sender)\n\tif err != nil {\n\t\treturn nil, status.ErrInternal.WithError(err)\n\t}\n\n\tfor i := range sender.UserAddresses {\n\t\tif sender.UserAddresses[i].Value == data.To {\n\t\t\treturn nil, status.ErrInternal.WithMessage(fmt.Sprintf(\"You can't transfer coins to yourself!\"))\n\t\t}\n\t}\n\n\tif data.Quantity > sender.WalletBalance {\n\t\treturn nil, status.ErrInternal.WithMessage(fmt.Sprintf(\"Quantity should be less or equal to %d\", sender.WalletBalance))\n\t}\n\n\tstub := c.Stub()\n\ttxID := stub.GetTxID()\n\n\tvar receiverLabel, senderLabel string\n\t// check label of receiver in sender's address book\n\treceiverLabelString := fmt.Sprintf(\"{\\\"selector\\\":{\\\"user_id\\\":\\\"%s\\\",\\\"address\\\":\\\"%s\\\",\\\"label\\\":\\\"%s\\\",\\\"doc_type\\\":\\\"%s\\\"}}\", data.From, data.To, data.Label, utils.DocTypeAddressBook)\n\treceiverLabelData, _, err6 := utils.Get(c, receiverLabelString, fmt.Sprintf(\"Label of receiver does not exist!\"))\n\n\t//If label does not exist in address book then save it into db\n\tif receiverLabelData == nil {\n\t\t// check if label is 
unique\n\t\tcheckUniqueString := fmt.Sprintf(\"{\\\"selector\\\":{\\\"user_id\\\":\\\"%s\\\",\\\"label\\\":\\\"%s\\\",\\\"doc_type\\\":\\\"%s\\\"}}\", data.From, data.Label, utils.DocTypeAddressBook)\n\t\tuniqueLabelData, _, err := utils.Get(c, checkUniqueString, fmt.Sprintf(\"This label already exists!\"))\n\t\tif uniqueLabelData != nil {\n\t\t\treturn nil, status.ErrInternal.WithMessage(fmt.Sprintf(\"This label already exists!\"))\n\t\t}\n\n\t\tlabelTxn := AddressBook{UserID: data.From, Address: data.To, Label: data.Label, DocType: utils.DocTypeAddressBook}\n\t\treceiverLabel = data.Label\n\t\t// Save the data\n\t\terr = c.State().Put(txID, labelTxn)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\n\t\taddressLabel := AddressBook{}\n\t\terr = json.Unmarshal(receiverLabelData, &addressLabel)\n\t\tif err != nil {\n\t\t\treturn nil, status.ErrInternal.WithError(err)\n\t\t}\n\t\treceiverLabel = addressLabel.Label\n\t}\n\n\t// check label of sender in receiver's address book\n\tsenderLabelString := fmt.Sprintf(\"{\\\"selector\\\":{\\\"user_id\\\":\\\"%s\\\",\\\"address\\\":\\\"%s\\\",\\\"doc_type\\\":\\\"%s\\\"}}\", receiver.UserAddresses[0].UserID, sender.Address, utils.DocTypeAddressBook)\n\tsenderLabelData, _, err6 := utils.Get(c, senderLabelString, fmt.Sprintf(\"Label of sender does not exist!\"))\n\n\t//If label does not exist in address book\n\tif senderLabelData == nil {\n\t\tsenderLabel = \"N/A\"\n\t} else {\n\n\t\taddressLabel1 := AddressBook{}\n\t\terr = json.Unmarshal(senderLabelData, &addressLabel1)\n\t\tif err != nil {\n\t\t\treturn nil, status.ErrInternal.WithError(err)\n\t\t}\n\t\tsenderLabel = addressLabel1.Label\n\t}\n\n\tcreatedAt := time.Now().Format(time.RFC3339)\n\t// sender transactions\n\tvar senderTransaction = Transaction{UserID: data.From, Type: utils.Send, Code: utils.WalletCoinSymbol, Quantity: data.Quantity, DocType: utils.DocTypeTransaction, CreatedAt: createdAt, AddressValue: data.To, LabelValue: \"\", AddressBookLabel: receiverLabel, TxnType: utils.CoinTxnType}\n\terr = c.State().Put(txID+strconv.Itoa(1), senderTransaction)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// receiver transactions\n\tvar receiveTransaction = Transaction{UserID: receiver.UserAddresses[0].UserID, Type: utils.Receive, Code: utils.WalletCoinSymbol, Quantity: data.Quantity, DocType: utils.DocTypeTransaction, CreatedAt: createdAt, AddressValue: sender.Address, LabelValue: \"Original\", AddressBookLabel: senderLabel, TxnType: utils.CoinTxnType}\n\terr = c.State().Put(txID+strconv.Itoa(2), receiveTransaction)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// update sender wallet\n\tsender.WalletBalance = sender.WalletBalance - data.Quantity\n\terr = c.State().Put(data.From, sender)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// update receiver wallet\n\treceiver.WalletBalance = receiver.WalletBalance + data.Quantity\n\terr = c.State().Put(receiver.UserAddresses[0].UserID, receiver)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponseBody := ResponseAddAsset{ID: data.From, Balance: sender.WalletBalance, Symbol: sender.Symbol}\n\t// return the response\n\treturn responseBody, nil\n}", "func TestPagarmeCurrentBalance(t *testing.T) {\n \n Pagarme := pagarme.NewPagarme(\"pt-BR\", ApiKey, CryptoKey)\n Pagarme.SetDebug()\n\n\n result, err := Pagarme.CurrentBalance(\"re_ciiahjw06003a546eedfngbv8\")\n\n if err != nil {\n t.Errorf(\"Erro ao get subscription: %v\", err)\n }\n\n if result.Status == api.PagarmeCancelled {\n t.Errorf(\"transactions paid expected\")\n 
return\n } \n}", "func (eth *Backend) AccountBalance(addrStr string) (uint64, error) {\n\tbigBal, err := eth.node.accountBalance(eth.rpcCtx, common.HexToAddress(addrStr))\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"accountBalance error: %w\", err)\n\t}\n\treturn dexeth.ToGwei(bigBal)\n}", "func getBalance(account horizon.Account) string {\n\tbalance, _ := account.GetNativeBalance()\n\treturn balance\n}", "func (f *Fund) Balance() int {\r\n\treturn f.balance\r\n}", "func (w Wallet) Balance() Bitcoin {\n\treturn w.balance\n}", "func (w *worker) staticHostAccountBalance() (types.Currency, error) {\n\t// Sanity check - only one account balance check should be running at a\n\t// time.\n\tif !atomic.CompareAndSwapUint64(&w.atomicAccountBalanceCheckRunning, 0, 1) {\n\t\tw.renter.log.Critical(\"account balance is being checked in two threads concurrently\")\n\t}\n\tdefer atomic.StoreUint64(&w.atomicAccountBalanceCheckRunning, 0)\n\n\t// Get a stream.\n\tstream, err := w.staticNewStream()\n\tif err != nil {\n\t\treturn types.ZeroCurrency, err\n\t}\n\tdefer func() {\n\t\tif err := stream.Close(); err != nil {\n\t\t\tw.renter.log.Println(\"ERROR: failed to close stream\", err)\n\t\t}\n\t}()\n\n\t// write the specifier\n\terr = modules.RPCWrite(stream, modules.RPCAccountBalance)\n\tif err != nil {\n\t\treturn types.ZeroCurrency, err\n\t}\n\n\t// send price table uid\n\tpt := w.staticPriceTable().staticPriceTable\n\terr = modules.RPCWrite(stream, pt.UID)\n\tif err != nil {\n\t\treturn types.ZeroCurrency, err\n\t}\n\n\t// provide payment\n\terr = w.renter.hostContractor.ProvidePayment(stream, w.staticHostPubKey, modules.RPCAccountBalance, pt.AccountBalanceCost, w.staticAccount.staticID, pt.HostBlockHeight)\n\tif err != nil {\n\t\t// If the error could be caused by a revision number mismatch,\n\t\t// signal it by setting the flag.\n\t\tif errCausedByRevisionMismatch(err) {\n\t\t\tw.staticSetSuspectRevisionMismatch()\n\t\t\tw.staticWake()\n\t\t}\n\t\treturn types.ZeroCurrency, err\n\t}\n\n\t// prepare the request.\n\tabr := modules.AccountBalanceRequest{Account: w.staticAccount.staticID}\n\terr = modules.RPCWrite(stream, abr)\n\tif err != nil {\n\t\treturn types.ZeroCurrency, err\n\t}\n\n\t// read the response\n\tvar resp modules.AccountBalanceResponse\n\terr = modules.RPCRead(stream, &resp)\n\tif err != nil {\n\t\treturn types.ZeroCurrency, err\n\t}\n\treturn resp.Balance, nil\n}", "func (_GameJam *GameJamCaller) Balance(opts *bind.CallOpts) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _GameJam.contract.Call(opts, out, \"balance\")\n\treturn *ret0, err\n}", "func (rpc *Rpc) AccountBalance(addr string) (*models.Amount, error) {\n\t// use RPC to make the call\n\tvar balance string\n\terr := rpc.Call(&balance, \"ftm_getBalance\", addr, \"latest\")\n\tif err != nil {\n\t\trpc.log.Errorf(\"RPC->AccountBalance(): Error [%s]\", err.Error())\n\t\treturn &models.Amount{}, err\n\t}\n\n\t// decode the response\n\tval, err := hexutil.DecodeBig(balance)\n\tif err != nil {\n\t\trpc.log.Errorf(\"RPC->AccountBalance(): Can not get account balance for [%s]. 
%s\", addr, err.Error())\n\t\treturn &models.Amount{}, err\n\t}\n\n\treturn &models.Amount{Decimal: decimal.NewFromBigInt(val, 0)}, nil\n}", "func (account *Account) Deposit(amount int) {\r\n\taccount.balance += amount\r\n}", "func (a Account) CheckBalance () int {\n\treturn a.balance\n}", "func (a *Account) Balance() (int64, bool) {\n\ta.Lock()\n\tdefer a.Unlock()\n\tif a.closed {\n\t\treturn 0, false\n\t}\n\n\treturn a.balance, true\n\n}", "func (a *Account) Balance() (int64, bool) {\n\tif !a.open {\n\t\treturn 0, false\n\t}\n\treturn a.balance, true\n}", "func (a Account) Balance() (Balance, error) {\n\treq, err := a.client.NewRequest(http.MethodGet, \"balance\", nil)\n\tif err != nil {\n\t\treturn Balance{}, err\n\t}\n\n\tq := req.URL.Query()\n\tq.Add(\"account_id\", a.ID)\n\treq.URL.RawQuery = q.Encode()\n\n\tresp, _ := a.client.Do(req)\n\n\tb := new(bytes.Buffer)\n\tb.ReadFrom(resp.Body)\n\tstr := b.String()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn Balance{}, fmt.Errorf(\"failed to fetch balance: %s\", str)\n\t}\n\n\tvar bal Balance\n\tif err := json.Unmarshal(b.Bytes(), &bal); err != nil {\n\t\treturn Balance{}, err\n\t}\n\n\treturn bal, nil\n}", "func (theAccount *Account) Deposit(amount int) {\n\ttheAccount.balance += amount\n}", "func (theAccount *Account) Withdraw(amount int) error {\n\tif theAccount.balance < amount {\n\t\treturn errNoMoney\n\t}\n\ttheAccount.balance -= amount\n\treturn nil\n}", "func (a Account) ShowBalance() int {\n\treturn a.balance\n}", "func (dcr *ExchangeWallet) Balance() (*asset.Balance, error) {\n\tlocked, err := dcr.lockedAtoms(dcr.primaryAcct)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tab, err := dcr.wallet.AccountBalance(dcr.ctx, 0, dcr.primaryAcct)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbal := &asset.Balance{\n\t\tAvailable: toAtoms(ab.Spendable) - locked,\n\t\tImmature: toAtoms(ab.ImmatureCoinbaseRewards) +\n\t\t\ttoAtoms(ab.ImmatureStakeGeneration),\n\t\tLocked: locked + toAtoms(ab.LockedByTickets),\n\t}\n\n\tif dcr.unmixedAccount == \"\" {\n\t\treturn bal, nil\n\t}\n\n\t// Mixing is enabled, consider ...\n\t// 1) trading account spendable (-locked) as available,\n\t// 2) all unmixed funds as immature, and\n\t// 3) all locked utxos in the trading account as locked (for swapping).\n\ttradingAcctBal, err := dcr.wallet.AccountBalance(dcr.ctx, 0, dcr.tradingAccount)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttradingAcctLocked, err := dcr.lockedAtoms(dcr.tradingAccount)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tunmixedAcctBal, err := dcr.wallet.AccountBalance(dcr.ctx, 0, dcr.unmixedAccount)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbal.Available += toAtoms(tradingAcctBal.Spendable) - tradingAcctLocked\n\tbal.Immature += toAtoms(unmixedAcctBal.Total)\n\tbal.Locked += tradingAcctLocked\n\treturn bal, nil\n}", "func (f *Fund) Balance() int {\n\treturn f.balance\n}", "func (f *Fund) Balance() int {\n\treturn f.balance\n}", "func (f *Fund) Balance() int {\n\treturn f.balance\n}", "func (a *account) availableBalance() types.Currency {\n\ttotal := a.balance.Add(a.pendingDeposits)\n\tif total.Cmp(a.negativeBalance) <= 0 {\n\t\treturn types.ZeroCurrency\n\t}\n\ttotal = total.Sub(a.negativeBalance)\n\tif a.pendingWithdrawals.Cmp(total) < 0 {\n\t\treturn total.Sub(a.pendingWithdrawals)\n\t}\n\treturn types.ZeroCurrency\n}", "func (_GameJam *GameJamCallerSession) Balance() (*big.Int, error) {\n\treturn _GameJam.Contract.Balance(&_GameJam.CallOpts)\n}", "func (r *Wallet) Balance() Bitcoin {\n\treturn r.balance\n}", 
"func (a *Account) Balance() (int, bool) {\n\tif !a.isOpen {\n\t\treturn 0, false\n\t}\n\treturn a.balance, true\n}", "func GetAccountBal(cfg *config.Config, c client.Client) string {\n\tvar balanceMsg string\n\n\tbalance := GetAccountBalWithDenomFromdb(cfg, c) // get heimdall account balance\n\tbalanceMsg = fmt.Sprintf(\"Heimdall Node : \\n- Current balance of your account(%s) is %s \\n\", cfg.ValDetails.SignerAddress, balance)\n\n\tborBalance := GetBorBalanceFromDB(cfg, c) + \"ETH\" // get bor account balance\n\tbalanceMsg = balanceMsg + fmt.Sprintf(\"\\nBor Node : \\n- Current balance of your account(%s) is %s \\n\", cfg.ValDetails.SignerAddress, borBalance)\n\n\treturn balanceMsg\n}", "func (w *Wallet) Balance() Bitcoin {\n\treturn w.balance\n}", "func (w *Wallet) Balance() Bitcoin {\n\treturn w.balance\n}", "func (w *Wallet) Balance() Bitcoin {\n\treturn w.balance\n}", "func (w *Wallet) Balance() Bitcoin {\n\treturn w.balance\n}", "func (w *Wallet) Balance() Bitcoin {\n\treturn w.balance\n}", "func (w *Wallet) Balance() Bitcoin {\n\treturn w.balance\n}", "func (w *Wallet) Balance() Bitcoin {\n\treturn w.balance\n}", "func (w *Wallet) Balance() Bitcoin {\n\treturn w.balance\n}", "func (w *Wallet) Balance() Bitcoin {\n\treturn w.balance\n}", "func (w *Wallet) Balance() Bitcoin {\n\treturn w.balance\n}", "func (acc *Account) Balance() (balance int64, ok bool) {\n\tif acc.closed {\n\t\treturn 0, false\n\t}\n\treturn acc.balance, true\n}", "func (c *rpcclient) smartBalance(ctx context.Context, ec *ethConn, assetID uint32, addr common.Address) (bal *big.Int, err error) {\n\ttip, err := c.blockNumber(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"blockNumber error: %v\", err)\n\t}\n\n\t// We need to subtract and pending outgoing value, but ignore any pending\n\t// incoming value since that can't be spent until mined. So we can't using\n\t// PendingBalanceAt or BalanceAt by themselves.\n\t// We'll iterate tx pool transactions and subtract any value and fees being\n\t// sent from this account. 
The rpc.Client doesn't expose the\n\t// txpool_contentFrom => (*TxPool).ContentFrom RPC method, for whatever\n\t// reason, so we'll have to use CallContext and copy the mimic the\n\t// internal RPCTransaction type.\n\tvar txs map[string]map[string]*RPCTransaction\n\tif err := ec.caller.CallContext(ctx, &txs, \"txpool_contentFrom\", addr); err != nil {\n\t\treturn nil, fmt.Errorf(\"contentFrom error: %w\", err)\n\t}\n\n\tif assetID == BipID {\n\t\tethBalance, err := ec.BalanceAt(ctx, addr, big.NewInt(int64(tip)))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\toutgoingEth := new(big.Int)\n\t\tfor _, group := range txs { // 2 groups, pending and queued\n\t\t\tfor _, tx := range group {\n\t\t\t\toutgoingEth.Add(outgoingEth, tx.Value.ToInt())\n\t\t\t\tgas := new(big.Int).SetUint64(uint64(tx.Gas))\n\t\t\t\tif tx.GasPrice != nil && tx.GasPrice.ToInt().Cmp(bigZero) > 0 {\n\t\t\t\t\toutgoingEth.Add(outgoingEth, new(big.Int).Mul(gas, tx.GasPrice.ToInt()))\n\t\t\t\t} else if tx.GasFeeCap != nil {\n\t\t\t\t\toutgoingEth.Add(outgoingEth, new(big.Int).Mul(gas, tx.GasFeeCap.ToInt()))\n\t\t\t\t} else {\n\t\t\t\t\treturn nil, fmt.Errorf(\"cannot find fees for tx %s\", tx.Hash)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn ethBalance.Sub(ethBalance, outgoingEth), nil\n\t}\n\n\t// For tokens, we'll do something similar, but with checks for pending txs\n\t// that transfer tokens or pay to the swap contract.\n\t// Can't use withTokener because we need to use the same ethConn due to\n\t// txPoolSupported being used to decide between {smart/dumb}Balance.\n\ttkn := ec.tokens[assetID]\n\tif tkn == nil {\n\t\treturn nil, fmt.Errorf(\"no tokener for asset ID %d\", assetID)\n\t}\n\tbal, err = tkn.balanceOf(ctx, addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, group := range txs {\n\t\tfor _, rpcTx := range group {\n\t\t\tto := *rpcTx.To\n\t\t\tif to == tkn.tokenAddr {\n\t\t\t\tif sent := tkn.transferred(rpcTx.Input); sent != nil {\n\t\t\t\t\tbal.Sub(bal, sent)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif to == tkn.contractAddr {\n\t\t\t\tif swapped := tkn.swapped(rpcTx.Input); swapped != nil {\n\t\t\t\t\tbal.Sub(bal, swapped)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn bal, nil\n}", "func GetBalance(tx *gorm.DB, requestCreated *models.TransactionRequests) (responses.TransactionResponse, error) {\n\t//first get Balance of the DebitAccount\n\tresponse := responses.TransactionResponse{}\n\tcbalance := models.Accounts{}\n\n\terr := tx.Debug().Model(&models.Accounts{}).Where(\"account_no = ?\", requestCreated.DebitAccount).Take(&cbalance).Error\n\tif err != nil {\n\t\treturn responses.TransactionResponse{}, err\n\t}\n\tresponse.Procode = requestCreated.Procode\n\tresponse.ResponseCode = Successful\n\tresponse.Remarks = \"Balance Enquiry Successful\"\n\tresponse.Reference = requestCreated.TxnRef\n\tamt, _ := strconv.ParseFloat(\"0.00\", 64)\n\tresponse.Amount = amt\n\tresponse.Account = cbalance.AccountNo\n\tbal, _ := strconv.ParseFloat(cbalance.AvailableBal, 64)\n\tresponse.AvailableBalance = bal\n\n\treturn response, nil\n}", "func (_GameJam *GameJamSession) Balance() (*big.Int, error) {\n\treturn _GameJam.Contract.Balance(&_GameJam.CallOpts)\n}", "func (acc *Account) Balance() (hexutil.Big, error) {\n\t// get the balance\n\tval, err, _ := acc.cg.Do(\"balance\", func() (interface{}, error) {\n\t\treturn repository.R().AccountBalance(&acc.Address)\n\t})\n\n\t// can not get the balance?\n\tif err != nil {\n\t\treturn hexutil.Big{}, err\n\t}\n\treturn *val.(*hexutil.Big), nil\n}", "func (w *Wallet) Balance() (balance Bitcoin) 
{\n\treturn w.balance\n}", "func (s *Store) Balance(minConf int, chainHeight int32) int64 {\n\tbal := int64(0)\n\tfor _, rt := range s.unspent {\n\t\tif confirmed(minConf, rt.Height(), chainHeight) {\n\t\t\ttx := s.txs[rt.blockTx()]\n\t\t\tmsgTx := tx.MsgTx()\n\t\t\ttxOut := msgTx.TxOut[rt.outpoint.Index]\n\t\t\tbal += txOut.Value\n\t\t}\n\t}\n\treturn bal\n}", "func (t *SimpleChaincode) query_balance (stub shim.ChaincodeStubInterface, args []string) pb.Response {\n if len(args) != 1 {\n return shim.Error(\"Incorrect number of arguments. Expecting name of the person to query\")\n }\n\n account_name := args[0]\n\n // Admin is allowed to query_balance, and the account holder is allowed to query_balance.\n if !transactor_is_admin(stub) && !transactor_is(stub, account_name) {\n return shim.Error(fmt.Sprintf(\"User \\\"%s\\\" is not authorized to query account \\\"%s\\\"\", GetTransactorCommonName(stub), account_name))\n }\n\n account,err := get_account_(stub, account_name)\n if err != nil {\n return shim.Error(fmt.Sprintf(\"Could not query_balance for account \\\"%s\\\"; error was %v\", account_name, err))\n }\n\n // Serialize Account struct as JSON\n bytes,err := json.Marshal(account)\n if err != nil {\n return shim.Error(fmt.Sprintf(\"Serializing account failed in query_balance because json.Marshal failed with error %v\", err))\n }\n fmt.Printf(\"query_balance Response: %s\\n\", string(bytes))\n return shim.Success(bytes)\n}", "func (_DogsOfRome *DogsOfRomeCaller) Balance(opts *bind.CallOpts, arg0 common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _DogsOfRome.contract.Call(opts, out, \"balance\", arg0)\n\treturn *ret0, err\n}", "func (a *Account) Balance() (balance int64, ok bool) {\n\ta.mutex.RLock()\n\tdefer a.mutex.RUnlock()\n\n\treturn a.balance, a.open\n}", "func (player *Athelete) Pay() {\n\tPayAmount := player.Salary / 12\n\tplayer.AccountBalance += PayAmount\n\n}", "func (a *Account) Withdraw(amount int) error {\n\tif amount > a.Balance {\n\t\treturn fmt.Errorf(\"account: not enough funds\")\n\t}\n\n\ta.Balance -= amount\n\n\treturn nil\n}", "func (s *AccountAPIService) AccountBalance(\n\tctx context.Context,\n\trequest *types.AccountBalanceRequest,\n) (*types.AccountBalanceResponse, *types.Error) {\n\taddr, err := address.Parse(request.AccountIdentifier.Address)\n\tif err != nil {\n\t\treturn nil, AddressError\n\t}\n\n\tvar total uint64\n\tcells, err := s.client.GetCells(context.Background(), &indexer.SearchKey{\n\t\tScript: addr.Script,\n\t\tScriptType: indexer.ScriptTypeLock,\n\t}, indexer.SearchOrderAsc, pageSize, \"\")\n\tif err != nil {\n\t\treturn nil, RpcError\n\t}\n\tfor _, cell := range cells.Objects {\n\t\ttotal += cell.Output.Capacity\n\t}\n\tfor ; len(cells.Objects) == pageSize; {\n\t\tcells, err = s.client.GetCells(context.Background(), &indexer.SearchKey{\n\t\t\tScript: addr.Script,\n\t\t\tScriptType: indexer.ScriptTypeLock,\n\t\t}, indexer.SearchOrderAsc, pageSize, cells.LastCursor)\n\t\tif err != nil {\n\t\t\treturn nil, RpcError\n\t\t}\n\t\tfor _, cell := range cells.Objects {\n\t\t\ttotal += cell.Output.Capacity\n\t\t}\n\t}\n\n\theader, err := s.client.GetTipHeader(context.Background())\n\tif err != nil {\n\t\treturn nil, RpcError\n\t}\n\n\treturn &types.AccountBalanceResponse{\n\t\tBlockIdentifier: &types.BlockIdentifier{\n\t\t\tIndex: int64(header.Number),\n\t\t\tHash: header.Hash.String(),\n\t\t},\n\t\tBalances: []*types.Amount{\n\t\t\t{\n\t\t\t\tValue: fmt.Sprintf(\"%d\", total),\n\t\t\t\tCurrency: 
CkbCurrency,\n\t\t\t},\n\t\t},\n\t}, nil\n}", "func displayAccountAlgoBalance(account string, client *algod.Client) {\n\taccountInfo, err := client.AccountInformation(account).Do(context.Background())\n\tif err != nil {\n\t\tfmt.Printf(\"failed to get account info: %v\\n\", err)\n\t\treturn\n\t}\n\tfmt.Printf(\"%s: %v microAlgos\\n\", accountInfo.Address, accountInfo.Amount)\n}", "func (r *Cash) Balance() (types.Balance, error) {\n\trequest := apirequest.NewAPIRequest()\n\tresult := types.Balance{}\n\tsetCustomConfigErr := request.SetCustomConfig(r.Config)\n\tif setCustomConfigErr != nil {\n\t\treturn result, setCustomConfigErr\n\t}\n\tparams := map[string]string{}\n\terr := request.GET(\"cash/v1/balance\", params, &result)\n\tif err != nil {\n\t\treturn result, err\n\t}\n\n\treturn result, nil\n}", "func (a *Account) Withdraw(money int) error {\n\tif a.balance < money {\n\t\treturn errNotEnoughMoney\n\t}\n\ta.balance -= money\n\treturn nil\n}", "func (h *Handle) BalanceOf() {\n\tvar result types.HexNumber\n\n\taccount := common.HexToAddress(\"0x1b978a1d302335a6f2ebe4b8823b5e17c3c84135\")\n\terr := tokenA.BalanceOf.Call(&result, \"latest\", account)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tlog.Println(result.BigInt().String())\n}", "func (p *Poloniex) TransferBalance(ctx context.Context, currency, from, to string, amount float64) (bool, error) {\n\tvalues := url.Values{}\n\tresult := GenericResponse{}\n\n\tvalues.Set(\"currency\", currency)\n\tvalues.Set(\"amount\", strconv.FormatFloat(amount, 'f', -1, 64))\n\tvalues.Set(\"fromAccount\", from)\n\tvalues.Set(\"toAccount\", to)\n\n\terr := p.SendAuthenticatedHTTPRequest(ctx, exchange.RestSpot, http.MethodPost, poloniexTransferBalance, values, &result)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif result.Error != \"\" && result.Success != 1 {\n\t\treturn false, errors.New(result.Error)\n\t}\n\n\treturn true, nil\n}", "func (core *coreService) Account(addr address.Address) (*iotextypes.AccountMeta, *iotextypes.BlockIdentifier, error) {\n\tctx, span := tracer.NewSpan(context.Background(), \"coreService.Account\")\n\tdefer span.End()\n\taddrStr := addr.String()\n\tif addrStr == address.RewardingPoolAddr || addrStr == address.StakingBucketPoolAddr {\n\t\treturn core.getProtocolAccount(ctx, addrStr)\n\t}\n\tspan.AddEvent(\"accountutil.AccountStateWithHeight\")\n\tctx = genesis.WithGenesisContext(ctx, core.bc.Genesis())\n\tstate, tipHeight, err := accountutil.AccountStateWithHeight(ctx, core.sf, addr)\n\tif err != nil {\n\t\treturn nil, nil, status.Error(codes.NotFound, err.Error())\n\t}\n\tspan.AddEvent(\"ap.GetPendingNonce\")\n\tpendingNonce, err := core.ap.GetPendingNonce(addrStr)\n\tif err != nil {\n\t\treturn nil, nil, status.Error(codes.Internal, err.Error())\n\t}\n\tif core.indexer == nil {\n\t\treturn nil, nil, status.Error(codes.NotFound, blockindex.ErrActionIndexNA.Error())\n\t}\n\tspan.AddEvent(\"indexer.GetActionCount\")\n\tnumActions, err := core.indexer.GetActionCountByAddress(hash.BytesToHash160(addr.Bytes()))\n\tif err != nil {\n\t\treturn nil, nil, status.Error(codes.NotFound, err.Error())\n\t}\n\t// TODO: deprecate nonce field in account meta\n\taccountMeta := &iotextypes.AccountMeta{\n\t\tAddress: addrStr,\n\t\tBalance: state.Balance.String(),\n\t\tPendingNonce: pendingNonce,\n\t\tNumActions: numActions,\n\t\tIsContract: state.IsContract(),\n\t}\n\tif state.IsContract() {\n\t\tvar code protocol.SerializableBytes\n\t\t_, err = core.sf.State(&code, protocol.NamespaceOption(evm.CodeKVNameSpace), 
protocol.KeyOption(state.CodeHash))\n\t\tif err != nil {\n\t\t\treturn nil, nil, status.Error(codes.NotFound, err.Error())\n\t\t}\n\t\taccountMeta.ContractByteCode = code\n\t}\n\tspan.AddEvent(\"bc.BlockHeaderByHeight\")\n\theader, err := core.bc.BlockHeaderByHeight(tipHeight)\n\tif err != nil {\n\t\treturn nil, nil, status.Error(codes.NotFound, err.Error())\n\t}\n\thash := header.HashBlock()\n\tspan.AddEvent(\"coreService.Account.End\")\n\treturn accountMeta, &iotextypes.BlockIdentifier{\n\t\tHash: hex.EncodeToString(hash[:]),\n\t\tHeight: tipHeight,\n\t}, nil\n}", "func Updatebalance(c *gin.Context) {\r\n\r\n\tid := c.Params.ByName(\"id\")\r\n\tvar resp models.Response\r\n\tvar flag bool = false\r\n\tvar customer models.Customer\r\n\tnewbalance, errs := strconv.Atoi(c.Params.ByName(\"amount\"))\r\n\tif errs != nil {\r\n\t\tc.AbortWithStatus(http.StatusBadRequest)\r\n\t}\r\n\tif id != \"\" {\r\n\t\tp, err := models.Askdata()\r\n\t\tif err != nil {\r\n\t\t\tc.AbortWithStatus(http.StatusInternalServerError)\r\n\t\t} else {\r\n\t\t\tfor i, val := range p {\r\n\t\t\t\tif val.Id == id {\r\n\t\t\t\t\tp[i].Balance = newbalance\r\n\t\t\t\t\tcustomer = p[i]\r\n\t\t\t\t\tflag = true\r\n\t\t\t\t\tbreak\r\n\t\t\t\t}\r\n\t\t\t}\r\n\r\n\t\t\tif flag == true {\r\n\t\t\t\tresp.Status = \"success\"\r\n\t\t\t\tresp.Message = \"new balance updated\"\r\n\t\t\t\tresp.Data = append(resp.Data, customer)\r\n\t\t\t\tc.JSON(http.StatusOK, resp)\r\n\t\t\t} else {\r\n\t\t\t\tresp.Status = \"error\"\r\n\t\t\t\tresp.Message = \"Customer does not exist\"\r\n\t\t\t\tc.JSON(http.StatusBadRequest, resp)\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n}", "func (c *RPC) Balance() (*cli.Balance, error) {\n\tbal, err := cli.CheckWalletBalance(c.rpcClient, c.walletFile)\n\tif err != nil {\n\t\treturn nil, RPCError{err}\n\t}\n\n\treturn &bal.Spendable, nil\n}", "func isBalanceSufficient(payer common.Address, cache *storage.CacheDB, config *smartcontract.Config, store store.LedgerStore, gas uint64) (uint64, error) {\n\tbalance, err := getBalanceFromNative(config, cache, store, payer)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif balance < gas {\n\t\treturn 0, fmt.Errorf(\"payer gas insufficient, need %d , only have %d\", gas, balance)\n\t}\n\treturn balance, nil\n}", "func (w *Wallet) Balance() Shivcoin {\n\treturn w.balance\n}", "func (a *Account) Balance() (balance int64, ok bool) {\n\ta.mutex.Lock()\n\tdefer a.mutex.Unlock()\n\n\tif !a.open {\n\t\treturn 0, false\n\t}\n\n\treturn a.balance, true\n}", "func (_Token *TokenCaller) BalanceOf(opts *bind.CallOpts, account common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _Token.contract.Call(opts, out, \"balanceOf\", account)\n\treturn *ret0, err\n}", "func (acc *Account) Balance() (balance int64, ok bool) {\n\n\tacc.mux.Lock()\n\tdefer acc.mux.Unlock()\n\tif acc.close {\n\t\treturn 0, false\n\t}\n\treturn acc.balance, true\n}", "func (a *Account) Deposit(amount int64) (newBalance int64, ok bool) {\n\ta.mutex.Lock() // Aquire the lock on the shared resource the balance\n\tdefer a.mutex.Unlock() // Release lock when surrouding function has executed\n\tif !a.open || a.balance+amount < 0 {\n\t\treturn 0, false // If the bank account is not open or even with the deposit the balance is still negativr\n\t}\n\ta.balance += amount\n\treturn a.balance, true\n}", "func (account *Account) Withdraw(amount int) error {\r\n\tif account.balance < amount {\r\n\t\t// return errors.New(\"Can't widthdraw amount is more than yout balance\")\r\n\t\treturn 
errNoMoney\r\n\t}\r\n\taccount.balance -= amount\r\n\treturn nil\r\n\t// nill is null or None\r\n\r\n}", "func (a *Account) Deposit (amount int) {\n\ta.balance += amount\n}", "func (this *MThrottle) Account(amount int64) {\n\tatomic.AddInt64(&this.used, amount)\n}", "func (_Weth *WethCallerSession) BalanceOf(account common.Address) (*big.Int, error) {\n\treturn _Weth.Contract.BalanceOf(&_Weth.CallOpts, account)\n}", "func GetAccountBalance(ee engine.Exchange) sknet.HandlerFunc {\n\treturn func(c *sknet.Context) error {\n\t\trlt := &pp.EmptyRes{}\n\t\tfor {\n\t\t\treq := pp.GetAccountBalanceReq{}\n\t\t\tif err := c.BindJSON(&req); err != nil {\n\t\t\t\tlogger.Error(err.Error())\n\t\t\t\trlt = pp.MakeErrResWithCode(pp.ErrCode_WrongRequest)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// validate pubkey\n\t\t\tpubkey := req.GetPubkey()\n\t\t\tif err := validatePubkey(pubkey); err != nil {\n\t\t\t\tlogger.Error(err.Error())\n\t\t\t\trlt = pp.MakeErrResWithCode(pp.ErrCode_WrongPubkey)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\ta, err := ee.GetAccount(pubkey)\n\t\t\tif err != nil {\n\t\t\t\trlt = pp.MakeErrResWithCode(pp.ErrCode_NotExits)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tbal := a.GetBalance(req.GetCoinType())\n\t\t\tbres := pp.GetAccountBalanceRes{\n\t\t\t\tResult: pp.MakeResultWithCode(pp.ErrCode_Success),\n\t\t\t\tBalance: &pp.Balance{Amount: pp.PtrUint64(bal)},\n\t\t\t}\n\t\t\treturn c.SendJSON(&bres)\n\t\t}\n\t\treturn c.Error(rlt)\n\t}\n}", "func (cs *CustStoreSqlite) BalanceAdd(\n customerId string, loadAmountCents int64,\n) error {\n // Update customers\n hasCustomer, err := cs.hasCustomer(customerId)\n if err != nil {\n return err\n }\n if !hasCustomer {\n // There is no cuseomer, need to create customer and account.\n err = cs.createCustomerAndAccount(customerId)\n if err != nil {\n return err\n }\n }\n\n // Update accounts\n err = cs.updateAccount(loadAmountCents, customerId)\n if err != nil {\n // Customer and account records will remain created.\n return err\n }\n\n return nil\n}", "func transferHelper(ctx contractapi.TransactionContextInterface, from string, to string, value int) error {\n\n\tif value < 0 { // transfer of 0 is allowed in ERC-20, so just validate against negative amounts\n\t\treturn fmt.Errorf(\"transfer amount cannot be negative\")\n\t}\n\n\tfromCurrentBalanceBytes, err := ctx.GetStub().GetState(from)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to read client account %s from world state: %v\", from, err)\n\t}\n\n\tif fromCurrentBalanceBytes == nil {\n\t\treturn fmt.Errorf(\"client account %s has no balance\", from)\n\t}\n\n\tfromCurrentBalance, _ := strconv.Atoi(string(fromCurrentBalanceBytes)) // Error handling not needed since Itoa() was used when setting the account balance, guaranteeing it was an integer.\n\n\tif fromCurrentBalance < value {\n\t\treturn fmt.Errorf(\"client account %s has insufficient funds\", from)\n\t}\n\n\ttoCurrentBalanceBytes, err := ctx.GetStub().GetState(to)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to read recipient account %s from world state: %v\", to, err)\n\t}\n\n\tvar toCurrentBalance int\n\t// If recipient current balance doesn't yet exist, we'll create it with a current balance of 0\n\tif toCurrentBalanceBytes == nil {\n\t\ttoCurrentBalance = 0\n\t} else {\n\t\ttoCurrentBalance, _ = strconv.Atoi(string(toCurrentBalanceBytes)) // Error handling not needed since Itoa() was used when setting the account balance, guaranteeing it was an integer.\n\t}\n\n\tfromUpdatedBalance := fromCurrentBalance - value\n\ttoUpdatedBalance := toCurrentBalance 
+ value\n\n\terr = ctx.GetStub().PutState(from, []byte(strconv.Itoa(fromUpdatedBalance)))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ctx.GetStub().PutState(to, []byte(strconv.Itoa(toUpdatedBalance)))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"client %s balance updated from %d to %d\", from, fromCurrentBalance, fromUpdatedBalance)\n\tlog.Printf(\"recipient %s balance updated from %d to %d\", to, toCurrentBalance, toUpdatedBalance)\n\n\treturn nil\n}", "func (_XStaking *XStakingSession) BalanceOf(account common.Address) (*big.Int, error) {\n\treturn _XStaking.Contract.BalanceOf(&_XStaking.CallOpts, account)\n}", "func (this *FamilyAccount) pay() {\n\tfmt.Println(\"Amount of Expenditure: \")\n\tAmountEx:fmt.Scanln(&this.money)\n\tif this.money > this.balance {\n\t\tfmt.Println(\"Sorry! You don't have enough money!!!\\nPlease enter another amount: \")\n\t\tgoto AmountEx\n\t} else {\n\t\tthis.balance -= this.money\n\t\tfmt.Println(\"Note of Expenditure: \")\n\t\tfmt.Scanln(&this.note)\n\t\t//Splicing revenue into “details”\n\t\tthis.details += fmt.Sprintf(\"\\nExpenditure\\t\\t%v\\t\\t%v\\t%v\", this.balance, this.money, this.note)\n\t\tfmt.Println(\"Successfully added new details!\")\n\t\tthis.flag = true\n\t}\n}", "func (_Bep20 *Bep20CallerSession) BalanceOf(account common.Address) (*big.Int, error) {\n\treturn _Bep20.Contract.BalanceOf(&_Bep20.CallOpts, account)\n}", "func (_Weth *WethSession) BalanceOf(account common.Address) (*big.Int, error) {\n\treturn _Weth.Contract.BalanceOf(&_Weth.CallOpts, account)\n}", "func (am *AccountManager) CalculateBalance(account string, minconf int) (float64, error) {\n\ta, err := am.Account(account)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn a.CalculateBalance(minconf), nil\n}" ]
[ "0.7146157", "0.6948025", "0.68929577", "0.68679476", "0.683919", "0.6832782", "0.6798275", "0.67196774", "0.6715751", "0.6714826", "0.6670434", "0.66580254", "0.6650628", "0.65918165", "0.6572852", "0.65724254", "0.65653", "0.6563838", "0.6559949", "0.6548054", "0.6547226", "0.6536564", "0.6535962", "0.6516455", "0.6506305", "0.64714", "0.6470356", "0.6465895", "0.64596164", "0.6443374", "0.64414525", "0.64348626", "0.6428933", "0.6428734", "0.6428174", "0.6406996", "0.639598", "0.63930917", "0.63862157", "0.6384361", "0.6384178", "0.63672763", "0.6366703", "0.6365518", "0.63483423", "0.63483423", "0.63483423", "0.6346809", "0.6345415", "0.63349897", "0.63187087", "0.6308716", "0.6306133", "0.6306133", "0.6306133", "0.6306133", "0.6306133", "0.6306133", "0.6306133", "0.6306133", "0.6306133", "0.6306133", "0.63035744", "0.62969536", "0.6286237", "0.62795556", "0.6264555", "0.62369514", "0.62293863", "0.6228136", "0.62167966", "0.6214136", "0.62114114", "0.6209735", "0.6206453", "0.6204685", "0.6193283", "0.61927927", "0.61890686", "0.6183553", "0.61827767", "0.617952", "0.6174741", "0.6173381", "0.61721444", "0.61684716", "0.6166286", "0.6164272", "0.615834", "0.6152518", "0.6148337", "0.6141868", "0.6129655", "0.6129577", "0.6127905", "0.6120975", "0.6110214", "0.61063397", "0.61054313", "0.6101887", "0.6101275" ]
0.0
-1
btc send raw transaction
func (client *BtcClient) BtcSendRawTrans(serial string, tx []byte) (string, error) {
	var redeemTx wire.MsgTx
	err := json.Unmarshal(tx, &redeemTx)
	if err != nil {
		return "", err
	}
	sendResult, err := client.rpcClient.SendRawTransaction(&redeemTx, false)
	//sendResult,err := btcClient.SendRawTransactionAsync(&redeemTx,false).Receive()
	if err != nil {
		return "", err
	}
	return sendResult.String(), nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func sendRawTx(eth *thereum.Thereum, msg *rpcMessage) (*rpcMessage, error) {\n\t// unmarshal into temp data structs (passed via json as a slice of a single hex string)\n\tvar hexTx []string\n\terr := json.Unmarshal(msg.Params, &hexTx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// ensure that some data was passed throught the rpc msg\n\tif len(hexTx) == 0 {\n\t\treturn nil, errors.New(\"no parameters provided for raw transaction\")\n\t}\n\t// unmarshal the hex bytes into a transaction\n\tvar tx types.Transaction\n\ttxBytes, err := hex.DecodeString(strings.Replace(hexTx[0], \"0x\", \"\", 1))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = rlp.DecodeBytes(txBytes, &tx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// add the transaction to thereum\n\terr = eth.AddTx(&tx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tout := &rpcMessage{\n\t\tVersion: \"2.0\",\n\t\tID: 1,\n\t\tResult: tx.Hash().Hex(),\n\t}\n\n\treturn out, nil\n}", "func bitcoinSendRawTransaction(tx []byte, reply *string) error {\n\tglobalBitcoinData.Lock()\n\tdefer globalBitcoinData.Unlock()\n\n\tif !globalBitcoinData.initialised {\n\t\treturn fault.ErrNotInitialised\n\t}\n\n\t// need to be in hex for bitcoind\n\targuments := []interface{}{\n\t\thex.EncodeToString(tx),\n\t}\n\treturn bitcoinCall(\"sendrawtransaction\", arguments, reply)\n}", "func sendRawTransaction(_privateKey string, recipientAddress string, methodName string, value int64, argAmount string) {\n\t//connect to ropsten through infura\n\tec, err := ethclient.Dial(\"https://ropsten.infura.io/\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tchainID := big.NewInt(3) //Ropsten\n\n\t//private key of sender\n\t//TODO: hide key when actual system is implemented\n\tprivateKey, err := crypto.HexToECDSA(_privateKey)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t//get Public Key of sender\n\tpublicKey := privateKey.Public()\n\tpublicKey_ECDSA, valid := publicKey.(*ecdsa.PublicKey)\n\tif !valid {\n\t\tlog.Fatal(\"error casting public key to ECDSA\")\n\t}\n\n\t//get address of sender\n\tfromAddress := crypto.PubkeyToAddress(*publicKey_ECDSA)\n\n\t//get nonce of address\n\tnonce, err := ec.PendingNonceAt(context.Background(), fromAddress)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t//get recipient address\n\trecipient := common.HexToAddress(recipientAddress)\n\n\tamount := big.NewInt(value) // 0 ether\n\tgasLimit := uint64(2000000)\n\tgasPrice, err := ec.SuggestGasPrice(context.Background())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\ttransferFnSignature := []byte(methodName)\n\thash := sha3.NewLegacyKeccak256()\n\thash.Write(transferFnSignature)\n\tmethodID := hash.Sum(nil)[:4]\n\t//fmt.Println(hexutil.Encode(methodID)) // 0xa9059cbb\n\n\targumentAmount := new(big.Int)\n\targumentAmount.SetString(argAmount, 10) //\n\tpaddedAmount := common.LeftPadBytes(argumentAmount.Bytes(), 32)\n\t//fmt.Println(hexutil.Encode(paddedAmount)) // 0x00000000000000000000000000000000000000000000003635c9adc5dea00000\n\n\tvar data []byte\n\tdata = append(data, methodID...)\n\tdata = append(data, paddedAmount...)\n\t//data := []byte(\"0x5c22b6b60000000000000000000000000000000000000000000000000000000000000007\")\n\t// fmt.Printf(\"nonce: %i\\n\", nonce)\n\t// fmt.Printf(\"amount: %i\\n\", amount)\n\t// fmt.Printf(\"gasLimit: %s\\n\", gasLimit)\n\t// fmt.Printf(\"gasPrice: %s\\n\", gasPrice)\n\tfmt.Printf(\"data: %x\\n\", data)\n\n\t//create raw transaction\n\ttransaction := types.NewTransaction(nonce, recipient, 
amount, gasLimit, gasPrice, data)\n\n\t//sign transaction for ropsten network\n\tsigner := types.NewEIP155Signer(chainID)\n\tsignedTx, err := types.SignTx(transaction, signer, privateKey)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// var buff bytes.Buffer\n\t// signedTx.EncodeRLP(&buff)\n\t// fmt.Printf(\"0x%x\\n\", buff.Bytes())\n\n\t//fmt.Println(signedTx)\n\t//broadcast transaction\n\terr = ec.SendTransaction(context.Background(), signedTx)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Printf(\"tx sent: %s\\n\", signedTx.Hash().Hex())\n\n\t// jsonData := fmt.Sprintf(` {\"jsonrpc\":\"2.0\", \"method\":\"eth_sendRawTransaction\", \"params\": [\"0x%x\"], \"id\":4}`, buff.Bytes())\n\t// //params := buff.String()\n\t// fmt.Printf(\"%s\\n\", jsonData)\n\t// response, err := http.Post(\"https://rinkeby.infura.io/gnNuNKvHFmjf9xkJ0StE\", \"application/json\", strings.NewReader(jsonData))\n\t// if err != nil {\n\n\t// \tfmt.Printf(\"Request to INFURA failed with an error: %s\\n\", err)\n\t// \tfmt.Println()\n\n\t// } else {\n\t// \tdata, _ := ioutil.ReadAll(response.Body)\n\n\t// \tfmt.Println(\"INFURA response:\")\n\t// \tfmt.Println(string(data))\n\t// }\n}", "func (a API) SendRawTransaction(cmd *btcjson.SendRawTransactionCmd) (e error) {\n\tRPCHandlers[\"sendrawtransaction\"].Call <-API{a.Ch, cmd, nil}\n\treturn\n}", "func (pgb *ChainDBRPC) SendRawTransaction(txhex string) (string, error) {\n\tmsg, err := txhelpers.MsgTxFromHex(txhex)\n\tif err != nil {\n\t\tlog.Errorf(\"SendRawTransaction failed: could not decode hex\")\n\t\treturn \"\", err\n\t}\n\thash, err := pgb.Client.SendRawTransaction(msg, true)\n\tif err != nil {\n\t\tlog.Errorf(\"SendRawTransaction failed: %v\", err)\n\t\treturn \"\", err\n\t}\n\treturn hash.String(), err\n}", "func (c BitcoinCoreChain) RawTx(cxt context.Context, from, to, amount, memo, asset string) (string, error) {\n if configure.ChainAssets[asset] != Bitcoin {\n return \"\", fmt.Errorf(\"Unsupport %s in bitcoincore\", asset)\n }\n amountF, err := strconv.ParseFloat(amount, 64)\n if err != nil {\n return \"\", err\n }\n txAmountSatoshi, err := btcutil.NewAmount(amountF)\n if err != nil {\n return \"\", err\n }\n\n fromPkScript, err := BitcoincoreAddressP2AS(from, c.Mode)\n if err != nil {\n return \"\", err\n }\n toPkScript, err := BitcoincoreAddressP2AS(to, c.Mode)\n if err != nil {\n return \"\", err\n }\n\n // query bitcoin chain info\n chaininfo, err := c.Client.GetBlockChainInfo()\n if err != nil {\n return \"\", err\n }\n // feeKB, err := c.Client.EstimateFee(int64(6))\n feeKB, err := c.Client.EstimateSmartFee(int64(6))\n if err != nil {\n return \"\", err\n }\n feeRate := mempool.SatoshiPerByte(feeKB.FeeRate)\n\n if feeKB.FeeRate <= 0 {\n feeRate = mempool.SatoshiPerByte(100)\n }\n\n var (\n selectedutxos, unselectedutxos []db.UTXO\n selectedCoins coinset.Coins\n )\n\n // Coin Select\n if strings.ToLower(configure.ChainsInfo[Bitcoin].Coin) == strings.ToLower(asset) {\n // select coins for BTC transfer\n if selectedutxos, unselectedutxos, selectedCoins, err = CoinSelect(int64(chaininfo.Headers), txAmountSatoshi, c.Wallet.Address.UTXOs); err != nil {\n return \"\", fmt.Errorf(\"Select UTXO for tx %s\", err)\n }\n }else {\n // select coins for Token transfer\n // 300: https://bitcoin.stackexchange.com/questions/1195/how-to-calculate-transaction-size-before-sending-legacy-non-segwit-p2pkh-p2sh\n inputAmount := feeRate.Fee(uint32(300))\n if selectedutxos, unselectedutxos, selectedCoins, err = CoinSelect(int64(chaininfo.Headers), inputAmount, 
c.Wallet.Address.UTXOs); err != nil {\n return \"\", fmt.Errorf(\"Select UTXO for tx %s\", err)\n }\n }\n\n var vinAmount int64\n for _, coin := range selectedCoins.Coins() {\n vinAmount += int64(coin.Value())\n }\n msgTx := coinset.NewMsgTxWithInputCoins(wire.TxVersion, selectedCoins)\n\n token := configure.ChainsInfo[Bitcoin].Tokens[strings.ToLower(asset)]\n if token != \"\" && strings.ToLower(asset) != strings.ToLower(configure.ChainsInfo[Bitcoin].Coin) {\n // OmniToken transfer\n b := txscript.NewScriptBuilder()\n b.AddOp(txscript.OP_RETURN)\n\n omniVersion := util.Int2byte(uint64(0), 2)\t// omnicore version\n txType := util.Int2byte(uint64(0), 2)\t// omnicore tx type: simple send\n propertyID := configure.ChainsInfo[Bitcoin].Tokens[asset]\n tokenPropertyid, err := strconv.Atoi(propertyID)\n if err != nil {\n return \"\", fmt.Errorf(\"tokenPropertyid to int %s\", err)\n }\n // tokenPropertyid := configure.Config.OmniToken[\"omni_first_token\"].(int)\n tokenIdentifier := util.Int2byte(uint64(tokenPropertyid), 4)\t// omni token identifier\n tokenAmount := util.Int2byte(uint64(txAmountSatoshi), 8)\t// omni token transfer amount\n\n b.AddData([]byte(\"omni\"))\t// transaction maker\n b.AddData(omniVersion)\n b.AddData(txType)\n b.AddData(tokenIdentifier)\n b.AddData(tokenAmount)\n pkScript, err := b.Script()\n if err != nil {\n return \"\", fmt.Errorf(\"Bitcoin Token pkScript %s\", err)\n }\n msgTx.AddTxOut(wire.NewTxOut(0, pkScript))\n txOutReference := wire.NewTxOut(0, toPkScript)\n msgTx.AddTxOut(txOutReference)\n }else {\n // BTC transfer\n txOutTo := wire.NewTxOut(int64(txAmountSatoshi), toPkScript)\n msgTx.AddTxOut(txOutTo)\n\n // recharge\n // 181, 34: https://bitcoin.stackexchange.com/questions/1195/how-to-calculate-transaction-size-before-sending-legacy-non-segwit-p2pkh-p2sh\n fee := feeRate.Fee(uint32(msgTx.SerializeSize() + 181 + 34))\n if (vinAmount - int64(txAmountSatoshi) - int64(fee)) > 0 {\n txOutReCharge := wire.NewTxOut((vinAmount-int64(txAmountSatoshi) - int64(fee)), fromPkScript)\n msgTx.AddTxOut(txOutReCharge)\n }else {\n selectedutxoForFee, _, selectedCoinsForFee, err := CoinSelect(int64(chaininfo.Headers), fee, unselectedutxos)\n if err != nil {\n return \"\", fmt.Errorf(\"Select UTXO for fee %s\", err)\n }\n for _, coin := range selectedCoinsForFee.Coins() {\n vinAmount += int64(coin.Value())\n }\n txOutReCharge := wire.NewTxOut((vinAmount - int64(txAmountSatoshi) - int64(fee)), fromPkScript)\n msgTx.AddTxOut(txOutReCharge)\n selectedutxos = append(selectedutxos, selectedutxoForFee...)\n }\n }\n\n buf := bytes.NewBuffer(make([]byte, 0, msgTx.SerializeSize()))\n msgTx.Serialize(buf)\n rawTxHex := hex.EncodeToString(buf.Bytes())\n c.Wallet.SelectedUTXO = selectedutxos\n return rawTxHex, nil\n}", "func sendTransaction(scid string, entry string, to string, amount int64, id string) {\n\t\n\twalletURL:= \"http://127.0.0.1:30309/json_rpc\"\n\tvar amountString string\t\n\tif amount == 0 {\n\t\tamountString = \"\"\n\t} else {\t\n\t\tamountString = strconv.FormatInt(amount, 10)\n\t}\n\tdata:= PayloadGeneral{\n\t\tJsonrpc: \"2.0\", \n\t\tID: \"0\",\n\t\tMethod: \"transfer_split\",\n\t\tParams: Params2{\n\t\t\tMixin: 5,\n\t\t\tGetTxKey: true,\n\t\t\tScTx: ScTx2{\n\t\t\t\tEntrypoint: entry,\n\t\t\t\tScid: scid,\n\t\t\t\tValue: 0,\n\t\t\t\tParams: Params3{\n\t\t\t\t\t\tTo: to,\n\t\t\t\t\t\tAmount: amountString,\n\t\t\t\t\t\tID: id,\n\t\t\t\t},\n\t\t\t}, \n\t\t},\n\t}\n\n\t\n\tpayloadBytes, err := json.Marshal(data)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tbody 
:= bytes.NewReader(payloadBytes)\n\t\n\t_, err=rpcPost(body, walletURL)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\t\n\t//println(result)\t\n\tfmt.Println(\"Transaction sent to wallet!\")\n\t\n}", "func (w *rpcWallet) SendRawTransaction(ctx context.Context, tx *wire.MsgTx, allowHighFees bool) (*chainhash.Hash, error) {\n\thash, err := w.client().SendRawTransaction(ctx, tx, allowHighFees)\n\treturn hash, translateRPCCancelErr(err)\n}", "func (eth *EthClient) SendRawTx(hex string) (common.Hash, error) {\n\tresult := common.Hash{}\n\terr := eth.Call(&result, \"eth_sendRawTransaction\", hex)\n\treturn result, err\n}", "func (h *TransactionHandler) sendTransaction(userbank_id int, transaction_id int, totalAmount int) error {\r\n\tjsonW := &SendTransaction{\r\n\t\tUserBankID: userbank_id,\r\n\t\tTransactionID: transaction_id,\r\n\t\tAmount: totalAmount,\r\n\t}\r\n\r\n\tjsonR, _ := json.Marshal(jsonW)\r\n\tjsonStr := []byte(string(jsonR))\r\n\r\n\treq, _ := http.NewRequest(\"POST\", h.Config.BankAPIUrl+\"/transaction\", bytes.NewBuffer(jsonStr))\r\n\treq.Header.Set(\"Content-Type\", \"application/json\")\r\n\r\n\tclient := &http.Client{}\r\n\t_, err2 := client.Do(req)\r\n\r\n\tif err2 != nil {\r\n\t\treturn errors.New(\"Gagal menghubungkan ke server 2\")\r\n\t}\r\n\r\n\treturn nil\r\n}", "func (api *PublicEthereumAPI) SendRawTransaction(data hexutil.Bytes) (common.Hash, error) {\n\tapi.logger.Debug(\"eth_sendRawTransaction\", \"data\", data)\n\ttx := new(evmtypes.MsgEthereumTx)\n\n\t// RLP decode raw transaction bytes\n\tif err := rlp.DecodeBytes(data, tx); err != nil {\n\t\t// Return nil is for when gasLimit overflows uint64\n\t\treturn common.Hash{}, nil\n\t}\n\n\t// Encode transaction by default Tx encoder\n\ttxEncoder := authclient.GetTxEncoder(api.clientCtx.Codec)\n\ttxBytes, err := txEncoder(tx)\n\tif err != nil {\n\t\treturn common.Hash{}, err\n\t}\n\n\t// TODO: Possibly log the contract creation address (if recipient address is nil) or tx data\n\t// If error is encountered on the node, the broadcast will not return an error\n\tres, err := api.clientCtx.BroadcastTx(txBytes)\n\tif err != nil {\n\t\treturn common.Hash{}, err\n\t}\n\n\tif res.Code != abci.CodeTypeOK {\n\t\treturn common.Hash{}, fmt.Errorf(res.RawLog)\n\t}\n\t// Return transaction hash\n\treturn common.HexToHash(res.TxHash), nil\n}", "func SendRawTx(rawTx []byte) (common.Hash, error) {\n\thash := new(common.Hash)\n\terr := ClientCall(\"ft_sendRawTransaction\", hash, hexutil.Bytes(rawTx))\n\treturn *hash, err\n}", "func broadcastTransaction(tx transaction) error {\n\tjsonTx, _ := json.Marshal(&tx)\n\tresp, err := http.Post(fmt.Sprintf(\"%s/transaction/send\", proxyHost), \"\",\n\t\tstrings.NewReader(string(jsonTx)))\n\tif err != nil {\n\t\tlog.Println(errSendingTx)\n\t\treturn err\n\t}\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tdefer func() {\n\t\t_ = resp.Body.Close()\n\t}()\n\tif err != nil {\n\t\tlog.Println(errSendingTx)\n\t\treturn err\n\t}\n\tres := string(body)\n\tfmt.Printf(\"Result: %s\\n\\r\", res)\n\treturn nil\n}", "func (c Client) SendTransaction(t txn.Transaction) (string, error) {\n\tvar result string\n\terr := c.Call(&result, \"eth_sendRawTransaction\", \"0x\"+hex.EncodeToString(t.Encode()))\n\treturn result, err\n}", "func handleWalletSendRawTransaction(s *rpcServer, cmd btcjson.Cmd, wallet walletChan) error {\n\tresult, err := handleSendRawTransaction(s, cmd)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// The result is already guaranteed to be a valid hash string if no\n\t// error was 
returned above, so it's safe to ignore the error here.\n\ttxSha, _ := btcwire.NewShaHashFromStr(result.(string))\n\n\t// Request to be notified when the transaction is mined.\n\ts.ws.AddMinedTxRequest(wallet, txSha)\n\treturn nil\n}", "func handleSendRawTransaction(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {\n\tc := cmd.(*rpcmodel.SendRawTransactionCmd)\n\t// Deserialize and send off to tx relay\n\thexStr := c.HexTx\n\tif len(hexStr)%2 != 0 {\n\t\thexStr = \"0\" + hexStr\n\t}\n\tserializedTx, err := hex.DecodeString(hexStr)\n\tif err != nil {\n\t\treturn nil, rpcDecodeHexError(hexStr)\n\t}\n\tvar msgTx wire.MsgTx\n\terr = msgTx.Deserialize(bytes.NewReader(serializedTx))\n\tif err != nil {\n\t\treturn nil, &rpcmodel.RPCError{\n\t\t\tCode: rpcmodel.ErrRPCDeserialization,\n\t\t\tMessage: \"TX decode failed: \" + err.Error(),\n\t\t}\n\t}\n\n\t// Use 0 for the tag to represent local node.\n\ttx := util.NewTx(&msgTx)\n\tacceptedTxs, err := s.cfg.TxMemPool.ProcessTransaction(tx, false, 0)\n\tif err != nil {\n\t\t// When the error is a rule error, it means the transaction was\n\t\t// simply rejected as opposed to something actually going wrong,\n\t\t// so log it as such. Otherwise, something really did go wrong,\n\t\t// so log it as an actual error. In both cases, a JSON-RPC\n\t\t// error is returned to the client with the deserialization\n\t\t// error code\n\t\tif errors.As(err, &mempool.RuleError{}) {\n\t\t\tlog.Debugf(\"Rejected transaction %s: %s\", tx.ID(),\n\t\t\t\terr)\n\t\t} else {\n\t\t\tlog.Errorf(\"Failed to process transaction %s: %s\",\n\t\t\t\ttx.ID(), err)\n\t\t}\n\t\treturn nil, &rpcmodel.RPCError{\n\t\t\tCode: rpcmodel.ErrRPCVerify,\n\t\t\tMessage: \"TX rejected: \" + err.Error(),\n\t\t}\n\t}\n\n\t// When the transaction was accepted it should be the first item in the\n\t// returned array of accepted transactions. 
The only way this will not\n\t// be true is if the API for ProcessTransaction changes and this code is\n\t// not properly updated, but ensure the condition holds as a safeguard.\n\t//\n\t// Also, since an error is being returned to the caller, ensure the\n\t// transaction is removed from the memory pool.\n\tif len(acceptedTxs) == 0 || !acceptedTxs[0].Tx.ID().IsEqual(tx.ID()) {\n\t\terr := s.cfg.TxMemPool.RemoveTransaction(tx, true, true)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\terrStr := fmt.Sprintf(\"transaction %s is not in accepted list\",\n\t\t\ttx.ID())\n\t\treturn nil, internalRPCError(errStr, \"\")\n\t}\n\n\t// Generate and relay inventory vectors for all newly accepted\n\t// transactions into the memory pool due to the original being\n\t// accepted.\n\ts.cfg.ConnMgr.RelayTransactions(acceptedTxs)\n\n\t// Notify both websocket and getBlockTemplate long poll clients of all\n\t// newly accepted transactions.\n\ts.NotifyNewTransactions(acceptedTxs)\n\n\t// Keep track of all the sendRawTransaction request txns so that they\n\t// can be rebroadcast if they don't make their way into a block.\n\ttxD := acceptedTxs[0]\n\tiv := wire.NewInvVect(wire.InvTypeTx, (*daghash.Hash)(txD.Tx.ID()))\n\ts.cfg.ConnMgr.AddRebroadcastInventory(iv, txD)\n\n\treturn tx.ID().String(), nil\n}", "func (ec *Client) SendTransaction(ctx context.Context, tx *types.Transaction) error {\n\tdata, err := rlp.EncodeToBytes(tx)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ec.c.CallContext(ctx, nil, \"siot_sendRawTransaction\", helper.ToHex(data))\n}", "func (cli *CommandLine) send(from, to string, amount int) {\n\tchain := blockchain.ContinueBlockChain(from)\n\tdefer chain.Database.Close()\n\n\ttx := blockchain.NewTransaction(from, to, amount, chain)\n\tchain.AddBlock([]*blockchain.Transaction{tx})\n\tfmt.Println(\"Success send token\")\n}", "func (client *Client) CommitTx(data string) (string, error) {\n\n\tresponse := &rawTxResp{}\n\terrorResp := &errorResponse{}\n\n\tresp, err := client.client.Post(\"/eth/sendRawTransaction\").BodyJSON(&rawTx{\n\t\tData: data,\n\t}).Receive(response, errorResp)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif code := resp.StatusCode; 200 < code || code > 299 {\n\t\treturn \"\", fmt.Errorf(\"(%s) %s\", resp.Status, errorResp.Message)\n\t}\n\n\tjsontext, _ := json.Marshal(response)\n\n\tclient.DebugF(\"response(%d) :%s\", resp.StatusCode, string(jsontext))\n\n\treturn response.TxHash, nil\n}", "func (c *client) PostTx(hexTx []byte, param map[string]string) ([]TxCommitResult, error) {\n\tif len(hexTx) == 0 {\n\t\treturn nil, fmt.Errorf(\"Invalid tx %s\", hexTx)\n\t}\n\n\tbody := hexTx\n\tresp, err := c.Post(\"/broadcast\", body, param)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttxResult := make([]TxCommitResult, 0)\n\tif err := json.Unmarshal(resp, &txResult); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn txResult, nil\n}", "func (c *Client) SendRawTransactionT(txHex string, allowHighFees bool) (string, error) {\n\treturn c.SendRawTransactionAsyncT(txHex, allowHighFees).ReceiveT()\n}", "func SendTransaction(from, to common.Address,\n\tprivKey *ecdsa.PrivateKey,\n\tethclient ComponentConfig,\n\tdata []byte) ([]byte, error) {\n\n\t// Set the parameters of the transaction\n\tvalue := big.NewInt(0)\n\tgasLimit := uint64(400000)\n\tgasPrice := big.NewInt(0)\n\n\tnonce, err := ethclient.EthereumClient.PendingNonceAt(context.Background(), from)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Create the transaction\n\ttx := types.NewTransaction(nonce, 
to, value, gasLimit, gasPrice, data)\n\n\t// Sign the transaction with the private key of the sender\n\tchainID, err := ethclient.EthereumClient.NetworkID(context.Background())\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn nil, err\n\t}\n\n\tsignedTx, err := types.SignTx(tx, types.NewEIP155Signer(chainID), privKey)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn nil, err\n\t}\n\n\t// Send the transaction\n\terr = ethclient.EthereumClient.SendTransaction(context.Background(), signedTx)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn nil, err\n\t}\n\n\treturn signedTx.Hash().Bytes(), nil\n}", "func (api *PublicEthereumAPI) SendTransaction(args rpctypes.SendTxArgs) (common.Hash, error) {\n\tapi.logger.Debug(\"eth_sendTransaction\", \"args\", args)\n\t// TODO: Change this functionality to find an unlocked account by address\n\n\tkey, exist := rpctypes.GetKeyByAddress(api.keys, args.From)\n\tif !exist {\n\t\tapi.logger.Debug(\"failed to find key in keyring\", \"key\", args.From)\n\t\treturn common.Hash{}, keystore.ErrLocked\n\t}\n\n\t// Mutex lock the address' nonce to avoid assigning it to multiple requests\n\tif args.Nonce == nil {\n\t\tapi.nonceLock.LockAddr(args.From)\n\t\tdefer api.nonceLock.UnlockAddr(args.From)\n\t}\n\n\t// Assemble transaction from fields\n\ttx, err := api.generateFromArgs(args)\n\tif err != nil {\n\t\tapi.logger.Debug(\"failed to generate tx\", \"error\", err)\n\t\treturn common.Hash{}, err\n\t}\n\n\tif err := tx.ValidateBasic(); err != nil {\n\t\tapi.logger.Debug(\"tx failed basic validation\", \"error\", err)\n\t\treturn common.Hash{}, err\n\t}\n\n\t// Sign transaction\n\tif err := tx.Sign(api.chainIDEpoch, key.ToECDSA()); err != nil {\n\t\tapi.logger.Debug(\"failed to sign tx\", \"error\", err)\n\t\treturn common.Hash{}, err\n\t}\n\n\t// Encode transaction by default Tx encoder\n\ttxEncoder := authclient.GetTxEncoder(api.clientCtx.Codec)\n\ttxBytes, err := txEncoder(tx)\n\tif err != nil {\n\t\treturn common.Hash{}, err\n\t}\n\n\t// Broadcast transaction in sync mode (default)\n\t// NOTE: If error is encountered on the node, the broadcast will not return an error\n\tres, err := api.clientCtx.BroadcastTx(txBytes)\n\tif err != nil {\n\t\treturn common.Hash{}, err\n\t}\n\n\tif res.Code != abci.CodeTypeOK {\n\t\treturn common.Hash{}, fmt.Errorf(res.RawLog)\n\t}\n\t// Return transaction hash\n\treturn common.HexToHash(res.TxHash), nil\n}", "func (c *Client) SendTransaction(tx *Transaction) (*HashResponse, error) {\n\trequest := c.newRequest(EthTransaction)\n\n\trequest.Params = []interface{}{\n\t\ttx,\n\t}\n\n\tresponse := &HashResponse{}\n\n\treturn response, c.send(request, response)\n}", "func (ec *Client) SendTransaction(ctx context.Context, tx *types.Transaction) error {\n\tdata, err := rlp.EncodeToBytes(tx)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ec.c.CallContext(ctx, nil, \"eth_sendRawTransaction\", common.ToHex(data))\n}", "func (mc *MoacChain) sendTransaction(fromAddr, contractAddr, toAddr, tradePassword string, gasTotal, value float64, precision int) (tradeHash string, err error) {\n\n\tdefer func() {\n\t\tif re := recover(); re != nil {\n\t\t\terr = re.(error)\n\t\t}\n\t}()\n\n\tif precision > 4 {\n\t\treturn \"\", errors.New(\"\")\n\t}\n\n\thexStr := hex.EncodeToString(new(big.Int).Mul(big.NewInt(int64(value*10000)), big.NewInt(int64(math.Pow10(precision-4)))).Bytes())\n\n\tvar dict map[string]interface{}\n\tif contractAddr == \"\" {\n\n\t\tdict = map[string]interface{}{\n\t\t\t\"from\": fromAddr,\n\t\t\t\"to\": 
toAddr,\n\t\t\t\"gas\": \"0x\" + strconv.FormatInt(int64(gasTotal*50000000), 16),\n\t\t\t\"gasPrice\": \"0x4a817c800\", //20000000000\n\t\t\t\"value\": \"0x\" + hexStr,\n\t\t\t\"data\": \"0x\",\n\t\t}\n\t} else {\n\t\tplaceholderStr := \"0000000000000000000000000000000000000000000000000000000000000000\"\n\t\tnumberStr := hexStr\n\n\t\tdict = map[string]interface{}{\n\t\t\t\"from\": fromAddr,\n\t\t\t\"to\": contractAddr,\n\t\t\t\"gas\": \"0x\" + strconv.FormatInt(int64(gasTotal*50000000), 16),\n\t\t\t\"gasPrice\": \"0x4a817c800\", //20000000000\n\t\t\t\"value\": \"0x0\",\n\t\t\t\"data\": \"0xa9059cbb\" + placeholderStr[:(64-len(toAddr[2:len(toAddr)]))] + toAddr[2:len(toAddr)] + placeholderStr[:(64-len(numberStr))] + numberStr,\n\t\t}\n\t}\n\n\terr = rpcClient.Call(&tradeHash, \"personal_sendTransaction\", dict, tradePassword)\n\n\treturn tradeHash, err\n}", "func (h *Handler) SendTransaction(\n route string,\n transaction []byte,\n) (*entityApi.TransactionStatus, error) {\n\n apiResponse, err := h.apiClient.Post(route, nil, transaction)\n if err != nil {\n return nil, err\n }\n var transactionStatus entityApi.TransactionStatus\n if err := unmarshalApiResponse(apiResponse, &transactionStatus); err != nil {\n return nil, err\n }\n return &transactionStatus, nil\n}", "func (_BtlCoin *BtlCoinRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {\n\treturn _BtlCoin.Contract.BtlCoinTransactor.contract.Transact(opts, method, params...)\n}", "func sendRegisterTx(cdc *wire.Codec) client.CommandTxCallback {\n\treturn func(cmd *cobra.Command, args []string) error {\n\t\tctx := client.NewCoreContextFromViper()\n\t\tname := viper.GetString(client.FlagUser)\n\t\treferrer := viper.GetString(client.FlagReferrer)\n\t\tamount := viper.GetString(client.FlagAmount)\n\n\t\tresetPriv := secp256k1.GenPrivKey()\n\t\ttransactionPriv := secp256k1.GenPrivKey()\n\t\tappPriv := secp256k1.GenPrivKey()\n\n\t\tfmt.Println(\"reset private key is:\", strings.ToUpper(hex.EncodeToString(resetPriv.Bytes())))\n\t\tfmt.Println(\"transaction private key is:\", strings.ToUpper(hex.EncodeToString(transactionPriv.Bytes())))\n\t\tfmt.Println(\"app private key is:\", strings.ToUpper(hex.EncodeToString(appPriv.Bytes())))\n\n\t\t// // create the message\n\t\tmsg := acc.NewRegisterMsg(\n\t\t\treferrer, name, types.LNO(amount),\n\t\t\tresetPriv.PubKey(), transactionPriv.PubKey(), appPriv.PubKey())\n\n\t\t// build and sign the transaction, then broadcast to Tendermint\n\t\tres, err := ctx.SignBuildBroadcast([]sdk.Msg{msg}, cdc)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Printf(\"Committed at block %d. Hash: %s\\n\", res.Height, res.Hash.String())\n\t\treturn nil\n\t}\n}", "func (c *CoinTest) sendTransfer(stub shim.ChaincodeStubInterface, args []string) pb.Response {\r\n\tvar A, B string // Entities\r\n\tvar Aval, Bval int // Asset holdings\r\n\tvar X int // Transaction value\r\n\tvar err error\r\n\r\n\tif len(args) != 3 {\r\n\t\treturn shim.Error(\"Incorrect number of arguments. 
Expecting 3\")\r\n\t}\r\n\r\n\tA = args[0]\r\n\tB = args[1]\r\n\r\n\t// Get the state from the ledger\r\n\t// TODO: will be nice to have a GetAllState call to ledger\r\n\tAvalbytes, err := stub.GetState(A)\r\n\tif err != nil {\r\n\t\treturn shim.Error(\"Failed to get state\")\r\n\t}\r\n\tif Avalbytes == nil {\r\n\t\treturn shim.Error(\"Entity not found\")\r\n\t}\r\n\tAval, _ = strconv.Atoi(string(Avalbytes))\r\n\r\n\tBvalbytes, err := stub.GetState(B)\r\n\tif err != nil {\r\n\t\treturn shim.Error(\"Failed to get state\")\r\n\t}\r\n\tif Bvalbytes == nil {\r\n\t\treturn shim.Error(\"Entity not found\")\r\n\t}\r\n\tBval, _ = strconv.Atoi(string(Bvalbytes))\r\n\r\n\t// Perform the execution\r\n\tX, err = strconv.Atoi(args[2])\r\n\tif err != nil {\r\n\t\treturn shim.Error(\"Invalid transaction amount, expecting a integer value\")\r\n\t}\r\n\tAval = Aval - X\r\n\tBval = Bval + X\r\n\tfmt.Printf(\"Aval = %d, Bval = %d\\n\", Aval, Bval)\r\n\r\n\t// Write the state back to the ledger\r\n\terr = stub.PutState(A, []byte(strconv.Itoa(Aval)))\r\n\tif err != nil {\r\n\t\treturn shim.Error(err.Error())\r\n\t}\r\n\r\n\terr = stub.PutState(B, []byte(strconv.Itoa(Bval)))\r\n\tif err != nil {\r\n\t\treturn shim.Error(err.Error())\r\n\t}\r\n\r\n\t//TODO: make a transfer to update token out-of-ledger balance\r\n\r\n\treturn shim.Success(nil)\r\n}", "func SignRawTransaction(cmd *SignRawTransactionCmd, chainCfg *chaincfg.Params) (*btcjson.SignRawTransactionResult, error) {\n\tserializedTx, err := DecodeHexStr(cmd.RawTx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar tx wire.MsgTx\n\terr = tx.Deserialize(bytes.NewBuffer(serializedTx))\n\tif err != nil {\n\t\te := errors.New(\"TX decode failed\")\n\t\treturn nil, e\n\t}\n\n\tvar hashType txscript.SigHashType\n\tswitch *cmd.Flags {\n\tcase \"ALL\":\n\t\thashType = txscript.SigHashAll\n\tcase \"NONE\":\n\t\thashType = txscript.SigHashNone\n\tcase \"SINGLE\":\n\t\thashType = txscript.SigHashSingle\n\tcase \"ALL|ANYONECANPAY\":\n\t\thashType = txscript.SigHashAll | txscript.SigHashAnyOneCanPay\n\tcase \"NONE|ANYONECANPAY\":\n\t\thashType = txscript.SigHashNone | txscript.SigHashAnyOneCanPay\n\tcase \"SINGLE|ANYONECANPAY\":\n\t\thashType = txscript.SigHashSingle | txscript.SigHashAnyOneCanPay\n\tdefault:\n\t\te := errors.New(\"Invalid sighash parameter\")\n\t\treturn nil, e\n\t}\n\n\t// TODO: really we probably should look these up with btcd anyway to\n\t// make sure that they match the blockchain if present.\n\tinputs := make(map[wire.OutPoint][]byte)\n\tscripts := make(map[string][]byte)\n\tvar cmdInputs []RawTxInput\n\tif cmd.Inputs != nil {\n\t\tcmdInputs = *cmd.Inputs\n\t}\n\tfor _, rti := range cmdInputs {\n\t\tinputHash, err := chainhash.NewHashFromStr(rti.Txid)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tscript, err := DecodeHexStr(rti.ScriptPubKey)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// redeemScript is only actually used iff the user provided\n\t\t// private keys. In which case, it is used to get the scripts\n\t\t// for signing. 
If the user did not provide keys then we always\n\t\t// get scripts from the wallet.\n\t\t// Empty strings are ok for this one and hex.DecodeString will\n\t\t// DTRT.\n\t\tif cmd.PrivKeys != nil && len(*cmd.PrivKeys) != 0 {\n\t\t\tredeemScript, err := DecodeHexStr(rti.RedeemScript)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\taddr, err := btcutil.NewAddressScriptHash(redeemScript, chainCfg)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tscripts[addr.String()] = redeemScript\n\t\t}\n\t\tinputs[wire.OutPoint{\n\t\t\tHash: *inputHash,\n\t\t\tIndex: rti.Vout,\n\t\t}] = script\n\t}\n\n\t// Parse list of private keys, if present. If there are any keys here\n\t// they are the keys that we may use for signing. If empty we will\n\t// use any keys known to us already.\n\tvar keys map[string]*btcutil.WIF\n\tif cmd.PrivKeys != nil {\n\t\tkeys = make(map[string]*btcutil.WIF)\n\n\t\tfor _, key := range *cmd.PrivKeys {\n\t\t\twif, err := btcutil.DecodeWIF(key)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif !wif.IsForNet(chainCfg) {\n\t\t\t\ts := \"key network doesn't match wallet's\"\n\t\t\t\treturn nil, errors.New(s)\n\t\t\t}\n\n\t\t\taddr, err := btcutil.NewAddressPubKey(wif.SerializePubKey(), chainCfg)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tkeys[addr.EncodeAddress()] = wif\n\n\t\t\tbechAddr, err := btcutil.NewAddressWitnessPubKeyHash(btcutil.Hash160(wif.PrivKey.PubKey().SerializeCompressed()), chainCfg)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrap(err, \"new bech addr err\")\n\t\t\t}\n\t\t\tkeys[bechAddr.EncodeAddress()] = wif\n\n\t\t\tfor _, rti := range cmdInputs {\n\t\t\t\tredeemScript, err := DecodeHexStr(rti.RedeemScript)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\taddr, err := btcutil.NewAddressScriptHash(redeemScript, chainCfg)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tkeys[addr.EncodeAddress()] = wif\n\t\t\t}\n\t\t}\n\n\t}\n\n\tgetScriptPubKey := func(txID string) (scriptPubKey []byte, err error) {\n\t\tfor i := range *cmd.Inputs {\n\t\t\tif (*cmd.Inputs)[i].Txid == txID {\n\t\t\t\treturn hex.DecodeString((*cmd.Inputs)[i].ScriptPubKey)\n\t\t\t}\n\t\t}\n\t\terr = errors.Errorf(\"not fund scriptPubKey: %s\", txID)\n\t\treturn\n\t}\n\tfor i := range tx.TxIn {\n\t\tvar scriptPubKey []byte\n\t\tscriptPubKey, err = getScriptPubKey(tx.TxIn[i].PreviousOutPoint.Hash.String())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tinputs[tx.TxIn[i].PreviousOutPoint] = scriptPubKey\n\t}\n\n\tfnFindTxInAmount := func(op wire.OutPoint) int64 {\n\t\tfor _, in := range *cmd.Inputs {\n\t\t\tif in.Txid == op.Hash.String() && in.Vout == op.Index {\n\t\t\t\treturn decimal.NewFromFloat(in.Amount).Shift(8).IntPart()\n\t\t\t}\n\t\t}\n\t\treturn -1\n\t}\n\n\t// All args collected. Now we can sign all the inputs that we can.\n\t// `complete' denotes that we successfully signed all outputs and that\n\t// all scripts will run to completion. 
This is returned as part of the\n\t// reply.\n\tsignErrs, err := signTransaction(&tx, hashType, inputs, keys, scripts, chainCfg, fnFindTxInAmount)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(signErrs) > 0 {\n\t\terrMsgs := []string{}\n\t\tfor _, e := range signErrs {\n\t\t\tif !strings.Contains(e.Error.Error(), \"not all signatures empty on failed checkmultisig\") { //忽略多重签名未完成的错误\n\t\t\t\terrMsgs = append(errMsgs, e.Error.Error())\n\t\t\t}\n\t\t}\n\t\tif len(errMsgs) > 0 {\n\t\t\treturn nil, errors.New(strings.Join(errMsgs, \",\"))\n\t\t}\n\t}\n\n\tvar buf bytes.Buffer\n\tbuf.Grow(tx.SerializeSize())\n\n\t// All returned errors (not OOM, which panics) encounted during\n\t// bytes.Buffer writes are unexpected.\n\tif err = tx.Serialize(&buf); err != nil {\n\t\treturn nil, err\n\t}\n\n\tsignErrors := make([]btcjson.SignRawTransactionError, 0, len(signErrs))\n\tfor _, e := range signErrs {\n\t\tinput := tx.TxIn[e.InputIndex]\n\t\tsignErrors = append(signErrors, btcjson.SignRawTransactionError{\n\t\t\tTxID: input.PreviousOutPoint.Hash.String(),\n\t\t\tVout: input.PreviousOutPoint.Index,\n\t\t\tScriptSig: hex.EncodeToString(input.SignatureScript),\n\t\t\tSequence: input.Sequence,\n\t\t\tError: e.Error.Error(),\n\t\t})\n\t}\n\n\treturn &btcjson.SignRawTransactionResult{\n\t\tHex: hex.EncodeToString(buf.Bytes()),\n\t\tComplete: len(signErrors) == 0,\n\t\tErrors: signErrors,\n\t}, nil\n}", "func (tr *Transactor) SendTransaction(ctx context.Context, tx *Transaction) (string, error) {\n\tif len(tx.Signature()) == 0 {\n\t\treturn \"\", errors.New(\"transaction is missing signature\")\n\t}\n\tserialised, err := json.Marshal(tx)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn tr.Client.Commit(ctx, serialised)\n}", "func (a API) CreateRawTransaction(cmd *btcjson.CreateRawTransactionCmd) (e error) {\n\tRPCHandlers[\"createrawtransaction\"].Call <-API{a.Ch, cmd, nil}\n\treturn\n}", "func (c *Jrpc) CreateRawTransaction(in *pty.ReqCreatePrivacyTx, result *interface{}) error {\n\treply, err := c.cli.CreateRawTransaction(context.Background(), in)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t*result = hex.EncodeToString(types.Encode(reply))\n\treturn err\n}", "func (c *Client) SendTransaction(ctx context.Context, transaction *transactions.Transaction) (*TransactionSendResponse, error) {\n\treq := c.restClient.R().SetContext(ctx)\n\n\t//posts := make([]*transactions.Transaction, 0)\n\t//posts = append(posts, transaction)\n\n\t//resultByte,_ := transaction.Serialize()\n\tresult,err := json.Marshal(transaction)\n\t//fmt.Println(hex.EncodeToString(resultByte))\n\n\n\ttransactionResult := `{\"transaction\":` + string(result) + `}`\n\tfmt.Println(transactionResult)\n\treq.SetBody(transactionResult)\n\n\n\n\treq.SetResult(&TransactionSendResponse{})\n\treq.SetError(Error{})\n\treq.SetHeader(\"Content-Type\",\"application/json\")\n\tres, err := req.Put(\"api/transactions\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfmt.Println(string(res.Body()))\n\tif res.IsError() {\n\t\treturn res.Result().(*TransactionSendResponse), res.Error().(error)\n\t}\n\n\tif !res.Result().(*TransactionSendResponse).Success {\n\t\treturn nil, errors.New(\"SendTransaction not success\")\n\t}\n\treturn res.Result().(*TransactionSendResponse), nil\n}", "func SendTx(rpcURL string, fromSK string, toAddr string, value uint64, data []byte, gasPrice uint64, gasLimit uint64) (common.Hash, error) {\n\tclient, err := dial(rpcURL)\n\tif err != nil {\n\t\treturn common.Hash{}, err\n\t}\n\tdefer client.Close()\n\n\tprivK, _, 
fromAddress, err := HexToAccount(fromSK)\n\tif err != nil {\n\t\tlogrus.Errorf(\"convert hex sk to ECDSA error: %v\", err)\n\t\treturn common.Hash{}, err\n\t}\n\tvar toAddress common.Address\n\tif toAddr != \"\" {\n\t\ttoAddress = common.HexToAddress(toAddr)\n\t}\n\t//nonce\n\tnonce, err := client.PendingNonceAt(context.Background(), fromAddress)\n\tif err != nil {\n\t\treturn common.Hash{}, fmt.Errorf(\"PendingNonceAt() error: %s\", err.Error())\n\t}\n\n\t//gas price\n\tif gasPrice == 0 {\n\t\tsuggestGasPrice, err := client.SuggestGasPrice(context.Background())\n\t\tif err != nil {\n\t\t\treturn common.Hash{}, fmt.Errorf(\"SuggestGasPrice() error: %s\", err.Error())\n\t\t}\n\t\tgasPrice = suggestGasPrice.Uint64()\n\t}\n\tprice := new(big.Int)\n\tprice.SetUint64(gasPrice)\n\n\t// value\n\tval := new(big.Int)\n\tval.SetUint64(value)\n\n\tvar tx *types.Transaction\n\tif toAddr != \"\" {\n\t\ttx = types.NewTransaction(nonce, toAddress, val, gasLimit, price, data)\n\t} else {\n\t\ttx = types.NewContractCreation(nonce, val, gasLimit, price, data)\n\t}\n\tsignedTx, err := types.SignTx(tx, types.HomesteadSigner{}, privK)\n\treturn signedTx.Hash(), client.SendTransaction(context.Background(), signedTx)\n}", "func (c *GethClient) SendTransaction(ctx context.Context,\n\tfrom, to common.Address, amount *big.Int) (result *string, err error) {\n\tgas := uint64(34000)\n\n\tgasPrice, err := c.ethCli.SuggestGasPrice(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\targs := SendTxArgs{\n\t\tFrom: from.Hex(),\n\t\tTo: to.Hex(),\n\t\tGas: hexutil.EncodeUint64(gas),\n\t\tGasPrice: hexutil.EncodeBig(gasPrice),\n\t\tValue: hexutil.EncodeBig(amount),\n\t}\n\n\terr = c.rpcCli.CallContext(ctx, &result,\n\t\t\"eth_sendTransaction\", args)\n\treturn result, err\n}", "func testRawTransactionVerbose(msgTx *wire.MsgTx, txid, blockHash *chainhash.Hash,\n\tconfirmations int64) *btcjson.TxRawResult {\n\n\tvar hash string\n\tif blockHash != nil {\n\t\thash = blockHash.String()\n\t}\n\tw := bytes.NewBuffer(make([]byte, 0))\n\terr := msgTx.Serialize(w)\n\tif err != nil {\n\t\tfmt.Printf(\"error encoding MsgTx\\n\")\n\t}\n\thexTx := w.Bytes()\n\treturn &btcjson.TxRawResult{\n\t\tHex: hex.EncodeToString(hexTx),\n\t\tTxid: txid.String(),\n\t\tBlockHash: hash,\n\t\tConfirmations: uint64(confirmations),\n\t}\n\n}", "func (_BtlCoin *BtlCoinTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {\n\treturn _BtlCoin.Contract.contract.Transact(opts, method, params...)\n}", "func send(\n\tclient *http.Client,\n\tappservice config.ApplicationService,\n\ttxnID int,\n\ttransaction []byte,\n) (err error) {\n\t// PUT a transaction to our AS\n\t// https://matrix.org/docs/spec/application_service/r0.1.2#put-matrix-app-v1-transactions-txnid\n\taddress := fmt.Sprintf(\"%s/transactions/%d?access_token=%s\", appservice.URL, txnID, url.QueryEscape(appservice.HSToken))\n\treq, err := http.NewRequest(\"PUT\", address, bytes.NewBuffer(transaction))\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer checkNamedErr(resp.Body.Close, &err)\n\n\t// Check the AS received the events correctly\n\tif resp.StatusCode != http.StatusOK {\n\t\t// TODO: Handle non-200 error codes from application services\n\t\treturn fmt.Errorf(\"non-OK status code %d returned from AS\", resp.StatusCode)\n\t}\n\n\treturn nil\n}", "func (_BtlCoin *BtlCoinRaw) Transfer(opts *bind.TransactOpts) 
(*types.Transaction, error) {\n\treturn _BtlCoin.Contract.BtlCoinTransactor.contract.Transfer(opts)\n}", "func SendSmartContract() {\n\t// mainKeyStore := convertToKeystore(mainPrivateKey)\n\t// SendTransaction(mainKeyStore, nil, , data string, nonce uint64)\n}", "func (w *rpcWallet) SignRawTransaction(ctx context.Context, inTx *wire.MsgTx) (*wire.MsgTx, error) {\n\tbaseTx := inTx.Copy()\n\ttxHex, err := msgTxToHex(baseTx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to encode MsgTx: %w\", err)\n\t}\n\tvar res walletjson.SignRawTransactionResult\n\terr = w.rpcClientRawRequest(ctx, methodSignRawTransaction, anylist{txHex}, &res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor i := range res.Errors {\n\t\tsigErr := &res.Errors[i]\n\t\treturn nil, fmt.Errorf(\"signing %v:%d, seq = %d, sigScript = %v, failed: %v (is wallet locked?)\",\n\t\t\tsigErr.TxID, sigErr.Vout, sigErr.Sequence, sigErr.ScriptSig, sigErr.Error)\n\t\t// Will be incomplete below, so log each SignRawTransactionError and move on.\n\t}\n\n\tif !res.Complete {\n\t\tbaseTxB, _ := baseTx.Bytes()\n\t\tw.log.Errorf(\"Incomplete raw transaction signatures (input tx: %x / incomplete signed tx: %s)\",\n\t\t\tbaseTxB, res.Hex)\n\t\treturn nil, fmt.Errorf(\"incomplete raw tx signatures (is wallet locked?)\")\n\t}\n\n\treturn msgTxFromHex(res.Hex)\n}", "func CoinbaseTX(to, data string) *Transaction {\n\tif data == \"\" {\n\t\tdata = fmt.Sprintf(\"Coin to %s\", to)\n\t}\n\ttxinput := TXInput{[]byte{}, -1, nil, []byte(data)}\n\ttxoutput := NewTXOutput(100, to)\n\ttransaction := Transaction{nil, []TXOutput{*txoutput}, []TXInput{txinput}}\n\ttransaction.SetID()\n\n\treturn &transaction\n}", "func CoinbaseTx(r, d string) *Transaction {\n\t// If the data is empty then assign data to default string\n\tif d == \"\" {\n\t\td = fmt.Sprintf(\"Coins to %s\", r)\n\t}\n\n\t// Create a transaction input and output with the given data and recepient\n\ttIn := TxInput{[]byte{}, -1, d}\n\ttOut := TxOutput{100, r}\n\n\t// Use the above to construct a new transaction\n\tt := Transaction{nil, []TxInput{tIn}, []TxOutput{tOut}}\n\n\t// Call the SetID method\n\tt.SetID()\n\n\t// Return the transaction\n\treturn &t\n}", "func (s *Socket) sendTradeTxSuccess(p *TxSuccessPayload) error {\n\tm := &Message{MessageType: TRADE_TX_SUCCESS, Payload: p}\n\n\tif err := s.connection.WriteJSON(&m); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func TestHttpBrToChain(t *testing.T) {\n\tfromStr := \"address1vpszt2jp2j8m5l3mutvqserzuu9uylmzydqaj9\"\n\ttoStr := \"address1eep59h9ez4thymept8nxl0padlrc6r78fsjmp3\"\n\tcoinstr := \"2qos\"\n\t//generate singed Tx\n\tchainid := \"capricorn-2000\"\n\tnonce := int64(1)\n\t//gas := NewBigInt(int64(0))\n\t//PrivKey output\n\tprivkey := \"sV5sRbwnR8DddL5e4UC1ntKPiOtGEaOFAqvePTfhJFI9GcC28zmPURSUI6C1oBlnk2ykBcAtIbYUazuCexWyqg==\"\n\n\tjasonpayload := LocalTxGen(fromStr, toStr, coinstr, chainid, privkey, nonce)\n\n\t//tbt := new(types.Tx)\n\t//err := Cdc.UnmarshalJSON(jasonpayload, tbt)\n\t//if err != nil {\n\t//\tfmt.Println(err)\n\t//}\n\t//\n\t//txBytes, err := Cdc.MarshalBinaryBare(jasonpayload)\n\t//if err != nil {\n\t//\tpanic(\"use cdc encode object fail\")\n\t//}\n\n\tclient := client.NewHTTP(\"tcp://192.168.1.183:26657\", \"/websocket\")\n\tresult, err := client.BroadcastTxCommit(jasonpayload)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tt.Log(result)\n}", "func CreateRawTransaction(amount_s string, fee_s string, from_ucs_s string, to_ucs_s string, refund_ucs_s string, spendingTxs_s string) (string, 
error) {\n\tamount, ok := scanAmount(amount_s)\n\tif !ok {\n\t\treturn \"\", errors.New(\"could not read amount from '\" + amount_s + \"'\")\n\t}\n\tfee, ok := scanAmount(fee_s)\n\tif !ok {\n\t\treturn \"\", errors.New(\"could not read fee from '\" + fee_s + \"'\")\n\t}\n\n\tvar spendingTx []SpendingTransaction\n\terr := json.Unmarshal([]byte(spendingTxs_s), &spendingTx)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar from_ucs types.UnlockConditions\n\terr = json.Unmarshal([]byte(from_ucs_s), &from_ucs)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tvar to_ucs types.UnlockConditions\n\terr = json.Unmarshal([]byte(to_ucs_s), &to_ucs)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tvar refund_ucs *types.UnlockConditions = nil\n\tif len(refund_ucs_s) != 0 {\n\t\trefund_ucs = &types.UnlockConditions{}\n\t\terr = json.Unmarshal([]byte(refund_ucs_s), &refund_ucs)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\toutput := types.SiacoinOutput{\n\t\tValue: amount,\n\t\tUnlockHash: to_ucs.UnlockHash(),\n\t}\n\n\ttxnBuilder, err := startTransaction()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\terr = txnBuilder.FundSiacoins(amount.Add(fee), spendingTx, from_ucs, refund_ucs)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\ttxnBuilder.AddMinerFee(fee)\n\ttxnBuilder.AddSiacoinOutput(output)\n\n\tresult, err := json.Marshal(txnBuilder)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(result), nil\n}", "func (w *xcWallet) SendTransaction(tx []byte) ([]byte, error) {\n\tbonder, ok := w.Wallet.(asset.Broadcaster)\n\tif !ok {\n\t\treturn nil, errors.New(\"wallet is not a Broadcaster\")\n\t}\n\treturn bonder.SendTransaction(tx)\n}", "func submitTx(r *http.Request, pb transactionPb.TransactionService) (proto.Message, error) {\n\t// Parse incoming JSON\n\tif err := r.ParseForm(); err != nil {\n\t\tfmt.Println(err)\n\t}\n\t// Marshall JSON request\n\tvar newTransaction Transaction\n\terr := json.Unmarshal([]byte(r.FormValue(\"transaction\")), &newTransaction)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar txProducts []*transactionPb.Product\n\tfor _, product := range newTransaction.Products {\n\t\ttxProducts = append(txProducts, &transactionPb.Product{\n\t\t\tProductName: product.ProductName,\n\t\t\tPrice: product.Price,\n\t\t})\n\t}\n\t// Create protobuf request\n\tpbRequest := transactionPb.Transaction{\n\t\tTotalPrice: newTransaction.TotalPrice,\n\t\tMerchantUuid: newTransaction.MerchantUUID,\n\t\tRecipientCryptoId: newTransaction.RecipientCryptoID,\n\t\tProducts: txProducts,\n\t}\n\t// Call RPC function and get protobuf response\n\tpbResponse, err := pb.SubmitTx(context.Background(), &transactionPb.SubmitTxReq{\n\t\tTransaction: &pbRequest,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfmt.Println(pbResponse)\n\treturn pbResponse, nil\n}", "func (c *Client) SendTransaction(ctx context.Context, tx types.Transaction) (string, error) {\n\trawTx, err := tx.Serialize()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to serialize tx, err: %v\", err)\n\t}\n\tres, err := c.RpcClient.SendTransactionWithConfig(\n\t\tctx,\n\t\tbase64.StdEncoding.EncodeToString(rawTx),\n\t\trpc.SendTransactionConfig{\n\t\t\tEncoding: rpc.SendTransactionConfigEncodingBase64,\n\t\t},\n\t)\n\terr = checkRpcResult(res.GeneralResponse, err)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn res.Result, nil\n}", "func (e Endpoints) SendBTCTx(ctx context.Context, signedTxHex string) (txID string, err error) {\n\trequest := SendBTCTxRequest{SignedTxHex: 
signedTxHex}\n\tresponse, err := e.SendBTCTxEndpoint(ctx, request)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn response.(SendBTCTxResponse).TxID, nil\n}", "func (s *Sudt) Send(client rpc.Client) (*types.Hash, error) {\n\treturn client.SendTransaction(context.Background(), s.tx)\n}", "func SendRaw(client ioctl.Client, cmd *cobra.Command, selp *iotextypes.Action) error {\n\tcli, err := client.APIServiceClient()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to connect to endpoint\")\n\t}\n\n\tctx := context.Background()\n\tif jwtMD, err := util.JwtAuth(); err == nil {\n\t\tctx = metautils.NiceMD(jwtMD).ToOutgoing(ctx)\n\t}\n\n\t_, err = cli.SendAction(ctx, &iotexapi.SendActionRequest{Action: selp})\n\tif err != nil {\n\t\treturn handleClientRequestError(err, \"SendAction\")\n\t}\n\n\tshash := hash.Hash256b(byteutil.Must(proto.Marshal(selp)))\n\ttxhash := hex.EncodeToString(shash[:])\n\tURL := \"https://\"\n\tendpoint := client.Config().Endpoint\n\texplorer := client.Config().Explorer\n\tswitch explorer {\n\tcase \"iotexscan\":\n\t\tif strings.Contains(endpoint, \"testnet\") {\n\t\t\tURL += \"testnet.\"\n\t\t}\n\t\tURL += \"iotexscan.io/action/\" + txhash\n\tcase \"iotxplorer\":\n\t\tURL = \"iotxplorer.io/actions/\" + txhash\n\tdefault:\n\t\tURL = explorer + txhash\n\t}\n\tcmd.Printf(\"Action has been sent to blockchain.\\nWait for several seconds and query this action by hash: %s\\n\", URL)\n\treturn nil\n}", "func (b *Backend) SendTransaction(ctx context.Context, tx sdk.Transaction) error {\n\terr := b.emulator.AddTransaction(tx)\n\tif err != nil {\n\t\tswitch t := err.(type) {\n\t\tcase *emulator.DuplicateTransactionError:\n\t\t\treturn status.Error(codes.InvalidArgument, err.Error())\n\t\tcase *types.FlowError:\n\t\t\t// TODO - confirm these\n\t\t\tswitch t.FlowError.(type) {\n\t\t\tcase *fvmerrors.AccountAuthorizationError,\n\t\t\t\t*fvmerrors.InvalidEnvelopeSignatureError,\n\t\t\t\t*fvmerrors.InvalidPayloadSignatureError,\n\t\t\t\t*fvmerrors.InvalidProposalSignatureError,\n\t\t\t\t*fvmerrors.AccountNotFoundError,\n\t\t\t\t*fvmerrors.AccountPublicKeyNotFoundError,\n\t\t\t\t*fvmerrors.InvalidProposalSeqNumberError,\n\t\t\t\t*fvmerrors.InvalidAddressError:\n\n\t\t\t\treturn status.Error(codes.InvalidArgument, err.Error())\n\t\t\tdefault:\n\t\t\t\treturn status.Error(codes.Internal, err.Error())\n\t\t\t}\n\t\tdefault:\n\t\t\treturn status.Error(codes.Internal, err.Error())\n\t\t}\n\t} else {\n\t\tb.logger.\n\t\t\tWithField(\"txID\", tx.ID().String()).\n\t\t\tDebug(\"️✉️ Transaction submitted\")\n\t}\n\n\tif b.automine {\n\t\tb.CommitBlock()\n\t}\n\n\treturn nil\n}", "func (n *nodeEOS) pushTransaction(contract, from, to, memo, symbol string, isMain bool, amount, fee string) (string, error) {\n\tapi := n.api\n\n\tapi.WalletUnlock(\"default\", n.coin.Password)\n\tdefer func() {\n\t\tapi.WalletLock(\"default\")\n\t}()\n\n\tquantity, err := eos.NewAsset(amount + \" \" + symbol)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\taction := eostoken.NewTransferCommon(eos.AN(contract), eos.AN(from), eos.AN(to), quantity, memo)\n\tpushed, err := api.SignPushActions(action)\n\tutils.Logger.Info(\"pushed action:%v, error:%v\", pushed, err)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn pushed.TransactionID, nil\n}", "func (_Aggregator *AggregatorTransactor) Transmit(opts *bind.TransactOpts, _report []byte, _rs [][32]byte, _ss [][32]byte, _rawVs [32]byte) (*types.Transaction, error) {\n\treturn _Aggregator.contract.Transact(opts, \"transmit\", _report, _rs, _ss, 
_rawVs)\n}", "func bitcoinGetRawTransaction(hash string, reply *bitcoinTransaction) error {\n\tglobalBitcoinData.Lock()\n\tdefer globalBitcoinData.Unlock()\n\n\tif !globalBitcoinData.initialised {\n\t\treturn fault.ErrNotInitialised\n\t}\n\n\targuments := []interface{}{\n\t\thash,\n\t\t1,\n\t}\n\treturn bitcoinCall(\"getrawtransaction\", arguments, reply)\n}", "func (t *SimpleChaincode) submitTransaction(stub shim.ChaincodeStubInterface, args []string) ([]byte, error) {\n\n\tvar err error\n\tfmt.Println(\"Running submitTransaction\")\n\n\tif len(args) != 10 {\n\t\tfmt.Println(\"Incorrect number of arguments. Expecting 10 - MT103 format\")\n\t\treturn nil, errors.New(\"Incorrect number of arguments. Expecting 10 - MT103 format\")\n\t}\n\n\tvar tx Transaction\n\ttx.RefNumber = args[0]\n\ttx.OpCode = args[1]\n\ttx.VDate = args[2]\n\ttx.Currency = args[3]\n\ttx.Sender = args[5]\n\ttx.Receiver = args[6]\n\ttx.OrdCust = args[7]\n\ttx.BenefCust = args[8]\n\ttx.DetCharges = args[9]\n\ttx.StatusCode = 1\n\ttx.StatusMsg = \"Transaction Completed\"\n\n\tamountValue, err := strconv.ParseFloat(args[4], 64)\n\tif err != nil {\n\t\ttx.StatusCode = 0\n\t\ttx.StatusMsg = \"Invalid Amount\"\n\t} else {\n\t\ttx.Amount = amountValue\n\t}\n\n\t// Check Nostro Account\n\trfidBytes, err := stub.GetState(tx.Receiver)\n\tif err != nil {\n\t\treturn nil, errors.New(\"submitTransaction Failed to get Financial Institution\")\n\t}\n\tvar rfid FinancialInst\n\tfmt.Println(\"submitTransaction Unmarshalling Financial Institution\")\n\terr = json.Unmarshal(rfidBytes, &rfid)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfound := false\n\tamountSent := 0.0\n\tfor i := range rfid.Accounts {\n\n\t\tif rfid.Accounts[i].Holder == tx.Sender {\n\t\t\tfmt.Println(\"submitTransaction Find Sender Nostro Account\")\n\t\t\tfound = true\n\t\t\tfxRate, err := getFXRate(tx.Currency, rfid.Accounts[i].Currency)\n\t\t\tfmt.Println(\"submitTransaction Get FX Rate \" + FloatToString(fxRate))\n\t\t\t//Transaction currency invalid\n\t\t\tif err != nil {\n\t\t\t\ttx.StatusCode = 0\n\t\t\t\ttx.StatusMsg = \"Invalid Currency\"\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tamountSent = tx.Amount * fxRate\n\t\t\tfmt.Println(\"submitTransaction Amount To Send \" + FloatToString(amountSent))\n\t\t\tif rfid.Accounts[i].CashBalance-amountSent < 0 {\n\t\t\t\ttx.StatusCode = 0\n\t\t\t\ttx.StatusMsg = \"Insufficient funds on Nostro Account\"\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tif !found {\n\t\ttx.StatusCode = 0\n\t\ttx.StatusMsg = \"Nostro Account for \" + tx.Sender + \" doesn't exist in \" + tx.Receiver\n\t}\n\n\t//Check Vostro Account\n\tsfidBytes, err := stub.GetState(tx.Sender)\n\tif err != nil {\n\t\treturn nil, errors.New(\"submitTransaction Failed to get Financial Institution\")\n\t}\n\tvar sfid FinancialInst\n\tfmt.Println(\"submitTransaction Unmarshalling Financial Institution\")\n\terr = json.Unmarshal(sfidBytes, &sfid)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfound = false\n\tfor i := range sfid.Accounts {\n\n\t\tif sfid.Accounts[i].Holder == tx.Receiver {\n\t\t\tfmt.Println(\"submitTransaction Find Vostro Account\")\n\t\t\tfound = true\n\n\t\t\tif sfid.Accounts[i].Currency != tx.Currency {\n\t\t\t\ttx.StatusCode = 0\n\t\t\t\ttx.StatusMsg = tx.Receiver + \" doesn't have an account in \" + tx.Currency + \" with \" + tx.Sender\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tif !found {\n\t\ttx.StatusCode = 0\n\t\ttx.StatusMsg = \"Vostro Account for \" + tx.Receiver + \" doesn't exist in \" + tx.Sender\n\t}\n\n\tif tx.StatusCode == 1 {\n\t\t//Credit 
and debit Accounts\n\t\tfmt.Println(\"submitTransaction Credit Vostro Account\")\n\t\t_, err = t.creditVostroAccount(stub, tx.Sender, tx.Receiver, tx.Amount)\n\t\tif err != nil {\n\t\t\treturn nil, errors.New(\"submitTransaction Failed to Credit Vostro Account\")\n\t\t}\n\n\t\tfmt.Println(\"submitTransaction Debit Nostro Account\")\n\t\t_, err = t.debitNostroAccount(stub, tx.Sender, tx.Receiver, amountSent)\n\t\tif err != nil {\n\t\t\treturn nil, errors.New(\"submitTransaction Failed to Debit Nostro Account\")\n\t\t}\n\t}\n\n\t//get the AllTransactions index\n\tallTxAsBytes, err := stub.GetState(\"allTx\")\n\tif err != nil {\n\t\treturn nil, errors.New(\"submitTransaction Failed to get all Transactions\")\n\t}\n\n\t//Commit transaction to ledger\n\tfmt.Println(\"submitTransaction Commit Transaction To Ledger\")\n\tvar txs AllTransactions\n\tjson.Unmarshal(allTxAsBytes, &txs)\n\t// txs.Transactions = append(txs.Transactions, tx)\n\ttxs.Transactions = append([]Transaction{tx}, txs.Transactions...)\n\ttxsAsBytes, _ := json.Marshal(txs)\n\terr = stub.PutState(\"allTx\", txsAsBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn nil, nil\n}", "func (as *AddrServer) HandleTransactionSend(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tvar tx TxPost\n\n\t// Read post body\n\tb, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(400)\n\t\tw.Write(NewPostError(\"unable to read post body\", err))\n\t\treturn\n\t}\n\n\t// Unmarshal\n\terr = json.Unmarshal(b, &tx)\n\tif err != nil {\n\t\tw.WriteHeader(400)\n\t\tw.Write(NewPostError(\"unable to unmarshall body\", err))\n\t\treturn\n\t}\n\n\t// Convert hex to string\n\tdec, err := hex.DecodeString(tx.Tx)\n\tif err != nil {\n\t\tw.WriteHeader(400)\n\t\tw.Write(NewPostError(\"unable to decode hex string\", err))\n\t\treturn\n\t}\n\n\t// Convert tansaction to send format\n\ttxn, err := btcutil.NewTxFromBytes(dec)\n\tif err != nil {\n\t\tw.WriteHeader(400)\n\t\tw.Write(NewPostError(\"unable to parse transaction\", err))\n\t\treturn\n\t}\n\n\tret, err := as.Client.SendRawTransaction(txn.MsgTx(), true)\n\tif err != nil {\n\t\tw.WriteHeader(400)\n\t\tw.Write(NewPostError(\"unable to post transaction to node\", err))\n\t\treturn\n\t}\n\n\tout, _ := json.Marshal(ret)\n\tw.Write(out)\n}", "func (wm *WalletManager) BroadcastTransaction(txHex string) (string, error) {\n\t//txBytes, err := hex.DecodeString(txHex)\n\t//if err != nil {\n\t//\treturn \"\", fmt.Errorf(\"transaction decode failed, unexpected error: %v\", err)\n\t//}\n\t//signedEncodedTx := aeternity.Encode(aeternity.PrefixTransaction, txBytes)\n\t// calculate the hash of the decoded txRLP\n\t//rlpTxHashRaw := owcrypt.Hash(txBytes, 32, owcrypt.HASH_ALG_BLAKE2B)\n\t//// base58/64 encode the hash with the th_ prefix\n\t//signedEncodedTxHash := aeternity.Encode(aeternity.PrefixTransactionHash, rlpTxHashRaw)\n\n\t// send it to the network\n\t//return postTransaction(wm.Api.Node, signedEncodedTx)\n\treturn \"\", nil\n}", "func CoinbaseTx(to, data string) *Transaction {\n\tif data == \"\" {\n\t\trandData := make([]byte, 24)\n\t\t_, err := rand.Read(randData)\n\t\t// Handle(err)\n\t\tfmt.Println(err)\n\t\tdata = fmt.Sprintf(\"%x\", randData)\n\t}\n\n\ttxin := TxInput{[]byte{}, -1, nil, []byte(data)}\n\ttxout := NewTxOutput(100, to)\n\ttx := Transaction{nil, []TxInput{txin}, []TxOutput{*txout}}\n\ttx.ID = tx.Hash()\n\n\treturn &tx\n}", "func (b backend) SendTransaction(ctx context.Context, tx *types.Transaction) error {\n\tdefer 
b.Commit()\n\treturn b.SimulatedBackend.SendTransaction(ctx, tx)\n}", "func (_m *CommMock) SendTransaction(targetID uint64, request []byte) {\n\t_m.Called(targetID, request)\n}", "func (c BitcoinCoreChain) SignedTx(rawTxHex, wif string, options *ChainsOptions) (string, error) {\n // https://www.experts-exchange.com/questions/29108851/How-to-correctly-create-and-sign-a-Bitcoin-raw-transaction-using-Btcutil-library.html\n tx, err := DecodeBtcTxHex(rawTxHex)\n if err != nil {\n return \"\", fmt.Errorf(\"Fail to decode raw tx %s\", err)\n }\n\n ecPriv, err := btcutil.DecodeWIF(wif)\n if err != nil {\n return \"\", fmt.Errorf(\"Fail to decode wif %s\", err)\n }\n fromAddress, _ := btcutil.DecodeAddress(options.From, c.Mode)\n subscript, _ := txscript.PayToAddrScript(fromAddress)\n for i, txIn := range tx.MsgTx().TxIn {\n if txIn.SignatureScript, err = txscript.SignatureScript(tx.MsgTx(), i, subscript, txscript.SigHashAll, ecPriv.PrivKey, true); err != nil{\n return \"\", fmt.Errorf(\"SignatureScript %s\", err)\n }\n }\n\n //Validate signature\n flags := txscript.StandardVerifyFlags\n vm, err := txscript.NewEngine(subscript, tx.MsgTx(), 0, flags, nil, nil, options.VinAmount)\n if err != nil {\n return \"\", fmt.Errorf(\"Txscript.NewEngine %s\", err)\n }\n if err := vm.Execute(); err != nil {\n return \"\", fmt.Errorf(\"Fail to sign tx %s\", err)\n }\n\n // txToHex\n buf := bytes.NewBuffer(make([]byte, 0, tx.MsgTx().SerializeSize()))\n tx.MsgTx().Serialize(buf)\n txHex := hex.EncodeToString(buf.Bytes())\n return txHex, nil\n}", "func (_Lelecoin *LelecoinRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {\n\treturn _Lelecoin.Contract.LelecoinTransactor.contract.Transact(opts, method, params...)\n}", "func (_BtlCoin *BtlCoinTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _BtlCoin.Contract.contract.Transfer(opts)\n}", "func (a API) SendRawTransactionChk() (isNew bool) {\n\tselect {\n\tcase o := <-a.Ch.(chan SendRawTransactionRes):\n\t\tif o.Err != nil {\n\t\t\ta.Result = o.Err\n\t\t} else {\n\t\t\ta.Result = o.Res\n\t\t}\n\t\tisNew = true\n\tdefault:\n\t}\n\treturn\n}", "func (c *Client) SendRowTransaction(tx Data) (*HashResponse, error) {\n\trequest := c.newRequest(EthSendRawTransaction)\n\n\trequest.Params = []interface{}{\n\t\ttx,\n\t}\n\n\tresponse := &HashResponse{}\n\n\treturn response, c.send(request, response)\n}", "func (tx *EthereumTx) ToRawTransaction() (string, error) {\n\n\t// Decode Address from hex\n\t// TODO: should be specialized type on EthereumTx so i don't need to always do this\n\tto := tx.Recipient\n\tif strings.HasPrefix(to, \"0x\") || strings.HasPrefix(to, \"0X\") {\n\t\tto = to[2:]\n\t}\n\taddrBytes := make([]byte, 20)\n\tif _, err := hex.Decode(addrBytes, []byte(to)); err != nil {\n\t\treturn \"\", errors.New(\"malformed to address:\" + err.Error())\n\t}\n\n\t// Prepare tx data to be encoded. 
Fields must be in the following order\n\t// nonce, gasPrice, gasLimit, to, value, data, sig v, sig r, sig s\n\tbuf := new(bytes.Buffer)\n\tvar arr []interface{}\n\tarr = append(arr, tx.Nonce)\n\tarr = append(arr, tx.GasPrice)\n\tarr = append(arr, tx.GasLimit)\n\tarr = append(arr, addrBytes)\n\tarr = append(arr, tx.Amount)\n\tarr = append(arr, tx.Data)\n\tarr = append(arr, tx.V)\n\tarr = append(arr, tx.R)\n\tarr = append(arr, tx.S)\n\n\tif err := rlp.Encode(buf, arr); err != nil {\n\t\treturn \"\", errors.New(\"malformed tx:\" + err.Error())\n\t}\n\n\treturn \"0x\" + hex.EncodeToString(buf.Bytes()), nil\n}", "func genStdSendTx(cdc *amino.Codec, sendTx txs.ITx, priKey ed25519.PrivKeyEd25519, tochainid string, fromchainid string, nonce int64) *txs.TxStd {\n\tgas := qbasetypes.NewInt(int64(config.MaxGas))\n\tstx := txs.NewTxStd(sendTx, tochainid, gas)\n\tsignature, _ := stx.SignTx(priKey, nonce, fromchainid, tochainid)\n\tstx.Signature = []txs.Signature{txs.Signature{\n\t\tPubkey: priKey.PubKey(),\n\t\tSignature: signature,\n\t\tNonce: nonce,\n\t}}\n\n\treturn stx\n}", "func (_WandappETH *WandappETHRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {\n\treturn _WandappETH.Contract.WandappETHTransactor.contract.Transact(opts, method, params...)\n}", "func (t *TxAPI) Send(data map[string]interface{}) (*api.ResultHash, error) {\n\tout, statusCode, err := t.c.call(\"tx_sendPayload\", data)\n\tif err != nil {\n\t\treturn nil, makeReqErrFromCallErr(statusCode, err)\n\t}\n\n\tvar result api.ResultHash\n\tif err = util.DecodeMap(out, &result); err != nil {\n\t\treturn nil, errors.ReqErr(500, ErrCodeDecodeFailed, \"\", err.Error())\n\t}\n\n\treturn &result, nil\n}", "func (a API) SendRawTransactionGetRes() (out *None, e error) {\n\tout, _ = a.Result.(*None)\n\te, _ = a.Result.(error)\n\treturn \n}", "func (s *Socket) sendOrderTxSuccess(p *TxSuccessPayload) error {\n\tm := &Message{MessageType: ORDER_TX_SUCCESS, Payload: p}\n\n\tif err := s.connection.WriteJSON(&m); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (service *Stainless) ExecuteTransaction(\n\treq *proto.TransactionRequest) (network.Message, error) {\n\tabi, err := abi.JSON(strings.NewReader(req.Abi))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\targs, err := bevm.DecodeEvmArgs(req.Args, abi.Methods[req.Method].Inputs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcallData, err := abi.Pack(req.Method, args...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttx := types.NewTransaction(req.Nonce,\n\t\tcommon.BytesToAddress(req.ContractAddress),\n\t\tbig.NewInt(int64(req.Amount)),\n\t\treq.GasLimit, big.NewInt(int64(req.GasPrice)), callData)\n\n\tsigner := types.HomesteadSigner{}\n\thashedTx := signer.Hash(tx)\n\n\tunsignedBuffer, err := tx.MarshalJSON()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog.Lvl4(\"Returning\", unsignedBuffer, hashedTx)\n\n\treturn &proto.TransactionHashResponse{Transaction: unsignedBuffer,\n\t\tTransactionHash: hashedTx[:]}, nil\n}", "func (_Lelecoin *LelecoinTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {\n\treturn _Lelecoin.Contract.contract.Transact(opts, method, params...)\n}", "func (_Ethdkg *EthdkgRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {\n\treturn _Ethdkg.Contract.EthdkgTransactor.contract.Transact(opts, method, params...)\n}", "func (pool *TxPool) processTransaction(tx *ethutil.Transaction) 
error {\n\t// Get the last block so we can retrieve the sender and receiver from\n\t// the merkle trie\n\tblock := pool.server.blockManager.bc.LastBlock\n\t// Something has gone horribly wrong if this happens\n\tif block == nil {\n\t\treturn errors.New(\"No last block on the block chain\")\n\t}\n\n\tvar sender, receiver *ethutil.Ether\n\n\t// Get the sender\n\tdata := block.State().Get(string(tx.Sender()))\n\t// If it doesn't exist create a new account. Of course trying to send funds\n\t// from this account will fail since it will hold 0 Wei\n\tif data == \"\" {\n\t\tsender = ethutil.NewEther(big.NewInt(0))\n\t} else {\n\t\tsender = ethutil.NewEtherFromData([]byte(data))\n\t}\n\t// Defer the update. Whatever happens it should be persisted\n\tdefer block.State().Update(string(tx.Sender()), string(sender.RlpEncode()))\n\n\t// Make sure there's enough in the sender's account. Having insufficient\n\t// funds won't invalidate this transaction but simple ignores it.\n\tif sender.Amount.Cmp(tx.Value) < 0 {\n\t\treturn errors.New(\"Insufficient amount in sender's account\")\n\t}\n\n\t// Subtract the amount from the senders account\n\tsender.Amount.Sub(sender.Amount, tx.Value)\n\t// Increment the nonce making each tx valid only once to prevent replay\n\t// attacks\n\tsender.Nonce += 1\n\n\t// Get the receiver\n\tdata = block.State().Get(tx.Recipient)\n\t// If the receiver doesn't exist yet, create a new account to which the\n\t// funds will be send.\n\tif data == \"\" {\n\t\treceiver = ethutil.NewEther(big.NewInt(0))\n\t} else {\n\t\treceiver = ethutil.NewEtherFromData([]byte(data))\n\t}\n\t// Defer the update\n\tdefer block.State().Update(tx.Recipient, string(receiver.RlpEncode()))\n\n\t// Add the amount to receivers account which should conclude this transaction\n\treceiver.Amount.Add(receiver.Amount, tx.Value)\n\n\treturn nil\n}", "func convertRawTransaction(rawTx string) (*bitcoin.Transaction, error) {\n\tt, err := decodeTransaction(rawTx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to decode a transaction: [%w]\", err)\n\t}\n\n\tresult := &bitcoin.Transaction{\n\t\tVersion: int32(t.Version),\n\t\tLocktime: t.LockTime,\n\t}\n\n\tfor _, vin := range t.TxIn {\n\t\tinput := &bitcoin.TransactionInput{\n\t\t\tOutpoint: &bitcoin.TransactionOutpoint{\n\t\t\t\tTransactionHash: bitcoin.Hash(vin.PreviousOutPoint.Hash),\n\t\t\t\tOutputIndex: vin.PreviousOutPoint.Index,\n\t\t\t},\n\t\t\tSignatureScript: vin.SignatureScript,\n\t\t\tWitness: vin.Witness,\n\t\t\tSequence: vin.Sequence,\n\t\t}\n\n\t\tresult.Inputs = append(result.Inputs, input)\n\t}\n\n\tfor _, vout := range t.TxOut {\n\t\toutput := &bitcoin.TransactionOutput{\n\t\t\tValue: vout.Value,\n\t\t\tPublicKeyScript: vout.PkScript,\n\t\t}\n\n\t\tresult.Outputs = append(result.Outputs, output)\n\t}\n\n\treturn result, nil\n}", "func (_WandappETH *WandappETHTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {\n\treturn _WandappETH.Contract.contract.Transact(opts, method, params...)\n}", "func PostTx(tx core.Transaction, fluff bool) error {\n\tclient := RPCHTTPClient{URL: url}\n\tparams := []interface{}{tx, fluff}\n\tparamsBytes, err := json.Marshal(params)\n\tif err != nil {\n\t\treturn err\n\t}\n\tenvl, err := client.Request(\"post_tx\", paramsBytes)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif envl == nil {\n\t\treturn errors.New(\"OwnerAPI: Empty RPC Response from grin-wallet\")\n\t}\n\tif envl.Error != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"code\": 
envl.Error.Code,\n\t\t\t\"message\": envl.Error.Message,\n\t\t}).Error(\"OwnerAPI: RPC Error during PostTx\")\n\t\treturn errors.New(string(envl.Error.Code) + \"\" + envl.Error.Message)\n\t}\n\tvar result Result\n\tif err = json.Unmarshal(envl.Result, &result); err != nil {\n\t\treturn err\n\t}\n\tif result.Err != nil {\n\t\treturn errors.New(string(result.Err))\n\t}\n\treturn nil\n}", "func sendTransaction(t *Transaction) {\n\ttransactionToSend := &Message{}\n\ttransactionToSend.Transaction = t\n\tfor _, conn := range connections{\n\t\tgo send(transactionToSend, conn)\n\t}\n\ttransactionIsUsed[t.Id] = true\n}", "func (c *Client) QuickSendTransaction(ctx context.Context, param QuickSendTransactionParam) (string, error) {\n\trecentBlockhashRes, err := c.GetRecentBlockhash(ctx)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to get recent blockhash, err: %v\", err)\n\t}\n\ttx, err := types.NewTransaction(types.NewTransactionParam{\n\t\tMessage: types.NewMessage(types.NewMessageParam{\n\t\t\tInstructions: param.Instructions,\n\t\t\tFeePayer: param.FeePayer,\n\t\t\tRecentBlockhash: recentBlockhashRes.Blockhash,\n\t\t}),\n\t\tSigners: param.Signers,\n\t})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to create new tx, err: %v\", err)\n\t}\n\trawTx, err := tx.Serialize()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to serialize tx, err: %v\", err)\n\t}\n\tres, err := c.RpcClient.SendTransactionWithConfig(\n\t\tctx,\n\t\tbase64.StdEncoding.EncodeToString(rawTx),\n\t\trpc.SendTransactionConfig{Encoding: rpc.SendTransactionConfigEncodingBase64},\n\t)\n\terr = checkRpcResult(res.GeneralResponse, err)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn res.Result, nil\n}", "func (suite *Suite) SendTx(\n\tcontractAddr types.InternalEVMAddress,\n\tfrom common.Address,\n\tsignerKey *ethsecp256k1.PrivKey,\n\ttransferData []byte,\n) (*evmtypes.MsgEthereumTxResponse, error) {\n\tctx := sdk.WrapSDKContext(suite.Ctx)\n\tchainID := suite.App.GetEvmKeeper().ChainID()\n\n\targs, err := json.Marshal(&evmtypes.TransactionArgs{\n\t\tTo: &contractAddr.Address,\n\t\tFrom: &from,\n\t\tData: (*hexutil.Bytes)(&transferData),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgasRes, err := suite.QueryClientEvm.EstimateGas(ctx, &evmtypes.EthCallRequest{\n\t\tArgs: args,\n\t\tGasCap: config.DefaultGasCap,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnonce := suite.App.GetEvmKeeper().GetNonce(suite.Ctx, suite.Address)\n\n\tbaseFee := suite.App.GetFeeMarketKeeper().GetBaseFee(suite.Ctx)\n\tsuite.Require().NotNil(baseFee, \"base fee is nil\")\n\n\t// Mint the max gas to the FeeCollector to ensure balance in case of refund\n\tsuite.MintFeeCollector(sdk.NewCoins(\n\t\tsdk.NewCoin(\n\t\t\t\"ukava\",\n\t\t\tsdkmath.NewInt(baseFee.Int64()*int64(gasRes.Gas*2)),\n\t\t)))\n\n\tercTransferTx := evmtypes.NewTx(\n\t\tchainID,\n\t\tnonce,\n\t\t&contractAddr.Address,\n\t\tnil, // amount\n\t\tgasRes.Gas*2, // gasLimit, TODO: runs out of gas with just res.Gas, ex: estimated was 21572 but used 24814\n\t\tnil, // gasPrice\n\t\tsuite.App.GetFeeMarketKeeper().GetBaseFee(suite.Ctx), // gasFeeCap\n\t\tbig.NewInt(1), // gasTipCap\n\t\ttransferData,\n\t\t&ethtypes.AccessList{}, // accesses\n\t)\n\n\tercTransferTx.From = hex.EncodeToString(signerKey.PubKey().Address())\n\terr = ercTransferTx.Sign(ethtypes.LatestSignerForChainID(chainID), etherminttests.NewSigner(signerKey))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trsp, err := suite.App.GetEvmKeeper().EthereumTx(ctx, ercTransferTx)\n\tif 
err != nil {\n\t\treturn nil, err\n\t}\n\t// Do not check vm error here since we want to check for errors later\n\n\treturn rsp, nil\n}", "func SubmitTransaction(peers []*peer.Connection, o *orderer.Connection, channel, chaincode, function string, args ...string) ([]byte, error) {\n\tbyteArgs := [][]byte{}\n\tfor _, arg := range args {\n\t\tbyteArgs = append(byteArgs, []byte(arg))\n\t}\n\tproposal, responses, endorsements, err := executeTransaction(peers, o, channel, chaincode, function, byteArgs...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = orderTransaction(peers, o, channel, proposal, responses, endorsements)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn responses[0].Response.Payload, nil\n}", "func MakeTransactionBTC(from, to string, amount, fee uint64, unspent []Unspent) ([]byte, error) {\n\tcoins, err := ToUTXO(unspent, from)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn nil, err\n\t}\n\t//prepare send addresses and its amount.\n\t//last address must be refund address and its amount must be 0.\n\tsend := []*tx.Send{\n\t\t&tx.Send{\n\t\t\tAddr: to,\n\t\t\tAmount: amount,\n\t\t},\n\t\t&tx.Send{\n\t\t\tAddr: \"\",\n\t\t\tAmount: 0,\n\t\t},\n\t}\n\tlocktime := uint32(0)\n\ttx, err := tx.NewP2PK(fee, coins, locktime, send...)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn nil, err\n\t}\n\treturn tx.Pack()\n}", "func (s *Server) GenericSendCoin(gscp GenericSendCoinParams, _ *struct{}) (err error) {\n\t// Get the wallet associated with the id.\n\tgw, err := s.genericWallet(gscp.GWID)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Get the current height of the quorum, for setting the deadline on\n\t// the script input.\n\tinput := state.ScriptInput{\n\t\tWalletID: gw.WalletID,\n\t\tInput: delta.SendCoinInput(gscp.Destination, gscp.Amount),\n\t\tDeadline: s.metadata.Height + state.MaxDeadline,\n\t}\n\terr = delta.SignScriptInput(&input, gw.SecretKey)\n\tif err != nil {\n\t\treturn\n\t}\n\n\ts.broadcast(network.Message{\n\t\tProc: \"Participant.AddScriptInput\",\n\t\tArgs: input,\n\t\tResp: nil,\n\t})\n\treturn\n}", "func (_EthCrossChain *EthCrossChainTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {\n\treturn _EthCrossChain.Contract.contract.Transact(opts, method, params...)\n}", "func (owner *WalletOwnerAPI) PostTx(slate slateversions.SlateV4, fluff bool) error {\n\tparams := struct {\n\t\tToken string `json:\"token\"`\n\t\tSlate slateversions.SlateV4 `json:\"slate\"`\n\t\tFluff bool `json:\"fluff\"`\n\t}{\n\t\tToken: owner.token,\n\t\tSlate: slate,\n\t\tFluff: fluff,\n\t}\n\tparamsBytes, err := json.Marshal(params)\n\tif err != nil {\n\t\treturn err\n\t}\n\tenvl, err := owner.client.EncryptedRequest(\"post_tx\", paramsBytes, owner.sharedSecret)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif envl == nil {\n\t\treturn errors.New(\"WalletOwnerAPI: Empty RPC Response from grin-wallet\")\n\t}\n\tif envl.Error != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"code\": envl.Error.Code,\n\t\t\t\"message\": envl.Error.Message,\n\t\t}).Error(\"WalletOwnerAPI: RPC Error during PostTx\")\n\t\treturn errors.New(string(envl.Error.Code) + \"\" + envl.Error.Message)\n\t}\n\tvar result Result\n\tif err = json.Unmarshal(envl.Result, &result); err != nil {\n\t\treturn err\n\t}\n\tif result.Err != nil {\n\t\treturn errors.New(string(result.Err))\n\t}\n\treturn nil\n}", "func (dcr *ExchangeWallet) signTx(baseTx *wire.MsgTx) (*wire.MsgTx, error) {\n\ttxHex, err := msgTxToHex(baseTx)\n\tif err != nil {\n\t\treturn nil, 
fmt.Errorf(\"failed to encode MsgTx: %w\", err)\n\t}\n\tvar res walletjson.SignRawTransactionResult\n\terr = dcr.nodeRawRequest(methodSignRawTransaction, anylist{txHex}, &res)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"rawrequest error: %w\", err)\n\t}\n\n\tfor i := range res.Errors {\n\t\tsigErr := &res.Errors[i]\n\t\tdcr.log.Errorf(\"Signing %v:%d, seq = %d, sigScript = %v, failed: %v (is wallet locked?)\",\n\t\t\tsigErr.TxID, sigErr.Vout, sigErr.Sequence, sigErr.ScriptSig, sigErr.Error)\n\t\t// Will be incomplete below, so log each SignRawTransactionError and move on.\n\t}\n\n\tsignedTx, err := msgTxFromHex(res.Hex)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to deserialize signed MsgTx: %w\", err)\n\t}\n\n\tif !res.Complete {\n\t\tdcr.log.Errorf(\"Incomplete raw transaction signatures (input tx: %x / incomplete signed tx: %x): \",\n\t\t\tdcr.wireBytes(baseTx), dcr.wireBytes(signedTx))\n\t\treturn nil, fmt.Errorf(\"incomplete raw tx signatures (is wallet locked?)\")\n\t}\n\n\treturn signedTx, nil\n}", "func (a API) GetRawTransaction(cmd *btcjson.GetRawTransactionCmd) (e error) {\n\tRPCHandlers[\"getrawtransaction\"].Call <-API{a.Ch, cmd, nil}\n\treturn\n}", "func (_EthCrossChain *EthCrossChainRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {\n\treturn _EthCrossChain.Contract.EthCrossChainTransactor.contract.Transact(opts, method, params...)\n}", "func (t *TxCreate) CreateTransferTx(sender, receiver account.AccountType, floatValue float64) (string, string, error) {\n\ttargetAction := action.ActionTypeTransfer\n\n\t// validation account\n\tif receiver == account.AccountTypeClient || receiver == account.AccountTypeAuthorization {\n\t\treturn \"\", \"\", errors.New(\"invalid receiver account. client, authorization account is not allowed as receiver\")\n\t}\n\tif sender == receiver {\n\t\treturn \"\", \"\", errors.New(\"invalid account. 
sender and receiver is same\")\n\t}\n\n\t// check sernder's balance\n\tsenderAddr, err := t.addrRepo.GetOneUnAllocated(sender)\n\tif err != nil {\n\t\treturn \"\", \"\", errors.Wrap(err, \"fail to call addrRepo.GetOneUnAllocated(sender)\")\n\t}\n\tsenderBalance, err := t.xrp.GetBalance(senderAddr.WalletAddress)\n\tif err != nil {\n\t\treturn \"\", \"\", errors.Wrap(err, \"fail to call xrp.GetAccountInfo()\")\n\t}\n\tif senderBalance <= 20 {\n\t\treturn \"\", \"\", errors.New(\"sender balance is insufficient to send\")\n\t}\n\tif floatValue != 0 && senderBalance <= floatValue {\n\t\treturn \"\", \"\", errors.New(\"sender balance is insufficient to send\")\n\t}\n\n\tt.logger.Debug(\"amount\",\n\t\tzap.Float64(\"floatValue\", floatValue),\n\t\tzap.Float64(\"senderBalance\", senderBalance),\n\t)\n\n\t// get receiver address\n\treceiverAddr, err := t.addrRepo.GetOneUnAllocated(receiver)\n\tif err != nil {\n\t\treturn \"\", \"\", errors.Wrap(err, \"fail to call addrRepo.GetOneUnAllocated(receiver)\")\n\t}\n\n\t// call CreateRawTransaction\n\tinstructions := &pb.Instructions{\n\t\tMaxLedgerVersionOffset: xrp.MaxLedgerVersionOffset,\n\t}\n\ttxJSON, rawTxString, err := t.xrp.CreateRawTransaction(senderAddr.WalletAddress, receiverAddr.WalletAddress, floatValue, instructions)\n\tif err != nil {\n\t\treturn \"\", \"\", errors.Wrapf(err, \"fail to call xrp.CreateRawTransaction(), sender address: %s\", senderAddr.WalletAddress)\n\t}\n\tt.logger.Debug(\"txJSON\", zap.Any(\"txJSON\", txJSON))\n\tgrok.Value(txJSON)\n\n\t// generate UUID to trace transaction because unsignedTx is not unique\n\tuid := uuid.NewV4().String()\n\n\tserializedTxs := []string{fmt.Sprintf(\"%s,%s\", uid, rawTxString)}\n\n\t// create insert data for eth_detail_tx\n\ttxDetailItem := &models.XRPDetailTX{\n\t\tUUID: uid,\n\t\tCurrentTXType: tx.TxTypeUnsigned.Int8(),\n\t\tSenderAccount: sender.String(),\n\t\tSenderAddress: senderAddr.WalletAddress,\n\t\tReceiverAccount: receiver.String(),\n\t\tReceiverAddress: receiverAddr.WalletAddress,\n\t\tAmount: txJSON.Amount,\n\t\tXRPTXType: txJSON.TransactionType,\n\t\tFee: txJSON.Fee,\n\t\tFlags: txJSON.Flags,\n\t\tLastLedgerSequence: txJSON.LastLedgerSequence,\n\t\tSequence: txJSON.Sequence,\n\t\t// SigningPubkey: txJSON.SigningPubKey,\n\t\t// TXNSignature: txJSON.TxnSignature,\n\t\t// Hash: txJSON.Hash,\n\t}\n\ttxDetailItems := []*models.XRPDetailTX{txDetailItem}\n\n\treturn t.afterTxCreation(targetAction, sender, serializedTxs, txDetailItems, nil)\n}", "func (f *HubTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {\n\treturn f.Contract.contract.Transact(opts, method, params...)\n}", "func test_sendEth(t *testing.T) {\n\tt.Skip(nil)\n\teth_gateway.RunOnTestNet()\n\t// transfer\n\ttransferValue := big.NewInt(1)\n\ttransferValueInWei := new(big.Int).Mul(transferValue, oneWei)\n\t// Send ether to test account\n\ttxs, _, _, err := eth_gateway.EthWrapper.SendETH(eth_gateway.MainWalletAddress, eth_gateway.MainWalletPrivateKey, ethAddress02, transferValueInWei)\n\tif err != nil {\n\t\tt.Logf(\"failed to send ether to %v ether to %v\\n\", transferValue, ethAddress02.Hex())\n\t\tt.Fatalf(\"transaction error: %v\\n\", err)\n\t}\n\tfor tx := range txs {\n\t\ttransaction := txs[tx]\n\t\t// Store For Next Test\n\t\tlastTransaction = *transaction\n\t\tprintTx(transaction)\n\n\t\t// wait for confirmation\n\t\tconfirmed := eth_gateway.EthWrapper.WaitForConfirmation(lastTransaction.Hash(), 3)\n\t\tif confirmed == 1 {\n\t\t\tt.Logf(\"confirmed 
ether was sent to : %v\", ethAddress02.Hex())\n\t\t} else if confirmed == 0 {\n\t\t\tt.Logf(\"failed to confirm sending ether\")\n\t\t}\n\t}\n}", "func (w Wallet) Send(to string, amount uint, utxoSetPath string) error {\n\treturn nil\n}", "func (s *ethereumPaymentObligation) sendMintTransaction(contract ethereumPaymentObligationContract, opts *bind.TransactOpts, requestData *MintRequest) error {\n\ttx, err := s.ethClient.SubmitTransactionWithRetries(contract.Mint, opts, requestData.To, requestData.TokenID, requestData.TokenURI, requestData.AnchorID,\n\t\trequestData.MerkleRoot, requestData.Values, requestData.Salts, requestData.Proofs)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Infof(\"Sent off tx to mint [tokenID: %x, anchor: %x, registry: %x] to payment obligation contract. Ethereum transaction hash [%x] and Nonce [%v] and Check [%v]\",\n\t\trequestData.TokenID, requestData.AnchorID, requestData.To, tx.Hash(), tx.Nonce(), tx.CheckNonce())\n\tlog.Infof(\"Transfer pending: 0x%x\\n\", tx.Hash())\n\treturn nil\n}", "func (_Ethdkg *EthdkgTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {\n\treturn _Ethdkg.Contract.contract.Transact(opts, method, params...)\n}", "func sendEth(t *testing.T, key ethkey.KeyV2, ec *backends.SimulatedBackend, to common.Address, eth int) {\n\tnonce, err := ec.PendingNonceAt(context.Background(), key.Address.Address())\n\trequire.NoError(t, err)\n\ttx := gethtypes.NewTx(&gethtypes.DynamicFeeTx{\n\t\tChainID: big.NewInt(1337),\n\t\tNonce: nonce,\n\t\tGasTipCap: big.NewInt(1),\n\t\tGasFeeCap: big.NewInt(10e9), // block base fee in sim\n\t\tGas: uint64(21_000),\n\t\tTo: &to,\n\t\tValue: big.NewInt(0).Mul(big.NewInt(int64(eth)), big.NewInt(1e18)),\n\t\tData: nil,\n\t})\n\tsignedTx, err := gethtypes.SignTx(tx, gethtypes.NewLondonSigner(big.NewInt(1337)), key.ToEcdsaPrivKey())\n\trequire.NoError(t, err)\n\terr = ec.SendTransaction(context.Background(), signedTx)\n\trequire.NoError(t, err)\n\tec.Commit()\n}" ]
[ "0.75618804", "0.7431252", "0.7406614", "0.700521", "0.6965261", "0.69638187", "0.69369", "0.6910495", "0.68955934", "0.6840324", "0.682656", "0.6720636", "0.66406167", "0.6629154", "0.6592851", "0.6590758", "0.6585054", "0.6555446", "0.65178925", "0.65044934", "0.6460261", "0.64488757", "0.6354232", "0.63469374", "0.63329935", "0.63038963", "0.62897146", "0.62554526", "0.62386715", "0.6230905", "0.62217903", "0.6221673", "0.62089884", "0.620776", "0.6197475", "0.61825436", "0.6154928", "0.615453", "0.6152728", "0.6125781", "0.6122733", "0.6121612", "0.60953087", "0.6088089", "0.60817504", "0.6078506", "0.60686946", "0.60653245", "0.60646266", "0.606323", "0.6062763", "0.6060692", "0.60576946", "0.6052397", "0.6050347", "0.6049683", "0.60372853", "0.6010448", "0.60035855", "0.5988572", "0.59756607", "0.5968163", "0.5963495", "0.5950229", "0.5947508", "0.5945446", "0.5925241", "0.5906344", "0.59057313", "0.5903546", "0.59003377", "0.58963627", "0.58929145", "0.58779174", "0.58749497", "0.5859731", "0.5858618", "0.5857922", "0.5856246", "0.5855804", "0.5852371", "0.58495533", "0.5845252", "0.5843605", "0.5842381", "0.584228", "0.58422196", "0.5841203", "0.58363473", "0.5825594", "0.5820359", "0.581968", "0.58024687", "0.5797459", "0.5796405", "0.5795427", "0.579308", "0.5785863", "0.5784584", "0.5782744" ]
0.7520837
1
decodeAddress from string to decodedAddress
func decodeAddress(address string, cfg *chaincfg.Params) (btcutil.Address, error) { decodedAddress, err := btcutil.DecodeAddress(address, cfg) if err != nil { return nil, err } return decodedAddress, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func DecodeString(addr string) (Address, error) {\n\t// Remove any leading slashes.\n\tif strings.HasPrefix(addr, \"/\") {\n\t\taddr = addr[1:]\n\t}\n\n\taddrParts := strings.Split(addr, \"/\")\n\tif len(addrParts) != 4 {\n\t\treturn Address{}, fmt.Errorf(\"invalid format %v\", addr)\n\t}\n\tvar protocol Protocol\n\tswitch addrParts[0] {\n\tcase \"tcp\":\n\t\tprotocol = TCP\n\tcase \"udp\":\n\t\tprotocol = UDP\n\tcase \"ws\":\n\t\tprotocol = WebSocket\n\tdefault:\n\t\treturn Address{}, fmt.Errorf(\"invalid protocol %v\", addrParts[0])\n\t}\n\tvalue := addrParts[1]\n\tnonce, err := strconv.ParseUint(addrParts[2], 10, 64)\n\tif err != nil {\n\t\treturn Address{}, err\n\t}\n\tvar sig id.Signature\n\tsigBytes, err := base64.RawURLEncoding.DecodeString(addrParts[3])\n\tif err != nil {\n\t\treturn Address{}, err\n\t}\n\tif len(sigBytes) != 65 {\n\t\treturn Address{}, fmt.Errorf(\"invalid signature %v\", addrParts[3])\n\t}\n\tcopy(sig[:], sigBytes)\n\treturn Address{\n\t\tProtocol: protocol,\n\t\tValue: value,\n\t\tNonce: nonce,\n\t\tSignature: sig,\n\t}, nil\n}", "func DecodeAddress(raw string) (res Address, err error) {\n\tvar rbytes []byte\n\tvar prefix string\n\n\tif strings.HasPrefix(raw, B32Prefix) || strings.HasPrefix(raw, StakePrefix) {\n\t\tprefix, rbytes, err = bech32.Decode(raw)\n\t} else {\n\t\trbytes, err = base58.Decode(raw)\n\t}\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tres, err = DecodeRawAddress(rbytes)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif p := res.Prefix(); p != prefix {\n\t\terr = errors.New(\"invalid address prefix\")\n\t}\n\n\treturn\n}", "func DecodeAddress(r stdio.Reader) (Address, error) {\n\treturn wallet.DecodeAddress(r)\n}", "func (c *Client) DecodeAddress(resp *http.Response) (*Address, error) {\n\tvar decoded Address\n\terr := c.Decoder.Decode(&decoded, resp.Body, resp.Header.Get(\"Content-Type\"))\n\treturn &decoded, err\n}", "func DecodePeerAddress(x string) string {\n\treturn nettools.BinaryToDottedPort(x)\n}", "func DecodeAddress(b []byte) (net.IP, []byte, error) {\n\tif len(b) < 6 {\n\t\treturn nil, nil, errors.New(\"too short\")\n\t}\n\n\t// IPv4\n\tif b[0] == 4 && b[1] == 4 {\n\t\treturn net.IP(b[2:6]), b[6:], nil\n\t}\n\n\t// IPv6\n\tif len(b) < 18 {\n\t\treturn nil, nil, errors.New(\"too short\")\n\t}\n\tif b[0] == 6 && b[1] == 16 {\n\t\treturn net.IP(b[2:18]), b[18:], nil\n\t}\n\n\treturn nil, nil, errors.New(\"unrecognized format\")\n}", "func (cdc AddressCodec) Decode(b []byte) (v interface{}, s string, err error) {\n\tip := net.IP(b)\n\treturn ip, ip.String(), nil\n\n}", "func DecodeAddr(address []byte) string {\n\tvar stringAddr string\n\tvar ip []byte\n\tvar port []byte\n\n\tip = address[:4]\n\tport = address[4:]\n\n\t// Decode IP\n\tfor index, octet := range ip {\n\t\tstringAddr = stringAddr + strconv.Itoa(int(octet))\n\t\tif index != 3 {\n\t\t\tstringAddr += \".\"\n\t\t}\n\t}\n\tstringAddr += \":\"\n\n\t// Decode Port\n\tb := make([]byte, 8)\n\tfor i := 0; i < 6; i++ {\n\t\tb[i] = byte(0)\n\t}\n\tb[6] = port[0]\n\tb[7] = port[1]\n\tp := binary.BigEndian.Uint64(b)\n\tstringAddr += strconv.FormatUint(p, 10)\n\t//fmt.Println(\"Complete IP:\", stringAddr)\n\treturn stringAddr\n}", "func DecodeAddress(addr string, params AddressParams) (Address, error) {\n\t// Parsing code for future address/script versions should be added as the\n\t// most recent case in the switch statement. 
The expectation is that newer\n\t// version addresses will become more common, so they should be checked\n\t// first.\n\tswitch {\n\tcase probablyV0Base58Addr(addr):\n\t\treturn DecodeAddressV0(addr, params)\n\t}\n\n\tstr := fmt.Sprintf(\"address %q is not a supported type\", addr)\n\treturn nil, makeError(ErrUnsupportedAddress, str)\n}", "func DecodeAddress(address string) (*Address, error) {\n\t// if address[:3] == \"BM-\" { // Clients should accept addresses without BM-\n\t//\taddress = address[3:]\n\t// }\n\t//\n\t// decodeAddress says this but then UI checks for a missingbm status from\n\t// decodeAddress, which doesn't exist. So I choose NOT to accept addresses\n\t// without the initial \"BM-\"\n\n\ti, err := base58.DecodeToBig([]byte(address[3:]))\n\tif err != nil {\n\t\treturn nil, errors.New(\"input address not valid base58 string\")\n\t}\n\tdata := i.Bytes()\n\n\thashData := data[:len(data)-4]\n\tchecksum := data[len(data)-4:]\n\n\t// Take two rounds of SHA512 hashes\n\tsha := sha512.New()\n\tsha.Write(hashData)\n\tcurrentHash := sha.Sum(nil)\n\tsha.Reset()\n\tsha.Write(currentHash)\n\n\tif !bytes.Equal(checksum, sha.Sum(nil)[0:4]) {\n\t\treturn nil, errors.New(\"checksum failed\")\n\t}\n\t// create the address\n\taddr := new(Address)\n\n\tbuf := bytes.NewReader(data)\n\n\terr = addr.Version.DeserializeReader(buf) // get the version\n\tif err != nil {\n\t\treturn nil, types.DeserializeFailedError(\"version: \" + err.Error())\n\t}\n\n\terr = addr.Stream.DeserializeReader(buf)\n\tif err != nil {\n\t\treturn nil, types.DeserializeFailedError(\"stream: \" + err.Error())\n\t}\n\n\tripe := make([]byte, buf.Len()-4) // exclude bytes already read and checksum\n\tn, err := buf.Read(ripe)\n\tif n != len(ripe) || err != nil {\n\t\treturn nil, types.DeserializeFailedError(\"ripe: \" + err.Error())\n\t}\n\n\tswitch addr.Version {\n\tcase 2:\n\t\tfallthrough\n\tcase 3:\n\t\tif len(ripe) > 20 || len(ripe) < 18 { // improper size\n\t\t\treturn nil, errors.New(\"version 3, the ripe length is invalid\")\n\t\t}\n\tcase 4:\n\t\t// encoded ripe data MUST have null bytes removed from front\n\t\tif ripe[0] == 0x00 {\n\t\t\treturn nil, errors.New(\"version 4, ripe data has null bytes in\" +\n\t\t\t\t\" the beginning, not properly encoded\")\n\t\t}\n\t\tif len(ripe) > 20 || len(ripe) < 4 { // improper size\n\t\t\treturn nil, errors.New(\"version 4, the ripe length is invalid\")\n\t\t}\n\tdefault:\n\t\treturn nil, errors.New(\"unsupported address version\")\n\t}\n\n\t// prepend null bytes to make sure that the total ripe length is 20\n\tnumPadding := 20 - len(ripe)\n\tripe = append(make([]byte, numPadding), ripe...)\n\tcopy(addr.Ripe[:], ripe)\n\n\treturn addr, nil\n}", "func decodePeerAddress(chunk string) string {\n\tip := net.IPv4(chunk[0], chunk[1], chunk[2], chunk[3])\n\tremotePort := 256*int(chunk[4]) + int(chunk[5]) // Port is given in network encoding.\n\treturn fmt.Sprintf(\"%s:%d\", ip.String(), remotePort)\n}", "func decodeAddress(address string) (uint32, error) {\n\tsplit := strings.Split(address, \".\")\n\tif len(split) != 4 {\n\t\treturn 0, errors.New(\"Error decoding IPv4 address: wrong amount of octets\")\n\t}\n\tvar IPaddress uint32\n\tfor i, octetstr := range split {\n\t\tsegment, err := strconv.Atoi(octetstr)\n\t\tif err != nil {\n\t\t\treturn 0, errors.Wrap(err, \"Error decoding IPv4 address\")\n\t\t}\n\t\tif segment > math.MaxUint8 {\n\t\t\treturn 0, errors.New(\"Error decoding IPv4 address: value overflow\")\n\t\t}\n\t\t// Shift octets by determined amount of bits.\n\t\tswitch i 
{\n\t\tcase 0:\n\t\t\tsegment = segment << 24\n\t\tcase 1:\n\t\t\tsegment = segment << 16\n\t\tcase 2:\n\t\t\tsegment = segment << 8\n\t\t}\n\t\tIPaddress += uint32(segment)\n\t}\n\treturn IPaddress, nil\n}", "func HexToAddress(s string) Address { return BytesToAddress(FromHex(s)) }", "func decodeAddresses(val []byte) ([]common.Address, error) {\n\ts := string(val)\n\tvar res []common.Address\n\tif s == \"\" {\n\t\treturn res, nil\n\t}\n\tfor _, a := range strings.Split(s, \",\") {\n\t\tif !common.IsHexAddress(a) {\n\t\t\treturn nil, errors.Errorf(\"malformed address: %q\", s)\n\t\t}\n\n\t\tres = append(res, common.HexToAddress(a))\n\t}\n\treturn res, nil\n}", "func (c *Client) DecodeEasypostAddress(resp *http.Response) (*EasypostAddress, error) {\n\tvar decoded EasypostAddress\n\terr := c.Decoder.Decode(&decoded, resp.Body, resp.Header.Get(\"Content-Type\"))\n\treturn &decoded, err\n}", "func DecodePeerAddress(encoded bencoding.String) (addr net.TCPAddr, err error) {\n\tif len(encoded) != 6 {\n\t\terr = errors.New(\"encoded address has wrong length (should be 6)\")\n\t} else {\n\t\taddr = net.TCPAddr{\n\t\t\tIP: net.IPv4(encoded[0], encoded[1], encoded[2], encoded[3]),\n\t\t\tPort: int(encoded[4])<<8 + int(encoded[5]),\n\t\t}\n\t}\n\n\treturn\n}", "func HexToAddress(s string) types.Address { return BytesToAddress(FromHex(s)) }", "func dnsDecodeString(raw string) ([]byte, error) {\n\tpad := 8 - (len(raw) % 8)\n\tnb := []byte(raw)\n\tif pad != 8 {\n\t\tnb = make([]byte, len(raw)+pad)\n\t\tcopy(nb, raw)\n\t\tfor index := 0; index < pad; index++ {\n\t\t\tnb[len(raw)+index] = '='\n\t\t}\n\t}\n\treturn sliverBase32.DecodeString(string(nb))\n}", "func DecodeAddress(address string) (*Address, error) {\n\tif address[:3] == \"BM-\" { // Clients should accept addresses without BM-\n\t\taddress = address[3:]\n\t}\n\n\tdata := base58.Decode(address)\n\tif len(data) <= 12 { // rough lower bound, also don't want it to be empty\n\t\treturn nil, ErrUnknownAddressType\n\t}\n\n\thashData := data[:len(data)-4]\n\tchecksum := data[len(data)-4:]\n\n\tif !bytes.Equal(checksum, DoubleSha512(hashData)[0:4]) {\n\t\treturn nil, ErrChecksumMismatch\n\t}\n\t// create the address\n\taddr := new(Address)\n\n\tbuf := bytes.NewReader(data)\n\tvar err error\n\n\taddr.Version, err = ReadVarInt(buf) // read version\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\taddr.Stream, err = ReadVarInt(buf) // read stream\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tripe := make([]byte, buf.Len()-4) // exclude bytes already read and checksum\n\tbuf.Read(ripe) // this can never cause an error\n\n\tswitch addr.Version {\n\tcase 2:\n\t\tfallthrough\n\tcase 3:\n\t\tif len(ripe) > 19 || len(ripe) < 18 { // improper size\n\t\t\treturn nil, errors.New(\"version 3, the ripe length is invalid\")\n\t\t}\n\tcase 4:\n\t\t// encoded ripe data MUST have null bytes removed from front\n\t\tif ripe[0] == 0x00 {\n\t\t\treturn nil, errors.New(\"version 4, ripe data has null bytes in\" +\n\t\t\t\t\" the beginning, not properly encoded\")\n\t\t}\n\t\tif len(ripe) > 19 || len(ripe) < 4 { // improper size\n\t\t\treturn nil, errors.New(\"version 4, the ripe length is invalid\")\n\t\t}\n\tdefault:\n\t\treturn nil, ErrUnknownAddressType\n\t}\n\n\t// prepend null bytes to make sure that the total ripe length is 20\n\tnumPadding := 20 - len(ripe)\n\tripe = append(make([]byte, numPadding), ripe...)\n\tcopy(addr.Ripe[:], ripe)\n\n\treturn addr, nil\n}", "func (a *Address) Decode(r io.Reader) error {\n\tvar b []byte\n\terr := cio.ReadBytesUint16(r, &b)\n\tif 
err != nil {\n\t\treturn fmt.Errorf(\"reading byte stream: %w\", err)\n\t}\n\n\tpk := secp256k1.PubKey{\n\t\tKey: b,\n\t}\n\t*a = *NewAddress(&pk)\n\treturn nil\n}", "func NewFromString(addr string) (*Address, error) {\n\tlegaddr, err := legacy.Decode(addr)\n\tif err == nil {\n\t\taddr, err := NewFromLegacy(legaddr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn addr, nil\n\t}\n\n\tcashaddr, err := cashaddress.Decode(addr, cashaddress.MainNet)\n\tif err == nil {\n\t\taddr, err := NewFromCashAddress(cashaddr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn addr, nil\n\t}\n\n\treturn nil, errors.New(\"unable to decode address\")\n}", "func AddressDecode(dec *WasmDecoder) ScAddress {\n\taddr := ScAddress{}\n\tcopy(addr.id[:], dec.FixedBytes(ScAddressLength))\n\treturn addr\n}", "func ConvertUserStrToAddress(userFAddr string) []byte {\n\tv := base58.Decode(userFAddr)\n\treturn v[2:34]\n}", "func DecodeRawAddress(s []byte) (Address, error) {\n\tif len(s) == 0 {\n\t\treturn nil, errors.New(\"empty address\")\n\t}\n\n\theader := s[0]\n\tnetwork := Network(header & 0x0f)\n\n\treadAddrCred := func(bit byte, pos int) StakeCredential {\n\t\thashBytes := s[pos : pos+Hash28Size]\n\t\tif header&(1<<bit) == 0 {\n\t\t\treturn StakeCredential{Kind: KeyStakeCredentialType, Data: hashBytes}\n\t\t}\n\t\treturn StakeCredential{Kind: ScriptStakeCredentialype, Data: hashBytes}\n\t}\n\n\tswitch (header & 0xf0) >> 4 {\n\t// Base type\n\tcase 0b0000, 0b0001, 0b0010, 0b0011:\n\t\t// header + keyhash\n\t\tif len(s) != 57 {\n\t\t\treturn nil, errors.New(\"Invalid length for base address\")\n\t\t}\n\t\treturn &BaseAddress{Network: network, Payment: readAddrCred(4, 1),\n\t\t\tStake: readAddrCred(5, Hash28Size+1)}, nil\n\t// Pointer type\n\tcase 0b0100, 0b0101:\n\t\t// header + keyhash + 3 natural numbers (min 1 byte each)\n\t\tif len(s) < 32 {\n\t\t\treturn nil, errors.New(\"Invalid length for pointer address\")\n\t\t}\n\t\tbyteIndex := 1\n\t\tpaymentCred := readAddrCred(4, 1)\n\t\tslot, slotBytes, ok := VariableNatDecode(s[byteIndex:])\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"slot variable decode failed\")\n\t\t}\n\t\tbyteIndex += slotBytes\n\n\t\ttxIndex, txBytes, ok := VariableNatDecode(s[byteIndex:])\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"txIndex variable decode failed\")\n\t\t}\n\t\tbyteIndex += txBytes\n\n\t\tcertIndex, certBytes, ok := VariableNatDecode(s[byteIndex:])\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"certIndex variable decode failed\")\n\t\t}\n\t\tbyteIndex += certBytes\n\n\t\tif byteIndex > len(s) {\n\t\t\treturn nil, errors.New(\"byte index is out range of pointer lenght\")\n\t\t}\n\n\t\treturn &PointerAddress{\n\t\t\tNetwork: network, Payment: paymentCred,\n\t\t\tStake: StakePoint{Slot: slot, TxIndex: txIndex, CertIndex: certIndex},\n\t\t}, nil\n\t// Enterprise type\n\tcase 0b0110, 0b0111:\n\t\t// header + keyhash\n\t\tif len(s) != 29 {\n\t\t\treturn nil, errors.New(\"invalid length for enterprise address\")\n\t\t}\n\t\treturn &EnterpriseAddress{Network: network, Payment: readAddrCred(4, 1)}, nil\n\t// Reward type\n\tcase 0b1110, 0b1111:\n\t\tif len(s) != 29 {\n\t\t\treturn nil, errors.New(\"invalid length for reward address\")\n\t\t}\n\t\treturn &Reward{Network: network, Payment: readAddrCred(4, 1)}, nil\n\t// Legacy byron type\n\tcase 0b1000:\n\t\tvar byron LegacyAddress\n\t\tif err := cbor.Unmarshal(s, &byron); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn &byron, nil\n\t}\n\treturn nil, errors.New(\"unsupports address type\")\n}", "func 
decodeAddr(addr []uint8) (fidlnet.SocketAddress, bool, error) {\n\tvar sockaddrStorage C.struct_sockaddr_storage\n\tif err := sockaddrStorage.Unmarshal(addr); err != nil {\n\t\treturn fidlnet.SocketAddress{}, false, err\n\t}\n\treturn sockaddrStorage.Decode()\n}", "func DecodeBase58Address(addr string) (Address, error) {\n\tb, err := base58.Base582Hex(addr)\n\tif err != nil {\n\t\treturn Address{}, err\n\t}\n\treturn addressFromBytes(b)\n}", "func extractAddress(str string) string {\n\tvar addr string\n\n\tswitch {\n\tcase strings.Contains(str, `]`):\n\t\t// IPv6 address [2001:db8::1%lo0]:48467\n\t\taddr = strings.Split(str, `]`)[0]\n\t\taddr = strings.Split(addr, `%`)[0]\n\t\taddr = strings.TrimLeft(addr, `[`)\n\tdefault:\n\t\t// IPv4 address 192.0.2.1:48467\n\t\taddr = strings.Split(str, `:`)[0]\n\t}\n\treturn addr\n}", "func extractAddress(str string) string {\n\tvar addr string\n\n\tswitch {\n\tcase strings.Contains(str, `]`):\n\t\t// IPv6 address [2001:db8::1%lo0]:48467\n\t\taddr = strings.Split(str, `]`)[0]\n\t\taddr = strings.Split(addr, `%`)[0]\n\t\taddr = strings.TrimLeft(addr, `[`)\n\tdefault:\n\t\t// IPv4 address 192.0.2.1:48467\n\t\taddr = strings.Split(str, `:`)[0]\n\t}\n\treturn addr\n}", "func ParseAddress(addr string) (*Address, error) {\n\taddr = strings.ToUpper(addr)\n\tl := len(addr)\n\tif l < 50 {\n\t\treturn nil, InvalidAccountAddrError{reason: \"length\"}\n\t}\n\ti := l - 50 // start index of hex\n\n\tidh, err := hex.DecodeString(addr[i:])\n\tif err != nil {\n\t\treturn nil, InvalidAccountAddrError{reason: \"hex\"}\n\t}\n\n\t_addr := &Address{}\n\t_addr.Code = addr[0:i]\n\t_addr.Type = AccountType(idh[0])\n\t_addr.Hash = idh[1:]\n\n\tif err = _addr.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn _addr, nil\n}", "func addressFromString(s string) (WavesAddress, error) {\n\tab, err := base58.Decode(s)\n\tif err != nil {\n\t\treturn WavesAddress{}, err\n\t}\n\ta := WavesAddress{}\n\tcopy(a[:], ab)\n\treturn a, nil\n}", "func (addr *Address) UnmarshalString(s string) error {\n\terr := json.Unmarshal([]byte(s), addr)\n\tif nil == err && stringutil.IsWhiteSpace(addr.Host) && addr.Port < 1 {\n\t\terr = errors.New(\"Invalid Address JSON '%s'\", s)\n\t}\n\treturn err\n}", "func addressFromBytes(b []byte) (Address, error) {\n\ta := Address{}\n\tkeyLen := len(a.Key)\n\tif len(b) != keyLen+len(a.Checksum)+1 {\n\t\treturn a, errors.New(\"Invalid address bytes\")\n\t}\n\tcopy(a.Key[:], b[:keyLen])\n\ta.Version = b[keyLen]\n\tcopy(a.Checksum[:], b[keyLen+1:])\n\tif !a.HasValidChecksum() {\n\t\treturn a, errors.New(\"Invalid checksum\")\n\t} else {\n\t\treturn a, nil\n\t}\n}", "func (addr *Address) Unmarshal(buf []byte, rem int) ([]byte, int, error) {\n\tvar err error\n\tbuf, rem, err = surge.UnmarshalU8((*uint8)(&addr.Protocol), buf, rem)\n\tif err != nil {\n\t\treturn buf, rem, err\n\t}\n\tbuf, rem, err = surge.UnmarshalString(&addr.Value, buf, rem)\n\tif err != nil {\n\t\treturn buf, rem, err\n\t}\n\tbuf, rem, err = surge.UnmarshalU64(&addr.Nonce, buf, rem)\n\tif err != nil {\n\t\treturn buf, rem, err\n\t}\n\treturn addr.Signature.Unmarshal(buf, rem)\n}", "func (p *AddressParser) Parse(address string) (*Address, error)", "func ExtractAddressFromReverse(reverseName string) string {\n\tsearch := \"\"\n\n\tf := reverse\n\n\tswitch {\n\tcase strings.HasSuffix(reverseName, IP4arpa):\n\t\tsearch = strings.TrimSuffix(reverseName, IP4arpa)\n\tcase strings.HasSuffix(reverseName, IP6arpa):\n\t\tsearch = strings.TrimSuffix(reverseName, IP6arpa)\n\t\tf = 
reverse6\n\tdefault:\n\t\treturn \"\"\n\t}\n\n\t// Reverse the segments and then combine them.\n\treturn f(strings.Split(search, \".\"))\n}", "func (c *Client) DecodeAddressGeneral(resp *http.Response) (*AddressGeneral, error) {\n\tvar decoded AddressGeneral\n\terr := c.Decoder.Decode(&decoded, resp.Body, resp.Header.Get(\"Content-Type\"))\n\treturn &decoded, err\n}", "func (c *Client) DecodeEasypostAddressVerification(resp *http.Response) (*EasypostAddressVerification, error) {\n\tvar decoded EasypostAddressVerification\n\terr := c.Decoder.Decode(&decoded, resp.Body, resp.Header.Get(\"Content-Type\"))\n\treturn &decoded, err\n}", "func ParseAddress(s string) (Address, error) {\n\n\tvar family uint8\n\tvar sn uint64\n\tvar crcStr string\n\tcnt, err := fmt.Sscanf(s, \"%x.%x.%s\", &family, &sn, &crcStr)\n\n\tif (nil != err) || (3 != cnt) || (sn != (0xffffffffffff & sn)) {\n\t\treturn 0, errors.New(\"onewire: invalid address \" + s)\n\t}\n\ta := sn<<8 | (uint64(family) << 56)\n\n\tbuf := make([]byte, 8)\n\tbinary.LittleEndian.PutUint64(buf, sn<<8|(uint64(family)<<56))\n\n\tcrc := RevCrc8(buf[1:])\n\n\tif \"--\" != crcStr {\n\t\tvar c uint8\n\t\tcnt, err = fmt.Sscanf(crcStr, \"%x\", &c)\n\t\tif c != crc {\n\t\t\treturn 0, errors.New(\"onewire: invalid crc \" + s)\n\t\t}\n\t}\n\n\ta |= 0xff & uint64(crc)\n\n\treturn Address(a), nil\n}", "func DecodeFromHex(str string, target interface{}) error {\n\tbz, err := HexDecodeString(str)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn Decode(bz, target)\n}", "func (at *Address) Load() btcaddr.Address {\n\taddr, e := btcaddr.Decode(at.String.Load(), at.ForNet)\n\tif e != nil {\n\t\treturn nil\n\t}\n\treturn addr\n}", "func ParseAddress(address string) Address {\n\tif !TrackPositions {\n\t\treturn 0\n\t}\n\taddr, _ := strconv.ParseUint(address, 0, 64)\n\n\treturn Address(addr)\n}", "func ParseAddress(s string) (Address, error) {\n\tvar addr Address\n\terr := addr.parse(s)\n\treturn addr, err\n}", "func ParseAddress(address string) (common.Address, error) {\n\tif common.IsHexAddress(address) {\n\t\treturn common.HexToAddress(address), nil\n\t}\n\treturn common.Address{}, fmt.Errorf(\"invalid address: %v\", address)\n}", "func ParseAddress(address string) (*mail.Address, error)", "func decodeMulticastAnnounceBytes(bytes []byte) (string, []byte, error) {\n\tnameBytesLen := int(bytes[0])\n\n\tif nameBytesLen+1 > len(bytes) {\n\t\treturn \"\", nil, errors.New(\"Invalid multicast message received\")\n\t}\n\n\tnameBytes := bytes[1 : nameBytesLen+1]\n\tname := string(nameBytes)\n\tmsgBytes := bytes[nameBytesLen+1 : len(bytes)]\n\n\treturn name, msgBytes, nil\n}", "func DecodeString(s string) ([]byte, error) {\n\tb, err := hex.DecodeString(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn Reverse(b), nil\n}", "func readAddr(r io.Reader, b []byte) (Addr, error) {\n\tif len(b) < MaxAddrLen {\n\t\treturn nil, io.ErrShortBuffer\n\t}\n\t_, err := io.ReadFull(r, b[:1]) // read 1st byte for address type\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch b[0] {\n\tcase AtypIPv4:\n\t\t_, err = io.ReadFull(r, b[1:1+net.IPv4len+2])\n\t\treturn b[:1+net.IPv4len+2], err\n\tcase AtypDomainName:\n\t\t_, err = io.ReadFull(r, b[1:2]) // read 2nd byte for domain length\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t_, err = io.ReadFull(r, b[2:2+int(b[1])+2])\n\t\treturn b[:1+1+int(b[1])+2], err\n\tcase AtypIPv6:\n\t\t_, err = io.ReadFull(r, b[1:1+net.IPv6len+2])\n\t\treturn b[:1+net.IPv6len+2], err\n\t}\n\n\treturn nil, ErrAddressNotSupported\n}", "func 
ParseAddress(addr string) Address {\n\t// Handle IPv6 address in form as \"[2001:4860:0:2001::68]\"\n\tlenAddr := len(addr)\n\tif lenAddr > 0 && addr[0] == '[' && addr[lenAddr-1] == ']' {\n\t\taddr = addr[1 : lenAddr-1]\n\t}\n\taddr = strings.TrimSpace(addr)\n\n\tip := net.ParseIP(addr)\n\tif ip != nil {\n\t\treturn IPAddress(ip)\n\t}\n\treturn DomainAddress(addr)\n}", "func Decode(input string) ([]byte, error) {\n\tif len(input) == 0 {\n\t\treturn nil, ErrEmptyString\n\t}\n\tif !has0xPrefix(input) {\n\t\treturn nil, ErrMissingPrefix\n\t}\n\tb, err := hex.DecodeString(input[2:])\n\tif err != nil {\n\t\terr = mapError(err)\n\t}\n\treturn b, err\n}", "func parseAddressesFromStr(s string) ([]cipher.Address, error) {\n\taddrsStr := splitCommaString(s)\n\n\tvar addrs []cipher.Address\n\tfor _, s := range addrsStr {\n\t\ta, err := cipher.DecodeBase58Address(s)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\taddrs = append(addrs, a)\n\t}\n\n\treturn addrs, nil\n}", "func ParseAddress(addr interface{}) (a Address, err error) {\n\t// handle the allowed types\n\tswitch addrVal := addr.(type) {\n\tcase string: // simple string value\n\t\tif addrVal == \"\" {\n\t\t\terr = errors.New(\"Recipient.Address may not be empty\")\n\t\t} else {\n\t\t\ta.Email = addrVal\n\t\t}\n\n\tcase Address:\n\t\ta = addr.(Address)\n\n\tcase map[string]interface{}:\n\t\t// auto-parsed nested json object\n\t\tfor k, v := range addrVal {\n\t\t\tswitch vVal := v.(type) {\n\t\t\tcase string:\n\t\t\t\tif strings.EqualFold(k, \"name\") {\n\t\t\t\t\ta.Name = vVal\n\t\t\t\t} else if strings.EqualFold(k, \"email\") {\n\t\t\t\t\ta.Email = vVal\n\t\t\t\t} else if strings.EqualFold(k, \"header_to\") {\n\t\t\t\t\ta.HeaderTo = vVal\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\terr = errors.New(\"strings are required for all Recipient.Address values\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\tcase map[string]string:\n\t\t// user-provided json literal (convenience)\n\t\tfor k, v := range addrVal {\n\t\t\tif strings.EqualFold(k, \"name\") {\n\t\t\t\ta.Name = v\n\t\t\t} else if strings.EqualFold(k, \"email\") {\n\t\t\t\ta.Email = v\n\t\t\t} else if strings.EqualFold(k, \"header_to\") {\n\t\t\t\ta.HeaderTo = v\n\t\t\t}\n\t\t}\n\n\tdefault:\n\t\terr = errors.Errorf(\"unsupported Recipient.Address value type [%T]\", addrVal)\n\t}\n\n\treturn\n}", "func NewMixedcaseAddressFromString(hexaddr string) (*MixedcaseAddress, error) {\n\tif !IsHexAddress(hexaddr) {\n\t\treturn nil, fmt.Errorf(\"Invalid address\")\n\t}\n\ta := FromHex(hexaddr)\n\treturn &MixedcaseAddress{addr: BytesToAddress(a), original: hexaddr}, nil\n}", "func BytesToAddress(bytes []byte) (Address, error) { return V1.BytesToAddress(bytes) }", "func unpackAddr(value nlgo.Binary, af Af) (net.IP, error) {\n\tbuf := ([]byte)(value)\n\tsize := 0\n\n\tswitch af {\n\tcase syscall.AF_INET:\n\t\tsize = 4\n\tcase syscall.AF_INET6:\n\t\tsize = 16\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"ipvs: unknown af=%d addr=%v\", af, buf)\n\t}\n\n\tif size > len(buf) {\n\t\treturn nil, fmt.Errorf(\"ipvs: short af=%d addr=%v\", af, buf)\n\t}\n\n\treturn (net.IP)(buf[:size]), nil\n}", "func Explode(address string) (*Address, error) {\n\tif client == nil {\n\t\terr := initClient()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Build the API request.\n\treq := &maps.GeocodingRequest{\n\t\tAddress: address,\n\t}\n\n\t// Execute the request.\n\tresp, err := client.Geocode(context.Background(), req)\n\tif len(resp) < 1 {\n\t\treturn nil, err\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// 
Using the first/closest match in our response, grab the values we need.\n\tcomponents := resp[0].AddressComponents\n\tformattedAddress := resp[0].FormattedAddress\n\tlat := resp[0].Geometry.Location.Lat\n\tlng := resp[0].Geometry.Location.Lng\n\n\t// Construct the return *Address{}\n\tresponse := &Address{\n\t\tAddressLine1: compose(addressLine1Composition, \"\", components, false),\n\t\tAddressLine2: compose(addressLine2Composition, addressLineDelimeter, components, false),\n\t\tAddressCity: compose(addressCityComposition, addressLineDelimeter, components, false),\n\t\tAddressState: compose(addressStateComposition, addressLineDelimeter, components, false),\n\t\tAddressCountry: compose(addressCountryComposition, addressLineDelimeter, components, false),\n\t\tAddressCountryCode: compose(addressCountryCodeComposition, addressLineDelimeter, components, true),\n\t\tAddressZip: compose(addressPostalCodeComposition, addressLineDelimeter, components, false),\n\t\tAddressLat: &lat,\n\t\tAddressLng: &lng,\n\t\tFormattedAddress: &formattedAddress,\n\t}\n\n\treturn response, err\n}", "func (g *Getter) decodeA(buf []byte, res string) (int, error) {\n\t/* Parse as an IP address */\n\tip := net.ParseIP(res)\n\tif nil == ip {\n\t\treturn 0, fmt.Errorf(\"invalid IP address %q\", res)\n\t}\n\t/* Parse with the appropriate length */\n\tvar plen, start int\n\tswitch g.Type {\n\tcase TypeA:\n\t\tip = ip.To4()\n\t\tplen = 4\n\t\tstart = 1\n\tcase TypeAAAA:\n\t\tip = ip.To16()\n\t\tplen = 16\n\t\tstart = 8\n\t}\n\t/* If we didn't get an address of the right size, someone goofed */\n\tif nil == ip {\n\t\treturn 0, fmt.Errorf(\"unable to parse IP address %s\", res)\n\t}\n\t/* Make sure we have enough buffer */\n\tif plen > len(buf) {\n\t\treturn 0, fmt.Errorf(\n\t\t\t\"buffer too small for record of type %s\",\n\t\t\tg.Type,\n\t\t)\n\t}\n\t/* Extract the payload */\n\treturn copy(buf, ip[start:]), nil\n}", "func (a *Address) UnmarshalText(input []byte) error {\n\treturn hexutil.UnmarshalFixedText(\"Address\", input, a[:])\n}", "func (a *Address) UnmarshalText(input []byte) error {\n\treturn hexutil.UnmarshalFixedText(\"Address\", input, a[:])\n}", "func Bech32ToAddress(encodedAddr string) (Address, error) { return V1.Bech32ToAddress(encodedAddr) }", "func TestAddressString(t *testing.T) {\n\ttests := []struct {\n\t\taddr, want Address // If want==nil, want is set to addr.\n\t}{\n\t\t{addr: Dot},\n\t\t{addr: End},\n\t\t{addr: All},\n\t\t{addr: Rune(0)},\n\t\t{addr: Rune(100)},\n\t\t// Rune(-100) is the string -#100, when parsed, the implicit . is inserted: .-#100.\n\t\t{addr: Rune(-100), want: Dot.Minus(Rune(100))},\n\t\t{addr: Line(0)},\n\t\t{addr: Line(100)},\n\t\t// Line(-100) is the string -100, when parsed, the implicit . 
is inserted: .-100.\n\t\t{addr: Line(-100), want: Dot.Minus(Line(100))},\n\t\t{addr: Mark('a')},\n\t\t{addr: Mark('z')},\n\t\t{addr: Regexp(\"/☺☹\")},\n\t\t{addr: Regexp(\"/☺☹/\")},\n\t\t{addr: Regexp(\"?☺☹\")},\n\t\t{addr: Regexp(\"?☺☹?\")},\n\t\t{addr: Dot.Plus(Line(1))},\n\t\t{addr: Dot.Minus(Line(1))},\n\t\t{addr: Dot.Minus(Line(1)).Plus(Line(1))},\n\t\t{addr: Rune(1).To(Rune(2))},\n\t\t{addr: Rune(1).Then(Rune(2))},\n\t\t{addr: Regexp(\"/func\").Plus(Regexp(`/\\(`))},\n\t}\n\tfor _, test := range tests {\n\t\tif test.want == nil {\n\t\t\ttest.want = test.addr\n\t\t}\n\t\tstr := test.addr.String()\n\t\tgot, _, err := Addr([]rune(str))\n\t\tif err != nil || got != test.want {\n\t\t\tt.Errorf(\"Addr(%q)=%v,%v want %q,nil\", str, got.String(), err, test.want.String())\n\t\t}\n\t}\n}", "func (s *String) DecodeFromBytes(b []byte) error {\n\tif len(b) < 4 {\n\t\treturn errors.NewErrTooShortToDecode(s, \"should be longer than 4 bytes\")\n\t}\n\n\ts.Length = int32(binary.LittleEndian.Uint32(b[:4]))\n\tif s.Length <= 0 {\n\t\treturn nil\n\t}\n\ts.Value = b[4 : 4+s.Length]\n\treturn nil\n}", "func (a *Address) UnmarshalJSON(buf []byte) (err error) {\n\tstr := string(buf[1 : len(buf)-1])\n\t_, _, err = ParseAddrPort(str)\n\tif err == nil {\n\t\t*a = Address(str)\n\t}\n\treturn\n}", "func ParseAddress(addr string) (proto string, path string, err error) {\n\tm := netAddrRx.FindStringSubmatch(addr)\n\tif m == nil {\n\t\treturn \"\", \"\", goof.WithField(\"address\", addr, \"invalid address\")\n\t}\n\treturn m[1], m[2], nil\n}", "func BytesToAddress(b []byte) Address {\n\tvar a Address\n\tif len(b) > AddressLength {\n\t\tb = b[len(b)-AddressLength:]\n\t}\n\tcopy(a[AddressLength-len(b):], b)\n\treturn a\n}", "func ParseID(body string) (_ ID, err error) {\n\tif len(body) <= 1 {\n\t\terr = errors.New(\"address too short\")\n\t\treturn\n\t}\n\tif body[0] != 'b' {\n\t\terr = fmt.Errorf(\"invalid codec %x\", body[0])\n\t\treturn\n\t}\n\t// remove 'b' byte\n\tbody = body[1:]\n\ts, err := base32i.CheckDecodeString(body)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn DecodeID(s), nil\n}", "func (_BaseAccessWallet *BaseAccessWalletFilterer) ParseDbgAddress(log types.Log) (*BaseAccessWalletDbgAddress, error) {\n\tevent := new(BaseAccessWalletDbgAddress)\n\tif err := _BaseAccessWallet.contract.UnpackLog(event, \"dbgAddress\", log); err != nil {\n\t\treturn nil, err\n\t}\n\tevent.Raw = log\n\treturn event, nil\n}", "func DnsDecoder(urlStr string) (*string, *string, error) {\n\tu, err := url.Parse(urlStr)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\thostTmp := u.Host\n\tIP := Dns(u.Host)\n\tif IP != nil {\n\t\tu.Host = IP.String()\n\t\turlStr = u.String()\n\t\treturn &urlStr, &hostTmp, nil\n\t}\n\treturn nil, nil, fmt.Errorf(\"dnsDecoder fail\")\n}", "func (c *Client) DecodeEasypostAddressVerifications(resp *http.Response) (*EasypostAddressVerifications, error) {\n\tvar decoded EasypostAddressVerifications\n\terr := c.Decoder.Decode(&decoded, resp.Body, resp.Header.Get(\"Content-Type\"))\n\treturn &decoded, err\n}", "func ParseAddress(address string) (*Address, errors.TracerError) {\n\taddr := &Address{}\n\tif ValidateIPv6Address(address) {\n\t\tclean, testPort := cleanIPv6(address)\n\t\thasPort := false\n\t\tport := 0\n\t\tif testPort > 0 {\n\t\t\thasPort = true\n\t\t\tport = testPort\n\t\t}\n\t\treturn &Address{Host: clean, Port: port, IsIPv6: true, HasPort: hasPort}, nil\n\t}\n\tcolons := strings.Count(address, \":\")\n\tif colons > 1 {\n\t\treturn nil, errors.New(\"Invalid address: too many colons 
'%s'\", address)\n\t} else if colons == 0 {\n\t\treturn &Address{Host: address, HasPort: false}, nil\n\t}\n\tsplit := strings.Split(address, \":\")\n\taddr.Host = split[0]\n\tport, err := strconv.Atoi(split[1])\n\tif err != nil {\n\t\treturn nil, errors.New(\"address '%s' is invalid: could not parse port data, %s\", address, err)\n\t}\n\tif port <= 0 || port > math.MaxUint16 {\n\t\treturn nil, errors.New(\"port '%d' is not a valid port number, must be uint16\", port)\n\t}\n\taddr.Port = port\n\taddr.HasPort = true\n\treturn addr, nil\n}", "func ParseAddress(address []byte, options AddressParserOptions) *AddressParserResponse {\n\tcaddress, _ := (*C.char)(unsafe.Pointer((*sliceHeader)(unsafe.Pointer(&address)).Data)), cgoAllocsUnknown\n\tcoptions, _ := options.PassValue()\n\t__ret := C.parse_address(caddress, coptions)\n\t__v := NewAddressParserResponseRef(unsafe.Pointer(__ret))\n\treturn __v\n}", "func HexDecodeString(s string) ([]byte, error) {\n\ts = strings.TrimPrefix(s, \"0x\")\n\n\tif len(s)%2 != 0 {\n\t\ts = \"0\" + s\n\t}\n\n\tb, err := hex.DecodeString(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn b, nil\n}", "func BytesToAddress(b []byte) Address {\n\tvar a Address\n\ta.SetBytes(b)\n\treturn a\n}", "func StringToPeerInfo(addrStr string) (*peerstore.PeerInfo, error) {\n\taddr, err := iaddr.ParseString(addrStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpeerinfo, err := peerstore.InfoFromP2pAddr(addr.Multiaddr())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn peerinfo, nil\n}", "func parseEPRTtoAddr(line string) (string, string, error) {\n\taddr := strings.Split(line, \"|\")\n\n\tif len(addr) != 5 {\n\t\treturn \"\", \"\", fmt.Errorf(\"invalid data address\")\n\t}\n\n\tnetProtocol := addr[1]\n\tIP := addr[2]\n\n\t// check port is valid\n\tport := addr[3]\n\tif integerPort, err := strconv.Atoi(port); err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"invalid data address\")\n\t} else if integerPort <= 0 || integerPort > 65535 {\n\t\treturn \"\", \"\", fmt.Errorf(\"invalid data address\")\n\t}\n\n\tswitch netProtocol {\n\tcase \"1\", \"2\":\n\t\t// use protocol 1 means IPv4. 
2 means IPv6\n\t\t// net.ParseIP for validate IP\n\t\tif net.ParseIP(IP) == nil {\n\t\t\treturn \"\", \"\", fmt.Errorf(\"invalid data address\")\n\t\t}\n\t\tbreak\n\tdefault:\n\t\t// wrong network protocol\n\t\treturn \"\", \"\", fmt.Errorf(\"unknown network protocol\")\n\t}\n\n\treturn IP, port, nil\n}", "func TestAddress(t *testing.T) {\n addr, err := StringAddress(m_pub2)\n if err != nil {\n t.Errorf(\"%s should have been nil\",err.Error())\n }\n expected_addr := \"1AEg9dFEw29kMgaN4BNHALu7AzX5XUfzSU\"\n if addr != expected_addr {\n t.Errorf(\"\\n%s\\nshould be\\n%s\",addr,expected_addr)\n }\n}", "func NewAddressFromBech32(data string) (a Address) {\n\terr := a.UnmarshalText([]byte(data))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn\n}", "func hexDecodeStr(s string) ([]byte, error) {\n\tsrc := []byte(s)\n\tdst := make([]byte, len(src)/2)\n\tif len(s)%2 != 0 {\n\t\treturn dst, errors.New(\"Not a valid hex string\")\n\t}\n\n\tfor i := range dst {\n\t\tfirst, ok := unhex(src[i*2])\n\t\tif !ok {\n\t\t\treturn dst, errors.New(\"Invalid hex char\") // Would def be nicer to have the byte in the message but w/e\n\t\t}\n\t\tsecond, ok := unhex(src[i*2+1])\n\t\tif !ok {\n\t\t\treturn dst, errors.New(\"Invalid hex char\")\n\t\t}\n\n\t\tdst[i] = first<<4 | second // equivalent to first * 16 + second\n\t}\n\n\treturn dst, nil\n}", "func ParseCode(addr string) (string, error) {\n\tl := len(addr)\n\tif l < 50 {\n\t\treturn \"\", InvalidAccountAddrError{reason: \"length\"}\n\t}\n\ti := l - 50 // start index of hex\n\treturn strings.ToUpper(addr[0:i]), nil\n}", "func (a *Address) UnmarshalJSON(input []byte) error {\n\treturn hexutil.UnmarshalFixedJSON(addressT, input, a[:])\n}", "func (a *Address) UnmarshalJSON(input []byte) error {\n\treturn hexutil.UnmarshalFixedJSON(addressT, input, a[:])\n}", "func decode(field reflect.Value, value string) error {\n\tif !canDecode(field) {\n\t\treturn errors.New(\"value cannot decode itself\")\n\t}\n\n\td, ok := field.Interface().(Decoder)\n\tif !ok && field.CanAddr() {\n\t\td, ok = field.Addr().Interface().(Decoder)\n\t}\n\n\tif ok {\n\t\treturn d.Decode(value)\n\t}\n\n\tt, ok := field.Interface().(encoding.TextUnmarshaler)\n\tif !ok && field.CanAddr() {\n\t\tt, ok = field.Addr().Interface().(encoding.TextUnmarshaler)\n\t}\n\n\tif ok {\n\t\treturn t.UnmarshalText([]byte(value))\n\t}\n\n\treturn errors.New(\"failed to find a decoding type\")\n}", "func DecodeAddressToPublicKey(in []byte) (*keys.StealthAddress, error) {\n\tpk := keys.NewStealthAddress()\n\tvar buf = &bytes.Buffer{}\n\t_, err := buf.Write(in)\n\tif err != nil {\n\t\treturn pk, err\n\t}\n\n\tpk.RG = new(common.JubJubCompressed)\n\tpk.PkR = new(common.JubJubCompressed)\n\tpk.RG.Data = make([]byte, 32)\n\tpk.PkR.Data = make([]byte, 32)\n\n\tif _, err = buf.Read(pk.RG.Data); err != nil {\n\t\treturn pk, err\n\t}\n\n\tif _, err = buf.Read(pk.PkR.Data); err != nil {\n\t\treturn pk, err\n\t}\n\n\treturn pk, nil\n}", "func ParseAddr(s string) (Addr, error) {\n\tcomma := strings.IndexByte(s, ',')\n\tif comma < 0 {\n\t\treturn Addr{}, serrors.New(\"invalid address: expected comma\", \"value\", s)\n\t}\n\tia, err := ParseIA(s[0:comma])\n\tif err != nil {\n\t\treturn Addr{}, err\n\t}\n\th, err := ParseHost(s[comma+1:])\n\tif err != nil {\n\t\treturn Addr{}, err\n\t}\n\treturn Addr{IA: ia, Host: h}, nil\n}", "func HexToAddress(h string) Address {\n\ttrimmed := strings.TrimPrefix(h, \"0x\")\n\tif len(trimmed)%2 == 1 {\n\t\ttrimmed = \"0\" + trimmed\n\t}\n\tb, _ := hex.DecodeString(trimmed)\n\treturn 
BytesToAddress(b)\n}", "func parseAddress(mailAddress string) (address *mail.Address, err error) {\n\tstrimmed := strings.TrimSpace(mailAddress)\n\n\tif address, err = mail.ParseAddress(strimmed); err == nil {\n\t\treturn address, nil\n\t}\n\n\tlog.Printf(\"[mail] parseAddress: %s\\n\", err)\n\treturn nil, err\n}", "func Fixed8DecodeString(s string) (Fixed8, error) {\n\tparts := strings.SplitN(s, \".\", 2)\n\tip, err := strconv.Atoi(parts[0])\n\tif err != nil {\n\t\treturn 0, errInvalidString\n\t} else if len(parts) == 1 {\n\t\treturn NewFixed8(ip), nil\n\t}\n\n\tfp, err := strconv.Atoi(parts[1])\n\tif err != nil || fp >= decimals {\n\t\treturn 0, errInvalidString\n\t}\n\tfor i := len(parts[1]); i < precision; i++ {\n\t\tfp *= 10\n\t}\n\treturn Fixed8(ip*decimals + fp), nil\n}", "func ParseAddress(address string) (string, string) {\n\tsplit := strings.Split(address, \":\")\n\tip := split[0]\n\tport := split[1]\n\n\treturn ip, port\n}", "func decode(id *ID, src []byte) {\n\tencoder.Decode(id[:], src)\n}", "func UserAddressFromAddress(a Address) *UserAddress {\n\tvar streetNum, streetName string\n\tparts := strings.Split(a.LineOne(), \" \")\n\n\tif _, err := strconv.Atoi(parts[0]); err == nil {\n\t\tstreetNum = parts[0]\n\t}\n\tstreetName = strings.Join(parts[1:], \" \")\n\n\tif addr, ok := a.(*UserAddress); ok {\n\t\tif len(addr.StreetNumber) == 0 {\n\t\t\taddr.StreetNumber = streetNum\n\t\t}\n\t\tif len(addr.StreetName) == 0 {\n\t\t\taddr.StreetName = streetName\n\t\t}\n\t\treturn addr\n\t}\n\n\treturn &UserAddress{\n\t\tStreet: a.LineOne(),\n\t\tStreetNumber: streetNum,\n\t\tStreetName: streetName,\n\t\tCityName: a.City(),\n\t\tPostalCode: a.Zip(),\n\t\tRegion: a.StateCode(),\n\t}\n}", "func DecodeBase58BitcoinAddress(addr string) (BitcoinAddress, error) {\n\tb, err := base58.Decode(addr)\n\tif err != nil {\n\t\treturn BitcoinAddress{}, err\n\t}\n\treturn BitcoinAddressFromBytes(b)\n}", "func ParseAddress(address string) (string, int) {\n\tmatch, err := gregex.MatchString(`^(.+):(\\d+)$`, address)\n\tif err == nil {\n\t\ti, _ := strconv.Atoi(match[2])\n\t\treturn match[1], i\n\t}\n\treturn \"\", 0\n}", "func Decode(b string) ([]byte, error) {\n\treturn DecodeAlphabet(b, BTCAlphabet)\n}", "func HashStringToAddress(str string) address.Address {\n\th := hash.Hash160b([]byte(str))\n\taddr, err := address.FromBytes(h[:])\n\tif err != nil {\n\t\tlog.L().Panic(\"Error when constructing the address of account protocol\", zap.Error(err))\n\t}\n\treturn addr\n}", "func BytesToAddress(b []byte) (Address, error) {\n\tif len(b) > AddressLength {\n\t\treturn Address{}, AddressOverflowError\n\t}\n\tvar a Address\n\ta.SetBytes(b)\n\treturn a, nil\n}", "func DecodeFromString(publicKey []byte, tokenString string, v interface{}) error {\n\tvar token Token\n\terr := json.Unmarshal([]byte(tokenString), &token)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn Decode(publicKey, token, v)\n}", "func IDHexDecode(s string) (core.ID, error) {\n\treturn core.IDHexDecode(s)\n}", "func HexToAddress(h string) (Address, error) {\n\ttrimmed := strings.TrimPrefix(h, \"0x\")\n\tif len(trimmed)%2 == 1 {\n\t\ttrimmed = \"0\" + trimmed\n\t}\n\tb, err := hex.DecodeString(trimmed)\n\tif err != nil {\n\t\treturn Address{}, InvalidHexAddressError\n\t}\n\treturn BytesToAddress(b)\n}", "func StrToAddr(x string) (uint32, error) {\n\tparts := strings.Split(x, \".\")\n\tif len(parts) != 4 {\n\t\treturn 0, fmt.Errorf(\"Invalid format\")\n\t}\n\n\tret := uint32(0)\n\tfor i := 0; i < 4; i++ {\n\t\ty, err := 
strconv.Atoi(parts[i])\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"Unable to convert %q to int: %v\", parts[i], err)\n\t\t}\n\n\t\tif y > 255 {\n\t\t\treturn 0, fmt.Errorf(\"%d is too big for a uint8\", y)\n\t\t}\n\n\t\tret += uint32(math.Pow(256, float64(3-i))) * uint32(y)\n\t}\n\n\treturn ret, nil\n}", "func GetTestAddr(addr string, bech string) sdk.AccAddress {\n\n\tres, err := sdk.AccAddressFromHex(addr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tbechexpected := res.String()\n\tif bech != bechexpected {\n\t\tpanic(\"Bech encoding doesn't match reference\")\n\t}\n\n\tbechres, err := sdk.AccAddressFromBech32(bech)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif !bytes.Equal(bechres, res) {\n\t\tpanic(\"Bech decode and hex decode don't match\")\n\t}\n\n\treturn res\n}", "func toStateAddress(prefix string, b []byte) string {\n\t// Make sure the address is padded correctly\n\tb = word256.RightPadBytes(b, 32)\n\n\treturn prefix + client.MustEncode(b)\n}" ]
[ "0.762846", "0.72298115", "0.72216547", "0.70370436", "0.7010531", "0.6963402", "0.69541156", "0.6823345", "0.68193984", "0.6794519", "0.67006904", "0.66820186", "0.6623264", "0.65907305", "0.65661985", "0.65495574", "0.648334", "0.6466039", "0.62854505", "0.6275847", "0.62742966", "0.62046146", "0.61446965", "0.6076108", "0.6075837", "0.599704", "0.5976486", "0.5976486", "0.5973362", "0.59251815", "0.5891787", "0.5886327", "0.58718926", "0.58702797", "0.57979697", "0.57799256", "0.57699615", "0.57505107", "0.57445985", "0.57423836", "0.57397014", "0.5726885", "0.5687597", "0.56556207", "0.562566", "0.55984384", "0.554593", "0.554385", "0.55356693", "0.55168426", "0.5501442", "0.54858553", "0.54709554", "0.5458618", "0.5430786", "0.54282814", "0.54172283", "0.54172283", "0.5407837", "0.5404417", "0.53878266", "0.53872204", "0.5362103", "0.5330295", "0.53186893", "0.5304509", "0.53025085", "0.52992696", "0.5285117", "0.5280775", "0.5262631", "0.5254506", "0.5251611", "0.524588", "0.52348226", "0.52179223", "0.5212351", "0.5209752", "0.52030134", "0.52030134", "0.52024984", "0.52016443", "0.5200561", "0.51984197", "0.5196811", "0.5196054", "0.5194987", "0.5191365", "0.5189248", "0.51742613", "0.51686865", "0.5166885", "0.51664615", "0.51497006", "0.5147992", "0.514426", "0.51427096", "0.5140523", "0.51320046", "0.5130498" ]
0.74612874
1
GetHTTPClient returns http client for the purpose of test
func (c Client) GetHTTPClient() *http.Client { return &c.httpClient }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GetHTTPClient() *http.Client {\r\n tlsConfig := &tls.Config {\r\n InsecureSkipVerify: true, //for this test, ignore ssl certificate\r\n }\r\n\r\n tr := &http.Transport{TLSClientConfig: tlsConfig}\r\n client := &http.Client{Transport: tr}\r\n\r\n return client\r\n}", "func GetHTTPClient() *http.Client {\n tlsConfig := &tls.Config {\n InsecureSkipVerify: true, //for this test, ignore ssl certificate\n }\n\n tr := &http.Transport{TLSClientConfig: tlsConfig}\n client := &http.Client{Transport: tr}\n\n return client\n}", "func GetHTTPClient() *http.Client { return httpClientPool.Get().(*http.Client) }", "func GetHTTPClient() *http.Client {\n\tonce.Do(func() {\n\t\thttpClient = http.DefaultClient\n\t})\n\n\treturn httpClient\n}", "func GetHTTPClient() *http.Client {\n\treturn httpClient\n}", "func (c *Client) GetHTTPClient() *http.Client {\n\tif c.hc == nil {\n\t\tc.hc = &http.Client{\n\t\t\tTimeout: time.Second * 5,\n\t\t\tCheckRedirect: func(req *http.Request, via []*http.Request) error {\n\t\t\t\tc.SetToken(req)\n\t\t\t\treturn nil\n\t\t\t},\n\t\t}\n\t}\n\treturn c.hc\n}", "func (a *UserPassAuthStrategy) GetHTTPClient() *http.Client {\n\tif a.hc == nil {\n\t\ta.hc = &http.Client{Timeout: time.Second * 5}\n\t}\n\treturn a.hc\n}", "func (am *authManager) GetHTTPClient(client kubernetes.Interface) *http.Client {\n\tclient, _ = am.getK8sClient(client)\n\tauthSettings := am.settingsManager.GetAuthSettings(client)\n\tif authSettings == nil || !authSettings.Enabled || (authSettings.RootCAFile == \"\" && authSettings.RootCA == \"\") {\n\t\tam.HttpClient = http.DefaultClient\n\t\tam.HttpClient.Timeout = time.Second * 30\n\t} else if authSettings.RootCAFile != am.CAPath || authSettings.RootCA != am.CACert {\n\t\tvar err error\n\t\tam.HttpClient, err = authApi.GetClient(authSettings.RootCAFile, authSettings.RootCA)\n\t\tif err != nil {\n\t\t\tam.HttpClient = http.DefaultClient\n\t\t\tlog.Println(\"Error getting HTTP client:\", err)\n\t\t} else {\n\t\t\tam.CAPath = authSettings.RootCAFile\n\t\t\tam.CACert = authSettings.RootCA\n\t\t}\n\t}\n\treturn am.HttpClient\n}", "func (adam *Ctx) getHTTPClient() *http.Client {\n\ttlsConfig := &tls.Config{}\n\tif adam.serverCA != \"\" {\n\t\tcaCert, err := ioutil.ReadFile(adam.serverCA)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"unable to read server CA file at %s: %v\", adam.serverCA, err)\n\t\t}\n\t\tcaCertPool := x509.NewCertPool()\n\t\tcaCertPool.AppendCertsFromPEM(caCert)\n\t\ttlsConfig.RootCAs = caCertPool\n\t}\n\tif adam.insecureTLS {\n\t\ttlsConfig.InsecureSkipVerify = true\n\t}\n\tvar client = &http.Client{\n\t\tTimeout: time.Second * 10,\n\t\tTransport: &http.Transport{\n\t\t\tTLSClientConfig: tlsConfig,\n\t\t\tTLSHandshakeTimeout: 10 * time.Second,\n\t\t\tResponseHeaderTimeout: 10 * time.Second,\n\t\t},\n\t}\n\treturn client\n}", "func GetHTTPClient(req *client.Request) (httpClient *http.Client, err error) {\n\thttpClient = http.DefaultClient\n\n\t// Pega o proxy disponivel\n\tproxy, err := req.Proxy.GetProxy()\n\n\t// Verifica se há um proxy disponível\n\tif err == nil {\n\t\t// Cria a conexão com proxy\n\t\thttpClient = &http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tProxy: http.ProxyURL(proxy),\n\t\t\t},\n\t\t}\n\t}\n\n\treturn httpClient, nil\n}", "func getHTTPClient(options RegistryOptions) *http.Client {\n\n\toverriddenTimeout := httpRequestResponseTimeout\n\ttimeout := options.HTTPTimeout\n\t//if value is invalid or unspecified, the default will be used\n\tif timeout != nil && *timeout > 0 {\n\t\t//convert timeout to 
seconds\n\t\toverriddenTimeout = time.Duration(*timeout) * time.Second\n\t}\n\n\treturn &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tProxy: http.ProxyFromEnvironment,\n\t\t\tResponseHeaderTimeout: overriddenTimeout,\n\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: options.SkipTLSVerify},\n\t\t},\n\t\tTimeout: overriddenTimeout,\n\t}\n}", "func (c *Client) httpClient() *http.Client { return http.DefaultClient }", "func GetHttpClient(sdkConfig *SdkConfig) *http.Client {\n\n\tif sdkConfig.SkipVerify {\n\t\tlog.Println(\"[WARNING] Using SkipVerify for ignoring SSL certificate issues!\")\n\t\treturn &http.Client{Transport: &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // ignore expired SSL certificates\n\t\t}}\n\t}\n\treturn http.DefaultClient\n}", "func (r *ProtocolIncus) GetHTTPClient() (*http.Client, error) {\n\tif r.http == nil {\n\t\treturn nil, fmt.Errorf(\"HTTP client isn't set, bad connection\")\n\t}\n\n\treturn r.http, nil\n}", "func GetHTTPClient(conf map[string]string) *HTTPClient {\n\tClient := new(HTTPClient)\n\tif username, err := conf[\"username\"]; err {\n\t\tClient.user = username\n\t}\n\tif password, err := conf[\"password\"]; err {\n\t\tClient.password = password\n\t}\n\treturn Client\n}", "func (c *Client) HTTPClient() HTTPInterface {\n\treturn c.httpClient\n}", "func (service *BaseService) GetHTTPClient() *http.Client {\n\tif isRetryableClient(service.Client) {\n\t\ttr := service.Client.Transport.(*retryablehttp.RoundTripper)\n\t\treturn tr.Client.HTTPClient\n\t}\n\treturn service.Client\n}", "func NewHTTPClient() *http.Client {\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{\n\t\t\tInsecureSkipVerify: true, //nolint:gosec // Needs to be enabled in suites. Not used in production.\n\t\t},\n\t}\n\n\treturn &http.Client{\n\t\tTransport: tr,\n\t\tCheckRedirect: func(req *http.Request, via []*http.Request) error {\n\t\t\treturn http.ErrUseLastResponse\n\t\t},\n\t}\n}", "func (c *Client) HTTPClient() HTTPInterface {\n\treturn c.options.httpClient\n}", "func GetHTTPClient() *http.Client {\n\t// Lazy initialize the HTTP client\n\tif httpClient == nil {\n\t\t// Initialize the default transport\n\t\tinitDefaultTransport()\n\n\t\toptions := &cookiejar.Options{\n\t\t\tPublicSuffixList: publicsuffix.List,\n\t\t}\n\n\t\tjar, err := cookiejar.New(options)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error creating new cookie jar: %s\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\thttpClient = &http.Client{\n\t\t\tJar: jar,\n\t\t}\n\t}\n\n\treturn httpClient\n}", "func getHttpClient(ctx context.Context) *http.Client {\n\tif config.Server.HostedInGAE {\n\t\t// The default HttpClient on Google App Engine appears to have a 5 second timeout,\n\t\t// this creates a client with a longer timeout.\n\t\tcctx, _ := context.WithDeadline(ctx, time.Now().Add(30*time.Second))\n\t\treturn urlfetch.Client(cctx)\n\t} else {\n\t\t// Default client is good enough\n\t\treturn http.DefaultClient\n\t}\n}", "func GetClient(timeout int) *http.Client {\n\tclient := http.Client{\n\t\tTimeout: time.Duration(timeout) * time.Second,\n\t}\n\treturn &client\n}", "func (a *API) getClient(ctx context.Context) *http.Client {\n\tif a.Client != nil {\n\t\treturn a.Client\n\t}\n\tif httpClient != nil {\n\t\treturn httpClient\n\t}\n\treturn &http.Client{Timeout: a.getTimeout()}\n}", "func (cc *Client) HTTPClient() *http.Client {\n\treturn cc.httpClient\n}", "func GetHTTPClient(timeout int) *http.Client {\n\n\t// This env var should only be used in our test environments or in 
an emergency when there is a problem with the SSL certificate of a horizon service.\n\tskipSSL := false\n\tif os.Getenv(\"HZN_SSL_SKIP_VERIFY\") != \"\" {\n\t\tskipSSL = true\n\t}\n\n\t// Set request timeout based on environment variables and input values. The environment variable always overrides the\n\t// input parameter. The other timeouts are subject to the timeout setting also.\n\trequestTimeout := timeout\n\n\tif envTimeout := os.Getenv(config.HTTPRequestTimeoutOverride); envTimeout != \"\" {\n\t\tif t, err := strconv.Atoi(envTimeout); err == nil {\n\t\t\trequestTimeout = t\n\t\t} else {\n\t\t\tWarning(i18n.GetMessagePrinter().Sprintf(\"Unable to use %v to set the request timeout, the value is not a valid number: %v\", config.HTTPRequestTimeoutOverride, envTimeout))\n\t\t}\n\t}\n\n\tresponseTimeout := int(float64(requestTimeout) * 0.8)\n\tdialTimeout := int(float64(requestTimeout) * 0.5)\n\tkeepAlive := requestTimeout * 2\n\tTLSHandshake := dialTimeout\n\texpectContinue := int(float64(requestTimeout) * 0.5)\n\n\tVerbose(i18n.GetMessagePrinter().Sprintf(\"HTTP request timeout set to %v seconds\", requestTimeout))\n\n\treturn &http.Client{\n\t\t// remember that this timeout is for the whole request, including\n\t\t// body reading. This means that you must set the timeout according\n\t\t// to the total payload size you expect\n\t\tTimeout: time.Second * time.Duration(requestTimeout),\n\t\tTransport: &http.Transport{\n\t\t\tDial: (&net.Dialer{\n\t\t\t\tTimeout: time.Duration(dialTimeout) * time.Second,\n\t\t\t\tKeepAlive: time.Duration(keepAlive) * time.Second,\n\t\t\t}).Dial,\n\t\t\tTLSHandshakeTimeout: time.Duration(TLSHandshake) * time.Second,\n\t\t\tResponseHeaderTimeout: time.Duration(responseTimeout) * time.Second,\n\t\t\tExpectContinueTimeout: time.Duration(expectContinue) * time.Second,\n\t\t\tMaxIdleConns: config.MaxHTTPIdleConnections,\n\t\t\tIdleConnTimeout: config.HTTPIdleConnectionTimeout * time.Millisecond, // ms since we don't want cli to hold onto connections for very long\n\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\tInsecureSkipVerify: skipSSL,\n\t\t\t},\n\t\t},\n\t}\n\n}", "func NewHTTPClient() *http.Client {\n\n\ttr := &http.Transport{\n\t\t//TLSClientConfig: &tls.Config{\n\t\t//\tInsecureSkipVerify: conf.InsecureSkipVerify,\n\t\t//},\n\t\tMaxIdleConnsPerHost: DefaultMaxIdleConnsPerHost,\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tDial: (&net.Dialer{\n\t\t\tTimeout: DefaultTimeout,\n\t\t\tKeepAlive: DefaultKeepAlive,\n\t\t}).Dial,\n\t\tTLSHandshakeTimeout: DefaultTimeout,\n\t}\n\n\treturn &http.Client{\n\t\tTimeout: DefaultTimeout,\n\t\tTransport: tr,\n\t}\n}", "func (lm LinksManager) HTTPClient() *http.Client {\n\treturn lm.Client\n}", "func HTTPClient() *http.Client {\n\tif httpClient == nil {\n\t\ttransport := &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\t}\n\n\t\thttpClient = &http.Client{Transport: transport}\n\t}\n\treturn httpClient\n}", "func (c *Client) HTTPClient() *http.Client {\n\treturn c.http\n}", "func (m *ClientModule) HTTPClient() client.HTTPClient {\n\treturn client.NewHTTPClient(m.Retries)\n}", "func (backend *Backend) HTTPClient() *http.Client {\n\treturn backend.httpClient\n}", "func NewHTTPClient(slog slog.Logger, filer sio.Filer) (clt Client, err error) {\n\thttpClt := &HTTPClient{logger: slog}\n\thttpClt.client = httpClt\n\thttpClt.filer = filer\n\treturn httpClt.client, nil\n}", "func testHTTPClient() *http.Client {\n\treturn &http.Client{\n\t\tCheckRedirect: func(req *http.Request, via []*http.Request) 
error {\n\t\t\treturn http.ErrUseLastResponse\n\t\t},\n\t}\n}", "func NewHTTPClient() *HTTPClient {\n\treturn &HTTPClient{\n\t\tClient: http.DefaultClient,\n\t\tCacheDir: viper.GetString(\"http_cache_dir\"),\n\t}\n}", "func getHttpClient(f *File, timeout int) *http.Client {\n\tproxyUrl := http.ProxyFromEnvironment\n\tif f.Proxy != \"\" {\n\t\tproxy, _ := url.Parse(f.Proxy)\n\t\tproxyUrl = http.ProxyURL(proxy)\n\t}\n\ttr := &http.Transport{\n\t\tProxy: proxyUrl,\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: f.SkipVerify},\n\t}\n\tclient := &http.Client{\n\t\tTransport: tr,\n\t\tTimeout: time.Duration(timeout) * time.Second,\n\t}\n\treturn client\n}", "func getClient() *http.Client {\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}\n\treturn &http.Client{Transport: tr}\n}", "func DefaultGetHTTPClient() *http.Client {\n\tif InsecureTLS {\n\t\treturn &http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true}}}\n\t}\n\treturn &http.Client{}\n}", "func testingHTTPClient(handler http.Handler) (*http.Client, func()) {\n\tserver := httptest.NewServer(handler)\n\n\tclient := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tDialContext: func(_ context.Context, network, _ string) (net.Conn, error) {\n\t\t\t\treturn net.Dial(network, server.Listener.Addr().String())\n\t\t\t},\n\t\t},\n\t}\n\n\treturn client, server.Close\n}", "func (ctl *taskController) GetClient(timeout time.Duration) (*http.Client, error) {\n\t// TODO(vadimsh): Use per-project service accounts, not a global cron service\n\t// account.\n\tctx, _ := clock.WithTimeout(ctl.ctx, timeout)\n\ttransport, err := client.Transport(ctx, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &http.Client{Transport: transport}, nil\n}", "func (a *API) GetClient() *http.Client {\n\treturn &http.Client{}\n}", "func createHTTPClient() *http.Client {\n\tclient := &http.Client{}\n\tif insecure {\n\t\thttp.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true}\n\t}\n\treturn client\n}", "func newHTTPClient() *http.Client {\n\tclient := &http.Client{\n\t\tTimeout: defaultTimeout,\n\t}\n\treturn client\n}", "func testingHTTPClient(handler http.Handler) (*http.Client, func()) {\n\ts := httptest.NewServer(handler)\n\n\tcli := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tDialContext: func(_ context.Context, network, _ string) (net.Conn, error) {\n\t\t\t\treturn net.Dial(network, s.Listener.Addr().String())\n\t\t\t},\n\t\t},\n\t}\n\n\treturn cli, s.Close\n}", "func UseHTTPClient(client *http.Client) {\n\thttpClient = client\n}", "func NewHTTPClient(timeout time.Duration) *http.Client {\n\treturn &http.Client{\n\t\tTimeout: timeout,\n\t}\n}", "func HTTPClient() *http.Client {\n\treturn &http.Client{Timeout: 5 * time.Second}\n}", "func (c *Client) getClient() *http.Client {\n\t// Setup transport settings\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\tDisableCompression: true,\n\t\tDial: (&net.Dialer{\n\t\t\tTimeout: 5 * time.Second,\n\t\t}).Dial,\n\t\tTLSHandshakeTimeout: 5 * time.Second,\n\t}\n\n\t// Create a client\n\tclient := &http.Client{\n\t\tTimeout: time.Second * 10, // 10 second timeout\n\t\tTransport: tr,\n\t}\n\n\treturn client\n}", "func newHTTPClient() *http.Client {\n\treturn &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tProxy: http.ProxyFromEnvironment,\n\t\t\tDialContext: (&net.Dialer{\n\t\t\t\tTimeout: timeout,\n\t\t\t\tKeepAlive: 
30 * time.Second,\n\t\t\t\tDualStack: true,\n\t\t\t}).DialContext,\n\n\t\t\tTLSHandshakeTimeout: timeout,\n\t\t\tResponseHeaderTimeout: timeout,\n\t\t\tExpectContinueTimeout: 1 * time.Second,\n\t\t\tMaxIdleConns: 5,\n\t\t\tIdleConnTimeout: 90 * time.Second,\n\t\t},\n\t}\n}", "func NewHTTPClient() *HTTPClient {\n\treturn &HTTPClient{\n\t\tfasthttpClient: fasthttp.Client{},\n\t}\n}", "func createHTTPClient() *http.Client {\n\tclient := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tMaxIdleConnsPerHost: MaxIdleConnections,\n\t\t},\n\t\tTimeout: time.Duration(RequestTimeout) * time.Second,\n\t}\n\n\treturn client\n}", "func NewHTTPClient(uri string) HTTPClient {\n\treturn HTTPClient{\n\t\tBackendURI: uri,\n\t\tclient: &http.Client{},\n\t}\n}", "func NewHTTPClient() (client.Client, error) {\n\taddr := Settings.Config.URL.String()\n\tc, err := client.NewHTTPClient(client.HTTPConfig{\n\t\tAddr: addr,\n\t\tUsername: Settings.Config.Username,\n\t\tPassword: Settings.Config.Password,\n\t\tTimeout: Settings.Config.Timeout,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog.Debugf(\"action=NewHTTPClient addr=%s username=%s\", addr, Settings.Config.Username)\n\treturn c, nil\n}", "func (o *HandleGetAboutUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func createHTTPClient() *http.Client {\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\tMaxIdleConnsPerHost: 1,\n\t\tDisableKeepAlives: true,\n\t}\n\n\treturn &http.Client{\n\t\tTransport: tr,\n\t\tTimeout: time.Second * 60,\n\t}\n}", "func (agent *Agent) HttpClient() *http.Client {\n\treturn agent.httpCli\n}", "func createHTTPClient() *http.Client {\n\tclient := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tMaxIdleConnsPerHost: 15,\n\t\t},\n\t\tTimeout: time.Duration(10) * time.Second,\n\t}\n\n\treturn client\n}", "func (o *GetContentSourceUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func getHTTPClientSingleton() (*http.Client, error) {\n\tcerts, err := rootcerts.LoadSystemCAs()\n\tif err != nil {\n\t\tLogger.Errorf(\"Could not load System root CA files. Reason: %v\", err)\n\t\treturn nil, fmt.Errorf(\"Could not load System root CA files. 
Reason: %v\", err)\n\t}\n\n\tconfig := &tls.Config{\n\t\tRootCAs: certs,\n\t}\n\n\ttr := &http.Transport{TLSClientConfig: config}\n\thttpClient := &http.Client{\n\t\tTransport: tr,\n\t\tTimeout: time.Second * ClientHttpTimeoutInSeconds,\n\t}\n\n\treturn httpClient, nil\n}", "func (c *Config) CreateHTTPClient() *http.Client {\n\tif c.HTTPTransport != nil {\n\t\treturn &http.Client{\n\t\t\tTransport: c.HTTPTransport,\n\t\t}\n\t}\n\treturn http.DefaultClient\n}", "func NewHTTPClient(proxyNetwork, proxyAddress string, serviceNetwork, service string) http.Client {\n\tproxyClient := Client{proxyNetwork: proxyNetwork, proxyAddress: proxyAddress, serviceNetwork: serviceNetwork, service: service}\n\ttrans := &http.Transport{\n\t\tDial: proxyClient.proxyDial,\n\t\tDisableKeepAlives: false,\n\t}\n\treturn http.Client{Transport: trans}\n}", "func CreateHTTPClient(handler http.Handler) (*http.Client, func()) {\n\ts := httptest.NewServer(handler)\n\n\tcli := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tDialContext: func(_ context.Context, network, _ string) (net.Conn, error) {\n\t\t\t\treturn net.Dial(network, s.Listener.Addr().String())\n\t\t\t},\n\t\t},\n\t}\n\n\treturn cli, s.Close\n}", "func MakeHTTPClient(request *http.Request) (*http.Client, error) {\n\tcertFile := viper.GetString(\"AUTH.client_cert\")\n\tkeyFile := viper.GetString(\"AUTH.client_key\")\n\tif certFile != \"\" && keyFile != \"\" {\n\t\tcert, err := tls.LoadX509KeyPair(certFile, keyFile)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Failed to load client key and certificate\")\n\t\t\treturn nil, err\n\t\t}\n\n\t\ttlsConfig := &tls.Config{\n\t\t\tCertificates: []tls.Certificate{cert},\n\t\t}\n\n\t\tcaFile := viper.GetString(\"AUTH.root_ca\")\n\t\tif caFile != \"\" {\n\t\t\tcaCerts := x509.NewCertPool()\n\n\t\t\tcaData, err := ioutil.ReadFile(caFile)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"Failed to load root CA\")\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tcaCerts.AppendCertsFromPEM(caData)\n\t\t\ttlsConfig.RootCAs = caCerts\n\t\t}\n\n\t\ttlsConfig.BuildNameToCertificate()\n\t\ttransport := &http.Transport{TLSClientConfig: tlsConfig, Proxy: http.ProxyFromEnvironment}\n\t\treturn &http.Client{Transport: transport}, nil\n\t}\n\n\t//dev only\n\tif rhIdentity := viper.GetString(\"AUTH.x_rh_identity\"); rhIdentity != \"\" {\n\t\trequest.Header.Set(\"x-rh-identity\", rhIdentity)\n\t}\n\tuser := viper.GetString(\"AUTH.user\")\n\tpassword := viper.GetString(\"AUTH.password\")\n\tif user != \"\" && password != \"\" {\n\t\trequest.SetBasicAuth(user, password)\n\t}\n\treturn &http.Client{}, nil\n}", "func (ic *Client) HTTPClient() (*http.Client, error) {\n\tctx := context.Background()\n\tconf := ic.OAuth2Config()\n\n\ttok := conf.TokenSource(ctx, &oauth2.Token{\n\t\tAccessToken: ic.AccessToken,\n\t\tRefreshToken: ic.RefreshToken,\n\t})\n\n\ttt, err := tok.Token()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn conf.Client(ctx, tt), nil\n}", "func (o *GetProductUpgradeURLUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (o *EmailTemplateGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (c *HTTPClientMock) Client() *http.Client {\n\treturn c.client\n}", "func (rpc *RpcClient) newHTTPClient() (*http.Client, error) {\n\t// Configure proxy if needed.\n\tvar dial func(network, addr string) (net.Conn, error)\n\n\t// Configure TLS if needed.\n\tvar tlsConfig *tls.Config\n\n\t// Create and return the new HTTP client potentially configured with a\n\t// proxy and 
TLS.\n\tclient := http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tDial: dial,\n\t\t\tTLSClientConfig: tlsConfig,\n\t\t\tDialContext: (&net.Dialer{\n\t\t\t\tTimeout: 5 * time.Second,\n\t\t\t\tKeepAlive: 5 * time.Second,\n\t\t\t\tDualStack: true,\n\t\t\t}).DialContext,\n\t\t},\n\t}\n\treturn &client, nil\n}", "func (rpc *RpcClient) newHTTPClient() (*http.Client, error) {\n\t// Configure proxy if needed.\n\tvar dial func(network, addr string) (net.Conn, error)\n\tif rpc.Cfg.OptionConfig.Proxy != \"\" {\n\t\tproxy := &socks.Proxy{\n\t\t\tAddr: rpc.Cfg.OptionConfig.Proxy,\n\t\t\tUsername: rpc.Cfg.OptionConfig.ProxyUser,\n\t\t\tPassword: rpc.Cfg.OptionConfig.ProxyPass,\n\t\t}\n\t\tdial = func(network, addr string) (net.Conn, error) {\n\t\t\tc, err := proxy.Dial(network, addr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn c, nil\n\t\t}\n\t}\n\n\t// Configure TLS if needed.\n\tvar tlsConfig *tls.Config\n\tif !rpc.Cfg.SoloConfig.NoTLS && rpc.Cfg.SoloConfig.RPCCert != \"\" {\n\t\tpem, err := ioutil.ReadFile(rpc.Cfg.SoloConfig.RPCCert)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpool := x509.NewCertPool()\n\t\tpool.AppendCertsFromPEM(pem)\n\t\ttlsConfig = &tls.Config{\n\t\t\tRootCAs: pool,\n\t\t\tInsecureSkipVerify: rpc.Cfg.SoloConfig.NoTLS,\n\t\t}\n\t} else {\n\t\ttlsConfig = &tls.Config{\n\t\t\tInsecureSkipVerify: rpc.Cfg.SoloConfig.NoTLS,\n\t\t}\n\t}\n\n\t// Create and return the new HTTP client potentially configured with a\n\t// proxy and TLS.\n\tclient := http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tDial: dial,\n\t\t\tTLSClientConfig: tlsConfig,\n\t\t\tDialContext: (&net.Dialer{\n\t\t\t\tTimeout: time.Duration(rpc.Cfg.OptionConfig.Timeout) * time.Second,\n\t\t\t\tKeepAlive: time.Duration(rpc.Cfg.OptionConfig.Timeout) * time.Second,\n\t\t\t\tDualStack: true,\n\t\t\t}).DialContext,\n\t\t},\n\t}\n\treturn &client, nil\n}", "func NewHTTPClient(skipVerify bool, certPath string) (*http.Client, error) {\n\ttlsConfig := &tls.Config{\n\t\tInsecureSkipVerify: skipVerify,\n\t}\n\n\tif !skipVerify && certPath != \"\" {\n\t\tcert, err := os.ReadFile(certPath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcertPool, err := x509.SystemCertPool()\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"WARN: unable to get system cert pool: %v\\n\", err)\n\t\t\tcertPool = x509.NewCertPool()\n\t\t}\n\t\tcertPool.AppendCertsFromPEM(cert)\n\t\ttlsConfig.RootCAs = certPool\n\t}\n\n\treturn &http.Client{\n\t\tTimeout: 2 * time.Minute,\n\t\tTransport: &http.Transport{\n\t\t\tIdleConnTimeout: 2 * time.Minute,\n\t\t\tResponseHeaderTimeout: 2 * time.Minute,\n\t\t\tTLSClientConfig: tlsConfig,\n\t\t}}, nil\n}", "func testHttpClient() *http.Client {\n\treturn &http.Client{\n\t\tCheckRedirect: func(req *http.Request, via []*http.Request) error {\n\t\t\treturn http.ErrUseLastResponse\n\t\t},\n\t}\n}", "func newHTTPClient(cfg *OutboundCommConfig) (*http.Client, error) {\n\tvar err error\n\tvar caCertPool tlsCertPool.CertPool\n\tif cfg.CACertsPaths != \"\" {\n\t\tcaCertPool, err = tlsCertPool.NewCertPool(false)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"Failed to create new Cert Pool\")\n\t\t}\n\n\t\tcaCertsPaths := strings.Split(cfg.CACertsPaths, \",\")\n\t\tvar caCerts []string\n\t\tfor _, path := range caCertsPaths {\n\t\t\tif path == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Create a pool with server certificates\n\t\t\tcaCert, e := ioutil.ReadFile(filepath.Clean(path))\n\t\t\tif e != nil {\n\t\t\t\treturn nil, errors.Wrap(e, \"Failed Reading server 
certificate\")\n\t\t\t}\n\t\t\tcaCerts = append(caCerts, string(caCert))\n\t\t}\n\n\t\tcaCertPool.Add(tlsCertPool.DecodeCerts(caCerts)...)\n\t} else {\n\t\tcaCertPool, err = tlsCertPool.NewCertPool(true)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// update the config's caCertPool\n\tcfg.caCertPool = caCertPool\n\n\ttlsConfig, err := buildNewCertPool(cfg.caCertPool)\n\tif err != nil {\n\t\tlog.Printf(\"HTTP Transport - Failed to build/get Cert Pool: %s\", err)\n\t\treturn nil, err\n\t}\n\n\treturn &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tTLSClientConfig: tlsConfig,\n\t\t},\n\t\tTimeout: cfg.Timeout,\n\t}, nil\n}", "func HTTPClientFor(config *Config) (*http.Client, error) {\n\ttransport, err := TransportFor(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar httpClient *http.Client\n\tif transport != http.DefaultTransport || config.Timeout > 0 {\n\t\thttpClient = &http.Client{\n\t\t\tTransport: transport,\n\t\t\tTimeout: config.Timeout,\n\t\t}\n\t} else {\n\t\thttpClient = http.DefaultClient\n\t}\n\n\treturn httpClient, nil\n}", "func getTLSClient(config map[string]string) (http.Client) {\n // Load client cert\n cert, err := tls.LoadX509KeyPair(config[\"CERT_FILE\"], config[\"KEY_FILE\"])\n if err != nil {\n log.Fatal(err)\n }\n\n // Load CA cert\n caCert, err := ioutil.ReadFile(config[\"CA_FILE\"])\n if err != nil {\n log.Fatal(err)\n }\n caCertPool := x509.NewCertPool()\n caCertPool.AppendCertsFromPEM(caCert)\n\n // Setup HTTPS client\n tlsConfig := &tls.Config{\n Certificates: []tls.Certificate{cert},\n RootCAs: caCertPool,\n }\n tlsConfig.BuildNameToCertificate()\n transport := &http.Transport{TLSClientConfig: tlsConfig}\n client := &http.Client{Transport: transport}\n return *client\n}", "func NewHTTPClient(tc *trace.Client, orig *http.Client) *HTTPClient {\n\tif orig == nil {\n\t\torig = http.DefaultClient\n\t}\n\trt := orig.Transport\n\tif rt == nil {\n\t\trt = http.DefaultTransport\n\t}\n\tclient := http.Client{\n\t\tTransport: &tracerTransport{base: rt},\n\t\tCheckRedirect: orig.CheckRedirect,\n\t\tJar: orig.Jar,\n\t\tTimeout: orig.Timeout,\n\t}\n\treturn &HTTPClient{\n\t\tClient: client,\n\t\ttc: tc,\n\t}\n}", "func (i Config) httpClient() (*http.Client, error) {\n\n\tif i.ProxyURL != \"\" {\n\t\tp, err := url.Parse(i.ProxyURL)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"proxy provided but its URL is malformed\")\n\t\t}\n\t\tt := &http.Transport{\n\t\t\tProxy: http.ProxyURL(p),\n\t\t}\n\t\tif i.ProxyIgnoreTLS {\n\t\t\tt.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}\n\t\t}\n\t\tclient := &http.Client{\n\t\t\tTimeout: 30 * time.Second,\n\t\t\tTransport: t,\n\t\t}\n\t\treturn client, nil\n\t}\n\tclient := &http.Client{\n\t\tTimeout: 30 * time.Second,\n\t}\n\treturn client, nil\n}", "func CreateHTTPClient(roundTripper func(*http.Request) (*http.Response, error)) *http.Client {\n\treturn &http.Client{\n\t\tTransport: roundTripperFunc(roundTripper),\n\t}\n}", "func TracedHTTPClient(timeout time.Duration) *http.Client {\n\tot := project.DefaultHTTPTransport()\n\treturn &http.Client{\n\t\tTimeout: timeout,\n\t\tTransport: &withInjectedDataRoundTripper{ot},\n\t}\n}", "func newHTTPClient(\n\tapiKey string,\n\tdebug bool,\n\tomitRetry bool,\n\ttimeout time.Duration,\n\ttransport http.RoundTripper,\n) httpC {\n\tif transport == nil {\n\t\ttransport = http.DefaultTransport\n\t}\n\treturn &gcmHTTP{\n\t\tGCMURL: httpAddress,\n\t\tapiKey: apiKey,\n\t\thttpClient: &http.Client{\n\t\t\tTransport: transport,\n\t\t\tTimeout: 
timeout,\n\t\t},\n\t\tdebug: debug,\n\t\tomitRetry: omitRetry,\n\t}\n}", "func (o *GetDeploymentByIDV3UsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func (b *OGame) GetClient() *httpclient.Client {\n\treturn b.client\n}", "func (o *ConfigGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func NewHTTPClient(conn net.Conn, opt *codec.Option) (*Client, error) {\n\t_, _ = io.WriteString(conn, fmt.Sprintf(\"CONNECT %s HTTP/1.0\\n\\n\", defaultHandlePath))\n\n\tres, err := http.ReadResponse(bufio.NewReader(conn), &http.Request{Method: \"CONNECT\"})\n\tif err == nil && res.Status == \"200 Connected to Gingle RPC\" {\n\t\treturn NewRPCClient(conn, opt)\n\t}\n\n\tif err == nil {\n\t\terr = fmt.Errorf(\"client: failed to new http client, err: unexpected http response\")\n\t}\n\treturn nil, err\n}", "func NewHTTPClient(source Source) (*http.Client, error) {\n\tcerts, err := x509.SystemCertPool()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(source.CACerts) > 0 {\n\t\tfor i := range source.CACerts {\n\t\t\tcerts.AddCert(source.CACerts[i])\n\t\t}\n\t}\n\n\treturn &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\tInsecureSkipVerify: true,\n\t\t\t\tRootCAs: certs,\n\t\t\t},\n\t\t\tProxy: func(req *http.Request) (*url.URL, error) {\n\t\t\t\tif strings.TrimSpace(source.HTTPProxy) != \"\" {\n\t\t\t\t\tos.Setenv(\"HTTP_PROXY\", source.HTTPProxy)\n\t\t\t\t}\n\n\t\t\t\tif strings.TrimSpace(source.HTTPSProxy) != \"\" {\n\t\t\t\t\tos.Setenv(\"HTTPS_PROXY\", source.HTTPSProxy)\n\t\t\t\t}\n\n\t\t\t\tif strings.TrimSpace(source.NoProxy) != \"\" {\n\t\t\t\t\tos.Setenv(\"NO_PROXY\", source.NoProxy)\n\t\t\t\t}\n\n\t\t\t\treturn http.ProxyFromEnvironment(req)\n\t\t\t},\n\t\t},\n\t}, nil\n}", "func (p *TestProvider) GetClient(creds *common.Credentials) (*http.Client, error) {\n\targs := p.Called(creds)\n\treturn args.Get(0).(*http.Client), args.Error(1)\n}", "func (c *Context) GetClient() *http.Client {\n\treturn c.Client\n}", "func (o *ServiceInstanceGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func SetHTTPClient(client *http.Client) {\n\thttpClient = client\n}", "func newHTTPClient(cfg *Config) (*http.Client, error) {\n\t// Configure proxy if needed.\n\tvar dial func(network, addr string) (net.Conn, error)\n\tif cfg.Proxy != \"\" {\n\t\tproxy := &socks.Proxy{\n\t\t\tAddr: cfg.Proxy,\n\t\t\tUsername: cfg.ProxyUser,\n\t\t\tPassword: cfg.ProxyPass,\n\t\t}\n\t\tdial = func(network, addr string) (net.Conn, error) {\n\t\t\tc, err := proxy.Dial(network, addr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn c, nil\n\t\t}\n\t}\n\n\t// Configure TLS if needed.\n\tvar tlsConfig *tls.Config\n\tif !cfg.NoTLS {\n\t\ttlsConfig = &tls.Config{\n\t\t\tInsecureSkipVerify: cfg.TLSSkipVerify,\n\t\t}\n\t\tif !cfg.TLSSkipVerify && cfg.RPCCert != \"\" {\n\t\t\tpem, err := ioutil.ReadFile(cfg.RPCCert)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tpool := x509.NewCertPool()\n\t\t\tif ok := pool.AppendCertsFromPEM(pem); !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid certificate file: %v\",\n\t\t\t\t\tcfg.RPCCert)\n\t\t\t}\n\t\t\ttlsConfig.RootCAs = pool\n\t\t}\n\t}\n\n\ttimeout, _ := time.ParseDuration(\"30s\")\n\n\t// Create and return the new HTTP client potentially configured with a\n\t// proxy and TLS.\n\tclient := http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tDial: dial,\n\t\t\tTLSClientConfig: tlsConfig,\n\t\t},\n\t\tTimeout: 
timeout,\n\t}\n\treturn &client, nil\n}", "func (m *MockIPushyClient) GetHTTPClient() pushy.IHTTPClient {\n\tret := m.ctrl.Call(m, \"GetHTTPClient\")\n\tret0, _ := ret[0].(pushy.IHTTPClient)\n\treturn ret0\n}", "func NewHTTPClient(apiEndpoint string, pageSize int64, setAuth func(r *http.Request)) *APIClient {\n\treturn &APIClient{\n\t\tconn: connector.NewHTTPConnector(apiEndpoint, pageSize, setAuth),\n\t}\n}", "func CreateHTTPClient(requestURL string) (*Client, error) {\n\t_, err := url.ParseRequestURI(requestURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Client{\n\t\tHTTPClient: &http.Client{\n\t\t\tTimeout: time.Duration(requestTimeout) * time.Second,\n\t\t},\n\t\tbaseURL: requestURL,\n\t}, nil\n}", "func useGoClient(t *testing.T, client *go_http.Client, path string, method string,\n\treqBodyData []byte, statusCode int, expectedRespBodyData []byte) {\n\turl := HTTPHost + path\n\tvar resp *go_http.Response\n\tvar err error\n\tif method == MethodGet {\n\t\tresp, err = client.Get(url)\n\t} else {\n\t\tresp, err = client.Post(url, HeaderContentTypeValue, bytes.NewReader(reqBodyData))\n\t}\n\tif err != nil || resp == nil {\n\t\tt.Fatalf(\"Get(%v) failed, error: %v\", url, err)\n\t} else {\n\t\tif resp.StatusCode != statusCode {\n\t\t\tt.Fatalf(\"Get(%v) status=%v, expected=%v\",\n\t\t\t\turl, resp.StatusCode, statusCode)\n\t\t}\n\t\trespBodyData, _ := ioutil.ReadAll(resp.Body)\n\t\tresp.Body.Close()\n\t\tif bytes.Compare(respBodyData, expectedRespBodyData) != 0 {\n\t\t\tt.Fatalf(\"Get(%v) body=%v, expected=%v\",\n\t\t\t\turl, string(respBodyData), string(expectedRespBodyData))\n\t\t}\n\t}\n}", "func (p *Provider) Client() *http.Client {\n\treturn goth.HTTPClientWithFallBack(p.HTTPClient)\n}", "func CtxHttpClient(src *Source) *http.Client {\n\tval := src.Context().Value(HttpClientContextKey)\n\tif val == nil {\n\t\treturn nil\n\t}\n\treturn val.(*http.Client)\n}", "func NewClient(options *ClientOptions, customHTTPClient *http.Client,\r\n\tcustomEnvironment string) (c *Client) {\r\n\r\n\t// Create a client\r\n\tc = new(Client)\r\n\r\n\t// Set options (either default or user modified)\r\n\tif options == nil {\r\n\t\toptions = DefaultClientOptions()\r\n\t}\r\n\r\n\t// Set the options\r\n\tc.Options = options\r\n\r\n\t// Set the environment\r\n\tvar found bool\r\n\tif c.Environment, found = environments[customEnvironment]; !found {\r\n\t\tc.Environment = environments[EnvironmentProduction]\r\n\t}\r\n\r\n\t// Is there a custom HTTP client to use?\r\n\tif customHTTPClient != nil {\r\n\t\tc.httpClient = customHTTPClient\r\n\t\treturn\r\n\t}\r\n\r\n\t// dial is the net dialer for clientDefaultTransport\r\n\tdial := &net.Dialer{KeepAlive: options.DialerKeepAlive, Timeout: options.DialerTimeout}\r\n\r\n\t// clientDefaultTransport is the default transport struct for the HTTP client\r\n\tclientDefaultTransport := &http.Transport{\r\n\t\tDialContext: dial.DialContext,\r\n\t\tExpectContinueTimeout: options.TransportExpectContinueTimeout,\r\n\t\tIdleConnTimeout: options.TransportIdleTimeout,\r\n\t\tMaxIdleConns: options.TransportMaxIdleConnections,\r\n\t\tProxy: http.ProxyFromEnvironment,\r\n\t\tTLSHandshakeTimeout: options.TransportTLSHandshakeTimeout,\r\n\t}\r\n\r\n\t// Determine the strategy for the http client\r\n\tif options.RequestRetryCount <= 0 {\r\n\r\n\t\t// no retry enabled\r\n\t\tc.httpClient = httpclient.NewClient(\r\n\t\t\thttpclient.WithHTTPTimeout(options.RequestTimeout),\r\n\t\t\thttpclient.WithHTTPClient(&http.Client{\r\n\t\t\t\tTransport: 
clientDefaultTransport,\r\n\t\t\t\tTimeout: options.RequestTimeout,\r\n\t\t\t}),\r\n\t\t)\r\n\t\treturn\r\n\t}\r\n\r\n\t// Retry enabled - create exponential back-off\r\n\tc.httpClient = httpclient.NewClient(\r\n\t\thttpclient.WithHTTPTimeout(options.RequestTimeout),\r\n\t\thttpclient.WithRetrier(heimdall.NewRetrier(\r\n\t\t\theimdall.NewExponentialBackoff(\r\n\t\t\t\toptions.BackOffInitialTimeout,\r\n\t\t\t\toptions.BackOffMaxTimeout,\r\n\t\t\t\toptions.BackOffExponentFactor,\r\n\t\t\t\toptions.BackOffMaximumJitterInterval,\r\n\t\t\t))),\r\n\t\thttpclient.WithRetryCount(options.RequestRetryCount),\r\n\t\thttpclient.WithHTTPClient(&http.Client{\r\n\t\t\tTransport: clientDefaultTransport,\r\n\t\t\tTimeout: options.RequestTimeout,\r\n\t\t}),\r\n\t)\r\n\r\n\treturn\r\n}", "func (o *GetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}", "func NewHTTPClient(url, endpoint string, timeout time.Duration) *HTTPClient {\n\treturn &HTTPClient{\n\t\turl: url,\n\t\thttpClient: &http.Client{Timeout: timeout},\n\t\tendPoint: endpoint,\n\t}\n}", "func NewHTTPClient(url string, backend Backend) (*HTTPClient, error) {\n b := backend\n if b == nil {\n b = newDefaultBackend()\n }\n return &HTTPClient{url: url, backend: b}, nil\n}", "func (t *BasicAuth) Client() *http.Client {\n\treturn &http.Client{Transport: t}\n}", "func createDefaultHTTPClient(c *Client) HTTPInterface {\n\n\t// dial is the net dialer for clientDefaultTransport\n\tdial := &net.Dialer{\n\t\tKeepAlive: c.options.httpOptions.DialerKeepAlive,\n\t\tTimeout: c.options.httpOptions.DialerTimeout,\n\t}\n\n\t// clientDefaultTransport is the default transport struct for the HTTP client\n\tclientDefaultTransport := &http.Transport{\n\t\tDialContext: dial.DialContext,\n\t\tExpectContinueTimeout: c.options.httpOptions.TransportExpectContinueTimeout,\n\t\tIdleConnTimeout: c.options.httpOptions.TransportIdleTimeout,\n\t\tMaxIdleConns: c.options.httpOptions.TransportMaxIdleConnections,\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tTLSHandshakeTimeout: c.options.httpOptions.TransportTLSHandshakeTimeout,\n\t}\n\n\t// Determine the strategy for the http client (no retry enabled)\n\tif c.options.httpOptions.RequestRetryCount <= 0 {\n\t\treturn httpclient.NewClient(\n\t\t\thttpclient.WithHTTPTimeout(c.options.httpOptions.RequestTimeout),\n\t\t\thttpclient.WithHTTPClient(&http.Client{\n\t\t\t\tTransport: clientDefaultTransport,\n\t\t\t\tTimeout: c.options.httpOptions.RequestTimeout,\n\t\t\t}),\n\t\t)\n\t}\n\n\t// Create exponential back-off\n\tbackOff := heimdall.NewExponentialBackoff(\n\t\tc.options.httpOptions.BackOffInitialTimeout,\n\t\tc.options.httpOptions.BackOffMaxTimeout,\n\t\tc.options.httpOptions.BackOffExponentFactor,\n\t\tc.options.httpOptions.BackOffMaximumJitterInterval,\n\t)\n\n\treturn httpclient.NewClient(\n\t\thttpclient.WithHTTPTimeout(c.options.httpOptions.RequestTimeout),\n\t\thttpclient.WithRetrier(heimdall.NewRetrier(backOff)),\n\t\thttpclient.WithRetryCount(c.options.httpOptions.RequestRetryCount),\n\t\thttpclient.WithHTTPClient(&http.Client{\n\t\t\tTransport: clientDefaultTransport,\n\t\t\tTimeout: c.options.httpOptions.RequestTimeout,\n\t\t}),\n\t)\n}" ]
[ "0.840997", "0.84031147", "0.82243925", "0.8215249", "0.81582445", "0.8098461", "0.7828292", "0.781695", "0.77948695", "0.7714", "0.7694769", "0.7684848", "0.7631528", "0.7596621", "0.7594578", "0.7573144", "0.75554603", "0.7527961", "0.75186354", "0.7507211", "0.74942607", "0.74839926", "0.74577355", "0.7419502", "0.741676", "0.7384024", "0.7366282", "0.7328323", "0.73192316", "0.7269892", "0.7266482", "0.724055", "0.7238345", "0.72309697", "0.7212839", "0.7205916", "0.72054434", "0.7204973", "0.7200476", "0.7184442", "0.71790045", "0.71775186", "0.7147574", "0.71299076", "0.7103926", "0.7094556", "0.7066621", "0.7062567", "0.70419437", "0.7019961", "0.6976314", "0.6970667", "0.69650835", "0.6919259", "0.6917691", "0.69145834", "0.6913251", "0.6910143", "0.69069815", "0.69067484", "0.68997455", "0.6896634", "0.68964785", "0.68960005", "0.68907034", "0.68834275", "0.68827903", "0.6876771", "0.6863856", "0.6851762", "0.68362683", "0.68339294", "0.6832752", "0.6830537", "0.6827337", "0.6822128", "0.6806073", "0.68015146", "0.67898995", "0.6788868", "0.67825896", "0.67824566", "0.6781874", "0.6780724", "0.67802525", "0.6775209", "0.6772859", "0.67621005", "0.676122", "0.67559826", "0.67512894", "0.6751107", "0.6746652", "0.6744989", "0.6744411", "0.67400765", "0.6735772", "0.67319185", "0.67310923", "0.6719735" ]
0.7969526
6
Helper for tests and output
func JsonPrint(data interface{}) {
	var p []byte
	p, err := json.Marshal(data)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%s \n", p)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func writeout() {\n\tworden := \"test, test\" // temporary\n\n\tfmt.Println(bar)\n\tfmt.Printf(\"%v\\n%v %s %v\\n%v\", bar, sd, worden, sd, bar)\n\t/* file io should replace prints */\n}", "func TestCaptureUserOut(t *testing.T) {\n\tassert := asrt.New(t)\n\trestoreOutput := util.CaptureUserOut()\n\ttext := util.RandString(128)\n\toutput.UserOut.Println(text)\n\tout := restoreOutput()\n\n\tassert.Contains(out, text)\n}", "func output(res []Result, format string) {\n\tswitch format {\n\tcase \"plain\":\n\t\tconst sep = \": \"\n\t\t// Aligned printing.\n\t\tlongest := 0\n\t\tfor _, r := range res {\n\t\t\tfor _, ri := range r.Implementers {\n\t\t\t\tpath := filepath.Base(ri.Pos.String())\n\t\t\t\tif len(path)+len(sep) > longest {\n\t\t\t\t\tlongest = len(path) + len(sep)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor i, r := range res {\n\t\t\tif len(r.Implementers) == 0 {\n\t\t\t\tfmt.Println(\"No implementing types.\")\n\t\t\t}\n\t\t\tfor _, ri := range r.Implementers {\n\t\t\t\tpath := filepath.Base(ri.Pos.String())\n\t\t\t\tfmt.Printf(\"%-*s%s\\n\", longest, path+sep, ri.Name)\n\t\t\t}\n\t\t\tif i != len(res)-1 {\n\t\t\t\tfmt.Println()\n\t\t\t}\n\t\t}\n\tcase \"json\":\n\t\tb, err := json.MarshalIndent(res, \"\", \" \")\n\t\tif err != nil {\n\t\t\tlogger.Fatal(err)\n\t\t}\n\t\tfmt.Printf(\"%s\\n\", b)\n\tcase \"xml\":\n\t\tb, err := xml.MarshalIndent(res, \"\", \" \")\n\t\tif err != nil {\n\t\t\tlogger.Fatal(err)\n\t\t}\n\t\tfmt.Printf(\"%s\\n\", b)\n\t}\n}", "func (f *FakeOutput) Type() string { return \"fake_output\" }", "func TestProcessAlbumNumber_GoodOutput(t *testing.T){\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n if req.URL.String() == \"/photos?albumId=1\"{\n fmt.Fprint(w,`[\n {\n \"albumId\": 1,\n \"id\": 1,\n \"title\": \"accusamus beatae ad facilis cum similique qui sunt\",\n \"url\": \"https://via.placeholder.com/600/92c952\",\n \"thumbnailUrl\": \"https://via.placeholder.com/150/92c952\"\n },\n {\n \"albumId\": 1,\n \"id\": 2,\n \"title\": \"reprehenderit est deserunt velit ipsam\",\n \"url\": \"https://via.placeholder.com/600/771796\",\n \"thumbnailUrl\": \"https://via.placeholder.com/150/771796\"\n }\n]`)\n\n }else{\n fmt.Fprint(w,`{\"Some Error?\"}`)\n }\n }))\n\n defer server.Close()\n\n api = API{server.Client(), server.URL}\n\n r, w, _ := os.Pipe()\n os.Stdout = w\n\n processAlbumNumber(1)\n\n w.Close()\n out, _ := ioutil.ReadAll(r)\n\n if string(out) !=\"\\n[1] accusamus beatae ad facilis cum similique qui sunt\\n\\n[2] reprehenderit est deserunt velit ipsam\\n\"{\n t.Errorf(\"processAlbumNumber(1) printed an unexpected output: %s\", string(out))\n }\n\n\n\n}", "func printOutputHeader() {\n\tfmt.Println(\"\")\n\tfmt.Println(\"FreeTAXII TestLab - Basic Connectivity Tests\")\n\tfmt.Println(\"Copyright: Bret Jordan\")\n\tfmt.Println(\"Version:\", Version)\n\tif Build != \"\" {\n\t\tfmt.Println(\"Build:\", Build)\n\t}\n\tfmt.Println(\"\")\n}", "func (f *FakeOutput) Outputs() []operator.Operator { return nil }", "func TestCaptureStdOut(t *testing.T) {\n\tassert := asrt.New(t)\n\trestoreOutput := util.CaptureStdOut()\n\ttext := util.RandString(128)\n\tfmt.Println(text)\n\tout := restoreOutput()\n\n\tassert.Contains(out, text)\n}", "func writeTestmain(out string, t *testFuncs) error {\n\tf, err := os.Create(out)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tif err := testmainTmpl.Execute(f, t); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func Test_DebugLogger_Log_StandardOut(t *testing.T) {\n\tdl := 
NewDebugLogger(\"!!!! \")\n\tdl.Log(\"Two is %d\", 2)\n}", "func (t *Test) StdOut() *bytes.Buffer {\n\treturn t.stdOut\n}", "func (r *reporter) Helper() {}", "func TestOutputs(t *testing.T) {\n\toutputs := Outputs()\n\tdefer SetOutputs(outputs...)\n\n\t// All outputs.\n\toutput := Output{\n\t\tName: \"test\",\n\t\tWriter: os.Stdout,\n\t}\n\n\terr := SetOutputs(output)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tout := Outputs()\n\tif len(out) != 1 {\n\t\tt.Errorf(\"Outputs did not return the correct number of outputs\")\n\t}\n\n\tif out[0].Name != output.Name {\n\t\tt.Errorf(\"Outputs did not return the correct output\")\n\t}\n\n\t// Only std*.\n\tSetOutputs(output, Stdout, Stderr)\n\tout = Outputs(Stdout.Name, Stderr.Name)\n\tif len(out) != 2 {\n\t\tt.Errorf(\"Outputs did not return the correct number of outputs\")\n\t}\n}", "func TestPrewriteWritten4A(t *testing.T) {\n}", "func (m *MockManager) Output(component string) (map[string][2]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Output\", component)\n\tret0, _ := ret[0].(map[string][2]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func testCode() string {\n consoleCommand := `go test -v` // Single place to alter the command line command\n output := ``\n\n output += breakspace + cBold + cCyan + \" Golang Function Tests:\" + cClr\n output += breakspace + cYellow + \" > \" + cClr + consoleCommand + breakspace\n\n output += breakspace\n\n testOutput, _ := exec.Command( \"cmd\", \"/c\", consoleCommand ).Output()\n\n // Alternative code:\n //cmd.Stdout = os.Stdout\n //cmd.Run()\n\n return output + statusColorize( string(testOutput) )\n}", "func getTest(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprint(w, \"test\")\n}", "func (*HTML) isOutput() {\n}", "func (t *SpecTest) String() string { return strings.Join(t.descstack, \" \") }", "func parseAndStoreTestOutput(\n\tlogger *logrus.Logger,\n\tread io.Reader,\n\toutputDir string,\n) {\n\tlogWriter := LogWriter{\n\t\tlookup: make(map[string]*os.File),\n\t\toutputDir: outputDir,\n\t}\n\tdefer logWriter.closeFiles(logger)\n\n\t// Track some state that persists across lines\n\ttestResultMarkers := TestResultMarkerStack{}\n\tpreviousTestName := \"\"\n\n\tvar err error\n\treader := bufio.NewReader(read)\n\tfor {\n\t\tvar data string\n\t\tdata, err = reader.ReadString('\\n')\n\t\tif len(data) == 0 && err == io.EOF {\n\t\t\tbreak\n\t\t}\n\n\t\tdata = strings.TrimSuffix(data, \"\\n\")\n\n\t\t// separate block so that we do not overwrite the err variable that we need afterwards to check if we're done\n\t\t{\n\t\t\tindentLevel := len(getIndent(data))\n\t\t\tisIndented := indentLevel > 0\n\n\t\t\t// Garbage collection of test result markers. Primary purpose is to detect when we dedent out, which can only be\n\t\t\t// detected when we reach a dedented line.\n\t\t\ttestResultMarkers = testResultMarkers.removeDedentedTestResultMarkers(indentLevel)\n\n\t\t\t// Handle each possible category of test lines\n\t\t\tswitch {\n\t\t\tcase isSummaryLine(data):\n\t\t\t\tlogWriter.writeLog(logger, \"summary\", data)\n\n\t\t\tcase isStatusLine(data):\n\t\t\t\ttestName := getTestNameFromStatusLine(data)\n\t\t\t\tpreviousTestName = testName\n\t\t\t\tlogWriter.writeLog(logger, testName, data)\n\n\t\t\tcase strings.HasPrefix(data, \"Test\"):\n\t\t\t\t// Heuristic: `go test` will only execute test functions named `Test.*`, so we assume any line prefixed\n\t\t\t\t// with `Test` is a test output for a named test. 
Also assume that test output will be space delimeted and\n\t\t\t\t// test names can't contain spaces (because they are function names).\n\t\t\t\t// This must be modified when `logger.DoLog` changes.\n\t\t\t\tvals := strings.Split(data, \" \")\n\t\t\t\ttestName := vals[0]\n\t\t\t\tpreviousTestName = testName\n\t\t\t\tlogWriter.writeLog(logger, testName, data)\n\n\t\t\tcase isIndented && isResultLine(data):\n\t\t\t\t// In a nested test result block, so collect the line into all the test results we have seen so far.\n\t\t\t\tfor _, marker := range testResultMarkers {\n\t\t\t\t\tlogWriter.writeLog(logger, marker.TestName, data)\n\t\t\t\t}\n\n\t\t\tcase isPanicLine(data):\n\t\t\t\t// When panic, we want all subsequent nonstandard test lines to roll up to the summary\n\t\t\t\tpreviousTestName = \"summary\"\n\t\t\t\tlogWriter.writeLog(logger, \"summary\", data)\n\n\t\t\tcase isResultLine(data):\n\t\t\t\t// We ignore result lines, because that is handled specially below.\n\n\t\t\tcase previousTestName != \"\":\n\t\t\t\t// Base case: roll up to the previous test line, if it exists.\n\t\t\t\t// Handles case where terratest log has entries with newlines in them.\n\t\t\t\tlogWriter.writeLog(logger, previousTestName, data)\n\n\t\t\tdefault:\n\t\t\t\tlogger.Warnf(\"Found test line that does not match known cases: %s\", data)\n\t\t\t}\n\n\t\t\t// This has to happen separately from main if block to handle the special case of nested tests (e.g table driven\n\t\t\t// tests). For those result lines, we want it to roll up to the parent test, so we need to run the handler in\n\t\t\t// the `isIndented` section. But for both root and indented result lines, we want to execute the following code,\n\t\t\t// hence this special block.\n\t\t\tif isResultLine(data) {\n\t\t\t\ttestName := getTestNameFromResultLine(data)\n\t\t\t\tlogWriter.writeLog(logger, testName, data)\n\t\t\t\tlogWriter.writeLog(logger, \"summary\", data)\n\n\t\t\t\tmarker := TestResultMarker{\n\t\t\t\t\tTestName: testName,\n\t\t\t\t\tIndentLevel: indentLevel,\n\t\t\t\t}\n\t\t\t\ttestResultMarkers = testResultMarkers.push(marker)\n\t\t\t}\n\t\t}\n\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\tif err != io.EOF {\n\t\tlogger.Fatalf(\"Error reading from Reader: %s\", err)\n\t}\n}", "func (f *FakeOutput) CanOutput() bool { return false }", "func TestReportOutput(t *testing.T) {\n\ttt := &testingT{}\n\tc := qt.New(tt)\n\tc.Assert(42, qt.Equals, 47)\n\twant := `\nerror:\n values are not equal\ngot:\n int(42)\nwant:\n int(47)\nstack:\n $file:18\n c.Assert(42, qt.Equals, 47)\n`\n\tassertReport(t, tt, want)\n}", "func getResult(testCase []string) string {\n\tfirst := exec.Command(\"go\", \"run\", \".\", testCase[0], testCase[1])\n\tsecond := exec.Command(\"cat\", \"-e\")\n\treader, writer := io.Pipe()\n\tfirst.Stdout = writer\n\tsecond.Stdin = reader\n\tvar buffer bytes.Buffer\n\tsecond.Stdout = &buffer\n\tfirst.Start()\n\tsecond.Start()\n\tfirst.Wait()\n\twriter.Close()\n\tsecond.Wait()\n\treturn buffer.String()\n}", "func (v *Venom) OutputResult() error {\n\tif v.OutputDir == \"\" {\n\t\treturn nil\n\t}\n\tcleanedTs := []TestSuite{}\n\tfor i := range v.Tests.TestSuites {\n\t\ttcFiltered := []TestCase{}\n\t\tfor _, tc := range v.Tests.TestSuites[i].TestCases {\n\t\t\tif tc.IsEvaluated {\n\t\t\t\ttcFiltered = append(tcFiltered, tc)\n\t\t\t}\n\t\t}\n\t\tv.Tests.TestSuites[i].TestCases = tcFiltered\n\t\tts := v.CleanUpSecrets(v.Tests.TestSuites[i])\n\t\tcleanedTs = append(cleanedTs, ts)\n\n\t\ttestsResult := &Tests{\n\t\t\tTestSuites: 
[]TestSuite{ts},\n\t\t\tStatus: v.Tests.Status,\n\t\t\tNbTestsuitesFail: v.Tests.NbTestsuitesFail,\n\t\t\tNbTestsuitesPass: v.Tests.NbTestsuitesPass,\n\t\t\tNbTestsuitesSkip: v.Tests.NbTestsuitesSkip,\n\t\t\tDuration: v.Tests.Duration,\n\t\t\tStart: v.Tests.Start,\n\t\t\tEnd: v.Tests.End,\n\t\t}\n\n\t\tvar data []byte\n\t\tvar err error\n\n\t\tswitch v.OutputFormat {\n\t\tcase \"json\":\n\t\t\tdata, err = json.MarshalIndent(testsResult, \"\", \" \")\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"Error: cannot format output json (%s)\", err)\n\t\t\t}\n\t\tcase \"tap\":\n\t\t\tdata, err = outputTapFormat(*testsResult)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"Error: cannot format output tap (%s)\", err)\n\t\t\t}\n\t\tcase \"yml\", \"yaml\":\n\t\t\tdata, err = yaml.Marshal(testsResult)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"Error: cannot format output yaml (%s)\", err)\n\t\t\t}\n\t\tcase \"xml\":\n\t\t\tdata, err = outputXMLFormat(*testsResult)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"Error: cannot format output xml (%s)\", err)\n\t\t\t}\n\t\tcase \"html\":\n\t\t\treturn errors.New(\"Error: you have to use the --html-report flag\")\n\t\t}\n\n\t\tfname := strings.TrimSuffix(ts.Filepath, filepath.Ext(ts.Filepath))\n\t\tfname = strings.ReplaceAll(fname, \"/\", \"_\")\n\t\tfilename := path.Join(v.OutputDir, \"test_results_\"+fname+\".\"+v.OutputFormat)\n\t\tif err := os.WriteFile(filename, data, 0600); err != nil {\n\t\t\treturn fmt.Errorf(\"Error while creating file %s: %v\", filename, err)\n\t\t}\n\t\tv.PrintFunc(\"Writing file %s\\n\", filename)\n\t}\n\n\tif v.HtmlReport {\n\t\ttestsResult := &Tests{\n\t\t\tTestSuites: cleanedTs,\n\t\t\tStatus: v.Tests.Status,\n\t\t\tNbTestsuitesFail: v.Tests.NbTestsuitesFail,\n\t\t\tNbTestsuitesPass: v.Tests.NbTestsuitesPass,\n\t\t\tNbTestsuitesSkip: v.Tests.NbTestsuitesSkip,\n\t\t\tDuration: v.Tests.Duration,\n\t\t\tStart: v.Tests.Start,\n\t\t\tEnd: v.Tests.End,\n\t\t}\n\n\t\tdata, err := outputHTML(testsResult)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"Error: cannot format output html\")\n\t\t}\n\t\tvar filename = filepath.Join(v.OutputDir, computeOutputFilename(\"test_results.html\"))\n\t\tv.PrintFunc(\"Writing html file %s\\n\", filename)\n\t\tif err := os.WriteFile(filename, data, 0600); err != nil {\n\t\t\treturn errors.Wrapf(err, \"Error while creating file %s\", filename)\n\t\t}\n\t}\n\n\treturn nil\n}", "func TestImplsOutput(t *testing.T) { //nolint:gocognit\n\tconst filename = \"lg_test.go\"\n\n\tvar lineParts = [][]string{\n\t\t{\"DEBUG\", \"Debug msg\"},\n\t\t{\"DEBUG\", \"Debugf msg\"},\n\t\t{\"WARN\", \"Warn msg\"},\n\t\t{\"WARN\", \"Warnf msg\"},\n\t\t{\"ERROR\", \"Error msg\"},\n\t\t{\"ERROR\", \"Errorf msg\"},\n\t\t{\"WARN\", \"WarnIfError msg\"},\n\t\t{\"WARN\", \"error: WarnIfFuncError msg\"},\n\t\t{\"WARN\", \"error: WarnIfCloseError msg\"},\n\t}\n\n\t// testCases are the main configurable params (level and caller)\n\t// to the log impl constructs. 
Timestamp param is not tested.\n\ttestCases := []struct {\n\t\tlevel bool\n\t\tcaller bool\n\t}{\n\t\t{level: true, caller: true},\n\t\t{level: true, caller: false},\n\t\t{level: false, caller: true},\n\t\t{level: false, caller: false},\n\t}\n\n\tlogImpls := []struct {\n\t\tname string\n\t\tnewFn func(w io.Writer, level, caller bool) lg.Log\n\t}{\n\t\t{\"zaplg\", func(w io.Writer, level, caller bool) lg.Log {\n\t\t\treturn zaplg.NewWith(w, \"text\", false, true, level, caller, 0)\n\t\t}},\n\t}\n\n\tfor _, logImpl := range logImpls {\n\t\tlogImpl := logImpl\n\n\t\tt.Run(logImpl.name, func(t *testing.T) {\n\t\t\tfor _, tc := range testCases {\n\t\t\t\ttc := tc\n\n\t\t\t\tt.Run(fmt.Sprintf(\"level_%v__caller_%v\", tc.level, tc.caller), func(t *testing.T) {\n\t\t\t\t\tbuf := &bytes.Buffer{}\n\n\t\t\t\t\tlog := logImpl.newFn(buf, tc.level, tc.caller)\n\t\t\t\t\tlogItAll(log)\n\n\t\t\t\t\tsc := bufio.NewScanner(buf)\n\t\t\t\t\tvar gotLines []string\n\t\t\t\t\tfor sc.Scan() {\n\t\t\t\t\t\tgotLines = append(gotLines, sc.Text())\n\t\t\t\t\t}\n\n\t\t\t\t\trequire.NoError(t, sc.Err())\n\t\t\t\t\trequire.Equal(t, len(lineParts), len(gotLines))\n\n\t\t\t\t\tfor i, gotLine := range gotLines {\n\t\t\t\t\t\tif tc.caller {\n\t\t\t\t\t\t\trequire.Contains(t, gotLine, filename, \"caller should be printed\")\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\trequire.NotContains(t, gotLine, filename, \"caller should not be printed\")\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif tc.level {\n\t\t\t\t\t\t\trequire.Contains(t, gotLine, lineParts[i][0], \"level should be printed\")\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\trequire.NotContains(t, gotLine, lineParts[i][0], \"level should not be printed\")\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\trequire.Contains(t, gotLine, lineParts[i][1], \"log msg should be printed\")\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}", "func TestPrintLog( test *testing.T ) {\n testLog := ErrorLog{ nil , nil }\n\n testLog.addLog( \"First Log\\n\" )\n testLog.addLog( \"Second Log\\n\" )\n testLog.addLog( \"Third Log\\n\" )\n\n print(\"Expect:\\nFirst Log\\nSecond Log\\nThird Log\\nGot:\\n\")\n\n testLog.printLog()\n}", "func makeOutput(abc *st.Art, coloring bool) {\n\tindex := 0\n\tfor i := abc.Output.Index - 8; i < abc.Output.Index; i++ {\n\t\tline := abc.Alphabet.Rune[abc.Alphabet.Letter][index]\n\t\tif coloring {\n\t\t\tabc.Output.Final[i] += abc.Flag.Color.Case1 + line + abc.Flag.Color.Case2\n\t\t} else {\n\t\t\tabc.Output.Final[i] += line\n\t\t}\n\t\tindex++\n\t}\n}", "func writeOutput(outputType string, requestedKeys []string, resultMap map[string]string, wr io.Writer, prefix string, suffix string, suppressBlankKeys bool) error {\n\tprocessedMap := make(map[string]string, len(resultMap))\n\tfor k, v := range resultMap {\n\t\t// If suppressing blank keys, skip the key entirely.\n\t\tif suppressBlankKeys && v == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tprocessedMap[k] = fmt.Sprintf(\"%s%s%s\", prefix, v, suffix)\n\t}\n\t// Filter out requested keys which were suppressed in the previous step,\n\t// but maintain their order.\n\tprocessedKeys := []string{}\n\tfor _, key := range requestedKeys {\n\t\tif _, ok := processedMap[key]; ok {\n\t\t\tprocessedKeys = append(processedKeys, key)\n\t\t}\n\t}\n\n\t// Do output processing.\n\tswitch *output {\n\tcase OutputSimple:\n\t\t// Print out the found keys in the order they were requested,\n\t\t// with blank lines for missing keys.\n\t\tfor _, name := range processedKeys {\n\t\t\tvalue, _ := processedMap[name]\n\t\t\tfmt.Fprintln(wr, value)\n\t\t}\n\tcase OutputAsFlags:\n\t\t// Print 
out the found keys in the order they were requested, formatted\n\t\t// as --key=value command line flags.\n\t\tfor _, name := range processedKeys {\n\t\t\tvalue, _ := processedMap[name]\n\t\t\tfmt.Fprintf(wr, \"%s=%s \", name, value)\n\t\t}\n\tcase OutputOneline:\n\t\t// Print out the found keys in the order they were requested,\n\t\t// with empty strings for missing keys.\n\t\tvar outputEntries []string\n\t\tfor _, name := range processedKeys {\n\t\t\tvalue, _ := processedMap[name]\n\t\t\toutputEntries = append(outputEntries, value)\n\t\t}\n\n\t\tfmt.Fprintln(wr, shellquote.Join(outputEntries...))\n\tcase OutputEnv:\n\t\t// Print out the found keys in the order they were requested suitable\n\t\t// for eval'ing as shell script data. We make some effort to do escaping\n\t\t// here so the trivial case will work with Docker's brain-dead env-file\n\t\t// parser.\n\t\tfor _, name := range processedKeys {\n\t\t\tvalue, _ := processedMap[name]\n\t\t\tfmt.Fprintf(wr, \"%s=%s\\n\", name, shellquote.Join(value))\n\t\t}\n\tcase OutputJson:\n\t\t// Output the keys as a JSON object. This is suitable for many things,\n\t\t// specifically p2cli input\n\t\tjsonBytes, err := json.Marshal(processedMap)\n\t\tif err != nil {\n\t\t\tlog.Errorln(\"Error marshalling JSON:\", err)\n\t\t\treturn err\n\t\t}\n\t\tif _, err := wr.Write(jsonBytes); err != nil {\n\t\t\tlog.Errorln(\"Error writing to stdout.\")\n\t\t\treturn err\n\t\t}\n\tcase OutputJsonPretty:\n\t\tjsonBytes, err := json.MarshalIndent(processedMap, \"\", \" \")\n\t\tif err != nil {\n\t\t\tlog.Errorln(\"Error marshalling JSON:\", err)\n\t\t\treturn err\n\t\t}\n\t\tif _, err := wr.Write(jsonBytes); err != nil {\n\t\t\tlog.Errorln(\"Error writing to stdout:\", err)\n\t\t\treturn err\n\t\t}\n\tdefault:\n\t\tlog.Errorln(\"Invalid output format specified.\")\n\t\treturn errors.New(\"Invalid output format specified.\")\n\t}\n\n\treturn nil\n}", "func (s TestingSingleton) Output(file string) TestingBuildParams {\n\treturn buildParamsFromOutput(s.provider, file)\n}", "func (m *MockapprunnerDescriber) Outputs() (map[string]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Outputs\")\n\tret0, _ := ret[0].(map[string]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (Output) typ() string { return \"output1\" }", "func TestExampleFWriteCustomOutput() {\n\tow := NewOutputWriter()\n\t// Write 3 lines and assert correct result\n\tow.StartLine()\n\tow.AppendData(\"ID\", \"1\")\n\tow.AppendData(\"Key\", \"Raspberry\")\n\tow.AppendData(\"Desc\", \"first\")\n\tow.StartLine()\n\tow.AppendData(\"ID\", \"2\")\n\tow.AppendData(\"Key\", \"Pi\")\n\tow.AppendData(\"Desc\", \"second\")\n\tow.StartLine()\n\tow.AppendData(\"ID\", \"3\")\n\tow.AppendData(\"Key\", \"Zero\")\n\tow.AppendData(\"Desc\", \"third\")\n\tow.WriteCustomOutput(\"Key,Desc\")\n\n\t// Output:\n\t// Raspberry,first\n\t// Pi,second\n\t// Zero,third\n}", "func testSomeLogMethod(t *testing.T, fn LogMethod, level string, expectOutput bool) {\n\tr, w := io.Pipe()\n\tdefer r.Close()\n\tlog.InitWithWriter(w)\n\n\t// Generate log message\n\trs := randomString()\n\tgo func(fn LogMethod, rs string, w io.WriteCloser) {\n\t\tfn(rs)\n\t\tw.Close()\n\t}(fn, rs, w)\n\n\t// Check we got the message\n\tvar output []byte = make([]byte, 1024)\n\t_, readErr := r.Read(output)\n\tif readErr != nil && readErr != io.EOF {\n\t\tt.Fatalf(\"Cannot read log output from io.Pipe: %v\", readErr)\n\t}\n\tif readErr == io.EOF {\n\t\tif expectOutput {\n\t\t\t// This is what we wanted\n\t\t\tt.Fatalf(\"Got EOF 
when output was expected\")\n\t\t} else {\n\t\t\treturn\n\t\t}\n\t}\n\tt.Logf(\"Log output: <<<%s>>>\", string(output))\n\tif !strings.Contains(string(output), rs) {\n\t\tt.Error(\"Log output did not have message\")\n\t}\n\tif !strings.Contains(string(output), level) {\n\t\tt.Error(\"Log output did not have expected level\")\n\t}\n}", "func TestSetOutputs(t *testing.T) {\n\toutputs := Outputs()\n\tdefer SetOutputs(outputs...)\n\n\ttests := []struct {\n\t\tname string\n\t\tin []Output\n\t\tnames []string\n\t\thasErr bool\n\t}{\n\t\t{\n\t\t\tname: \"Empty outputs\",\n\t\t\tin: []Output{},\n\t\t\tnames: []string{},\n\t\t\thasErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Wrong output name\",\n\t\t\tin: []Output{\n\t\t\t\t{\n\t\t\t\t\tName: \"incorrect name\",\n\t\t\t\t\tWriter: os.Stdout,\n\t\t\t\t},\n\t\t\t},\n\t\t\tnames: []string{},\n\t\t\thasErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"CSS-style output name\",\n\t\t\tin: []Output{\n\t\t\t\t{\n\t\t\t\t\tName: \"file-name\",\n\t\t\t\t\tWriter: os.Stdout,\n\t\t\t\t},\n\t\t\t},\n\t\t\tnames: []string{\"file-name\"},\n\t\t\thasErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Wrong output name, but it is system output\",\n\t\t\tin: []Output{\n\t\t\t\t{\n\t\t\t\t\tName: \"*\",\n\t\t\t\t\tWriter: os.Stdout,\n\t\t\t\t\tisSystem: true,\n\t\t\t\t},\n\t\t\t},\n\t\t\tnames: []string{\"*\"},\n\t\t\thasErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"One short output\",\n\t\t\tin: []Output{\n\t\t\t\t{\n\t\t\t\t\tName: \"test\",\n\t\t\t\t\tWriter: os.Stdout,\n\t\t\t\t},\n\t\t\t},\n\t\t\tnames: []string{\"test\"},\n\t\t\thasErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"One short output with defaults\",\n\t\t\tin: []Output{\n\t\t\t\t{\n\t\t\t\t\tName: \"test\",\n\t\t\t\t\tWriter: os.Stdout,\n\t\t\t\t},\n\t\t\t\tStdout,\n\t\t\t\tStderr,\n\t\t\t},\n\t\t\tnames: []string{\"test\", \"stdout\", \"stderr\"},\n\t\t\thasErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Noname output\",\n\t\t\tin: []Output{\n\t\t\t\t{\n\t\t\t\t\tWriter: os.Stdout,\n\t\t\t\t},\n\t\t\t},\n\t\t\thasErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Nowriter output\",\n\t\t\tin: []Output{\n\t\t\t\t{\n\t\t\t\t\tName: \"test\",\n\t\t\t\t},\n\t\t\t},\n\t\t\thasErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Duplicates outputs\",\n\t\t\tin: []Output{\n\t\t\t\t{\n\t\t\t\t\tName: \"test\",\n\t\t\t\t\tWriter: os.Stdout,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"test\",\n\t\t\t\t\tWriter: os.Stdout,\n\t\t\t\t},\n\t\t\t},\n\t\t\thasErr: true,\n\t\t},\n\t}\n\n\t// Don't use parallel tests here.\n\tfor _, tt := range tests {\n\t\t// Error check.\n\t\terr := SetOutputs(tt.in...)\n\t\tif tt.hasErr {\n\t\t\tif err == nil {\n\t\t\t\tt.Errorf(\"%s: an error was expected\", tt.name)\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: an error occurred: %s\", tt.name, err.Error())\n\t\t}\n\n\t\t// Len check.\n\t\tout := Outputs()\n\t\tif len(out) != len(tt.in) {\n\t\t\tt.Errorf(\"%s: %d items passed but %d items sets\",\n\t\t\t\ttt.name, len(out), len(tt.in))\n\t\t}\n\n\t\t// Check names.\n\t\tfor _, n := range tt.names {\n\t\t\tif _, ok := self.outputs[n]; !ok {\n\t\t\t\tt.Errorf(\"%s: %s output not found\", tt.name, n)\n\t\t\t}\n\t\t}\n\t}\n}", "func (s *BuildahTestSession) OutputToString() string {\n\tfields := strings.Fields(fmt.Sprintf(\"%s\", s.Out.Contents()))\n\treturn strings.Join(fields, \" \")\n}", "func PrintAndJsonOutput(result []*kubestr.TestOutput, output string, outfile string) bool {\n\tif output == \"json\" {\n\t\tjsonRes, _ := json.MarshalIndent(result, \"\", \" \")\n\t\tif len(outfile) > 0 {\n\t\t\terr := 
os.WriteFile(outfile, jsonRes, 0666)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Error writing output:\", err.Error())\n\t\t\t\tos.Exit(2)\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Println(string(jsonRes))\n\t\t}\n\t\treturn true\n\t}\n\treturn false\n}", "func TestMainFunc(t *testing.T) {\n\t// TODO\n\tsavedStdout := os.Stdout\n\tdefer func() { os.Stdout = savedStdout }()\n\n\tstdoutTmpFile, err := ioutil.TempFile(\"\", \"joincap_output_\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfilename := stdoutTmpFile.Name()\n\tdefer os.Remove(filename)\n\n\tos.Stdout = stdoutTmpFile\n\tmain()\n\tstdoutTmpFile.Close()\n}", "func PrintOutput(f *Formatter, output *perfops.RunOutput) {\n\tif f.printID {\n\t\tf.Printf(\"Test ID: %v\\n\", output.ID)\n\t}\n\tspinner := f.s.Step()\n\tif !output.IsFinished() {\n\t\tf.Printf(\"%s\", spinner)\n\t\tif len(output.Items) > 1 {\n\t\t\tfinished := 0\n\t\t\tfor _, item := range output.Items {\n\t\t\t\tif item.Result.IsFinished() {\n\t\t\t\t\tfinished++\n\t\t\t\t}\n\t\t\t}\n\t\t\tf.Printf(\" %d/%d\", finished, len(output.Items))\n\t\t}\n\t\tf.Printf(\"\\n\")\n\t}\n\tfor _, item := range output.Items {\n\t\tr := item.Result\n\t\tn := r.Node\n\t\tif item.Result.Message == \"\" {\n\t\t\to := r.Output\n\t\t\tif o == \"-2\" {\n\t\t\t\to = \"The command timed-out. It either took too long to execute or we could not connect to your target at all.\"\n\t\t\t}\n\t\t\tf.Printf(\"Node%d, AS%d, %s, %s\\n%s\\n\", n.ID, n.AsNumber, n.City, n.Country.Name, o)\n\t\t} else if r.Message != \"NO DATA\" {\n\t\t\tf.Printf(\"Node%d, AS%d, %s, %s\\n%s\\n\", n.ID, n.AsNumber, n.City, n.Country.Name, r.Message)\n\t\t}\n\t\tif !item.Result.IsFinished() {\n\t\t\tf.Printf(\"%s\\n\", spinner)\n\t\t}\n\t}\n\tf.Flush(!output.IsFinished())\n}", "func init() {\n\tSetOutput(os.Stdout)\n}", "func (m *mock) MockContents() string {\n\treturn m.mockFunction() + m.mockExport()\n}", "func Output(t testing.TestingT, options *Options, key string) string {\n\tout, err := OutputE(t, options, key)\n\trequire.NoError(t, err)\n\treturn out\n}", "func makeOutputByMethod(abc *st.Art) {\n\tif abc.Flag.Color.MethodColoring == \"none\" || abc.Flag.Color.MethodColoring == \"all\" {\n\t\t// if method \"all\" - it is no need to add color for each letter. 
only at start and the end line\n\t\tmakeOutput(abc, false)\n\t} else {\n\t\tmakeOutput(abc, abc.Alphabet.Coloring)\n\t}\n}", "func main() {\n\tlog.Println(\"TEST CASE 1:\")\n\ttest_case_1()\n\tlog.Println(\"\\n\\n\")\n\n\tlog.Println(\"TEST CASE 2:\")\n\ttest_case_2()\n\tlog.Println(\"\\n\\n\")\n\n\tlog.Println(\"TEST CASE 3:\")\n\ttest_case_3()\n\tlog.Println(\"\\n\\n\")\n\n\tlog.Println(\"TEST CASE 4:\")\n\ttest_case_4()\n\tlog.Println(\"\\n\\n\")\n}", "func printOutputHeader() {\n\tfmt.Println(\"\")\n\tfmt.Println(\"FreeTAXII - STIX Table Creator\")\n\tfmt.Println(\"Copyright: Bret Jordan\")\n\tfmt.Println(\"Version:\", Version)\n\tif Build != \"\" {\n\t\tfmt.Println(\"Build:\", Build)\n\t}\n\tfmt.Println(\"\")\n}", "func strFormatOut(input *gofeed.Item) string {\n\tvar output string = input.Title + \"\\n\" + input.Link + \"\\n\"\n\treturn output\n}", "func (s *Suite) printTestSummary() {\n\tif s.ProblemsFound == 0 {\n\t\ts.Logger.Println(\"== SUCCESS: This test completed successfully\\n\")\n\t} else if s.ProblemsFound == 1 {\n\t\ts.Logger.Println(\"== FAILURE:\", s.ProblemsFound, \"problem found in this test\\n\")\n\t} else if s.ProblemsFound > 1 {\n\t\ts.Logger.Println(\"== FAILURE:\", s.ProblemsFound, \"problems found in this test\\n\")\n\t}\n\ts.ProblemsFound = 0\n}", "func outPut() {\n\ts, _ := json.Marshal(allMap)\n\tfmt.Println(string(s))\n\ts, _ = json.Marshal(errMap)\n\tfmt.Println(string(s))\n}", "func printOutputHeader() {\n\tfmt.Println(\"\")\n\tfmt.Println(\"FreeTAXII Server\")\n\tfmt.Println(\"Copyright, Bret Jordan\")\n\tfmt.Println(\"Version:\", sVersion)\n\tfmt.Println(\"\")\n}", "func (r BenchmarkResult) String() string {}", "func printOutput(o output.Outputter, err error) error {\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif o == nil {\n\t\treturn nil\n\t}\n\n\tif output.GOutputTemplate != \"\" {\n\t\to.ToText()\n\t\treturn nil\n\t}\n\n\tswitch globalstate.OutputFormat {\n\tcase \"json\":\n\t\to.ToJSON()\n\n\tcase \"text\":\n\t\to.ToText()\n\n\tdefault:\n\t\to.ToTable()\n\t}\n\n\treturn nil\n}", "func testHeader(t *testing.T) {\n\tlogging.Info(fmt.Sprintf(\"=============== Starting test [%s] ===============\", t.Name()))\n}", "func TestOutputFile(t *testing.T) {\n\tmsg := \"This should get written\"\n\n\tfor _, format := range []string{DefaultFormat, TextFormat, JSONFormat} {\n\t\tf, err := os.CreateTemp(\"\", \"testoutputfile\")\n\t\trequire.NoError(t, err)\n\t\ttmpfile := f.Name()\n\t\tdefer os.Remove(tmpfile)\n\n\t\tlogger, err := NewLogger(WithOutputFile(tmpfile), WithFormat(format))\n\t\trequire.NoError(t, err)\n\n\t\tlogger.Warning(msg)\n\n\t\trequire.NoError(t, logger.Close())\n\n\t\tlog, err := io.ReadAll(f)\n\t\trequire.NoError(t, err)\n\n\t\tif format == JSONFormat {\n\t\t\tvar data map[string]string\n\t\t\trequire.NoError(t, json.Unmarshal(log, &data))\n\t\t\tassert.Equal(t, data[\"level\"], \"warning\")\n\t\t\tassert.Equal(t, data[\"msg\"], msg)\n\t\t\tassert.Contains(t, data, \"time\")\n\t\t\tassert.EqualValues(t, len(data), 3, \"%q\", data)\n\t\t} else {\n\t\t\texpected := fmt.Sprintf(\"level=warning msg=\\\"%s\\\"\", msg)\n\t\t\trequire.Contains(t, string(log), expected)\n\t\t}\n\t}\n}", "func TestSinglePrewrite4A(t *testing.T) {\n}", "func sampleWrite() {\n\toutput, err := os.Create(\"output.txt\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer func() {\n\t\tif err := output.Close(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\n\t// write sth into output\n}", "func (f *FakeOutput) ID() string { return \"fake\" }", "func (manager *Manager) 
DebugOutput() {\n\tlog.Debugf(\"-------------------------\\n\")\n\tlog.Debugf(\"| Times and Commit Counts|\\n\")\n\tlog.Debugf(\"-------------------------\\n\")\n\tfmt.Println(\"totalScanTime: \", durafmt.Parse(time.Duration(manager.metadata.ScanTime)*time.Nanosecond))\n\tfmt.Println(\"totalPatchTime: \", durafmt.Parse(time.Duration(manager.metadata.patchTime)*time.Nanosecond))\n\tfmt.Println(\"totalCloneTime: \", durafmt.Parse(time.Duration(manager.metadata.cloneTime)*time.Nanosecond))\n\tfmt.Println(\"totalCommits: \", manager.metadata.Commits)\n\n\tconst padding = 6\n\tw := tabwriter.NewWriter(os.Stdout, 0, 0, padding, '.', 0)\n\n\tlog.Debugf(\"--------------------------\\n\")\n\tlog.Debugf(\"| Individual Regexes Times |\\n\")\n\tlog.Debugf(\"--------------------------\\n\")\n\tfor k, v := range manager.metadata.RegexTime {\n\t\t_, _ = fmt.Fprintf(w, \"%s\\t%s\\n\", k, durafmt.Parse(time.Duration(v)*time.Nanosecond))\n\t}\n\t_ = w.Flush()\n\n}", "func TestInfo(t *testing.T) {\n\tout := strings.Builder{}\n\tSetOutput(&out)\n\tSetLevelStr(\"INFO\")\n\tt.Run(\"test info function\", func(t *testing.T) {\n\t\tprintAllLevels(\"test info message\")\n\t\tif strings.Count(out.String(), \"DEBUG\") == 2 {\n\t\t\tt.Errorf(\"Message with status DEBUG should not printed:\\n%v\", out.String())\n\t\t}\n\t\tif strings.Count(out.String(), \"INFO\") != 2 {\n\t\t\tt.Errorf(\"Message with status INFO should printed:\\n%v\", out.String())\n\t\t}\n\t\tif strings.Count(out.String(), \"WARN\") != 2 {\n\t\t\tt.Errorf(\"Message with status WARN should printed:\\n%v\", out.String())\n\t\t}\n\t\tif strings.Count(out.String(), \"ERROR\") != 2 {\n\t\t\tt.Errorf(\"Message with status ERROR should printed:\\n%v\", out.String())\n\t\t}\n\t\tif strings.Count(out.String(), \"FATAL\") != 2 {\n\t\t\tt.Errorf(\"Message with status FATAL should printed:\\n%v\", out.String())\n\t\t}\n\t})\n}", "func TestEcho(t *testing.T) {\n\t// Create a new logger.\n\tlogger := New(\"TEST-PREFIX:\")\n\n\t// Classical test.\n\tr, w, _ := os.Pipe()\n\terr := logger.SetOutputs(Output{\n\t\tName: \"test\",\n\t\tWriter: w,\n\t\tLevels: level.Default,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tlogger.echo(nil, level.Debug, \"test %s\", \"message\")\n\toutC := make(chan string)\n\tgo ioCopy(r, outC)\n\tw.Close()\n\tout := <-outC\n\n\tif !strings.Contains(out, \"test message\") {\n\t\tt.Errorf(\"echo did not write the correct TEXT message: %s\", out)\n\t}\n\n\t// As JSON.\n\tr, w, _ = os.Pipe()\n\tlogger.SetOutputs(Output{\n\t\tName: \"test\",\n\t\tWriter: w,\n\t\tLevels: level.Default,\n\t\tWithPrefix: trit.False,\n\t})\n\n\tlogger.echo(nil, level.Debug, \"test %s\", \"message\")\n\toutC = make(chan string)\n\tgo ioCopy(r, outC)\n\tw.Close()\n\tout = <-outC\n\n\tif strings.Contains(out, \"TEST-PREFIX\") {\n\t\tt.Errorf(\"the prefix should not appear in this test: %s\", out)\n\t}\n\n\t// As JSON.\n\tr, w, _ = os.Pipe()\n\tlogger.SetOutputs(Output{\n\t\tName: \"test\",\n\t\tWriter: w,\n\t\tLevels: level.Default,\n\t\tTextStyle: trit.False,\n\t})\n\n\tlogger.echo(nil, level.Debug, \"test %s\", \"message\")\n\toutC = make(chan string)\n\tgo ioCopy(r, outC)\n\tw.Close()\n\tout = <-outC\n\n\tif !strings.Contains(out, \"\\\"level\\\":\\\"DEBUG\\\"\") {\n\t\tt.Errorf(\"echo did not write the correct JSON message: %s\", out)\n\t}\n\n\t// Disabled.\n\tr, w, _ = os.Pipe()\n\tlogger.SetOutputs(Output{\n\t\tName: \"test\",\n\t\tWriter: w,\n\t\tEnabled: trit.False,\n\t})\n\n\tlogger.echo(nil, level.Debug, \"test %s\", \"message\")\n\toutC = make(chan 
string)\n\tgo ioCopy(r, outC)\n\tw.Close()\n\tout = <-outC\n\n\tif len(out) != 0 {\n\t\tt.Errorf(\"should not write anything: %s\", out)\n\t}\n}", "func TestPrint(t *testing.T) {\n\n\tcli.PrintHelp(&appCmd, os.Stdout, nil)\n\tfmt.Println(\"------------------------------\")\n\n\tcli.PrintUsage(&appCmd, os.Stdout, nil)\n\tfmt.Println(\"------------------------------\")\n\n\tcli.PrintUsage(&appCmd, os.Stdout, &printSettings)\n\tfmt.Println(\"\")\n\tcli.PrintUsage(&all, os.Stdout, &printSettings)\n\tfmt.Println(\"\")\n\tcli.PrintUsage(&task1, os.Stdout, &printSettings)\n\tfmt.Println(\"\")\n\tcli.PrintUsage(&task2, os.Stdout, &printSettings)\n\tfmt.Println(\"\")\n\tcli.PrintUsage(&task3, os.Stdout, &printSettings)\n\n\tfmt.Println(\"------------------------------\")\n\terr := errors.New(\"test error\")\n\tcli.PrintMisuse(&setup, os.Stdout, err, nil)\n\tfmt.Println(\"\")\n\tcli.PrintMisuse(&all, os.Stdout, err, nil)\n\tfmt.Println(\"\")\n\tcli.PrintMisuse(&task1, os.Stdout, err, nil)\n\tfmt.Println(\"\")\n\tcli.PrintMisuse(&task2, os.Stdout, err, nil)\n\tfmt.Println(\"\")\n\tcli.PrintMisuse(&task3, os.Stdout, err, nil)\n}", "func Stdout(format string, a ...interface{}) {\n\tfmt.Printf(GetOutputPrefix(GetFuncName(1))+format+\"\\n\", a...)\n}", "func PrintTestcaseNameandStatus() {\n\tvar Status string\n\ttestSpecReport := ginkgo.CurrentGinkgoTestDescription()\n\tif testSpecReport.Failed {\n\t\tStatus = \"FAILED\"\n\t} else {\n\t\tStatus = \"PASSED\"\n\t}\n\tInfof(\"TestCase:%40s Status=%s\", testSpecReport.TestText, Status)\n}", "func expectOutput(t *testing.T, f func(), expected string) {\n\told := os.Stdout // keep backup of the real stdout\n\tr, w, _ := os.Pipe()\n\tos.Stdout = w\n\n\tf()\n\n\toutC := make(chan string)\n\t// copy the output in a separate goroutine so printing can't block indefinitely\n\tgo func() {\n\t\tvar buf bytes.Buffer\n\t\tio.Copy(&buf, r)\n\t\toutC <- buf.String()\n\t}()\n\n\tos.Stdout.Close()\n\tos.Stdout = old // restoring the real stdout\n\tout := <-outC\n\tif !strings.Contains(out, expected) {\n\t\tt.Errorf(\"Expected '%s', received '%s'.\", expected, out)\n\t}\n}", "func (f *FakeOutput) Logger() *zap.SugaredLogger { return f.SugaredLogger }", "func (f *AccessReq) GenerateOutput(output string) {\n\n\tfmt.Println(\"\\n\")\n\n\tfmt.Println(\"-------------------------------------------------------------------\\n\")\n\tfmt.Println(f.Input, \"START\\n\")\n\tfmt.Println(\"-------------------------------------------------------------------\\n\\n\")\n\n\t///ejecuta\n\n\tfmt.Println(output)\n\n\tlogs := f.Logs\n\n\tif len(logs) > 0 {\n\n\t\tfmt.Println(\"\\nFrames Swapped: \\n\")\n\n\t\tfor i := 0; i < len(logs); i++ {\n\n\t\t\tlog := logs[i]\n\t\t\tlogType := log.Type\n\n\t\t\tif logType == SwapOut {\n\t\t\t\tfmt.Println(\"PID: %d VP: %d Swap: %d \\t\", log.PageBefore.PID, log.PageBefore.ID, log.PageAfter.SwapFrame)\n\t\t\t}\n\n\t\t}\n\t}\n\n\tfmt.Println(\"\\n\")\n\n}", "func Output(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, runner command.Runner, lines int, logOutput *os.File) error {\n\tcmds := logCommands(r, bs, cfg, lines, false)\n\tcmds[\"kernel\"] = \"uptime && uname -a && grep PRETTY /etc/os-release\"\n\n\tnames := []string{}\n\tfor k := range cmds {\n\t\tnames = append(names, k)\n\t}\n\n\tout.SetOutFile(logOutput)\n\tdefer out.SetOutFile(os.Stdout)\n\tout.SetErrFile(logOutput)\n\tdefer out.SetErrFile(os.Stderr)\n\n\tsort.Strings(names)\n\tfailed := []string{}\n\tfor i, name := range names {\n\t\tif i > 0 
{\n\t\t\tout.Styled(style.Empty, \"\")\n\t\t}\n\t\tout.Styled(style.Empty, \"==> {{.name}} <==\", out.V{\"name\": name})\n\t\tvar b bytes.Buffer\n\t\tc := exec.Command(\"/bin/bash\", \"-c\", cmds[name])\n\t\tc.Stdout = &b\n\t\tc.Stderr = &b\n\t\tif rr, err := runner.RunCmd(c); err != nil {\n\t\t\tklog.Errorf(\"command %s failed with error: %v output: %q\", rr.Command(), err, rr.Output())\n\t\t\tfailed = append(failed, name)\n\t\t\tcontinue\n\t\t}\n\t\tl := \"\"\n\t\tscanner := bufio.NewScanner(&b)\n\t\tfor scanner.Scan() {\n\t\t\tl += scanner.Text() + \"\\n\"\n\t\t}\n\t\tif err := scanner.Err(); err != nil {\n\t\t\tklog.Errorf(\"failed to read output: %v\", err)\n\t\t\tfailed = append(failed, name)\n\t\t}\n\t\tout.Styled(style.Empty, l)\n\t}\n\n\tif len(failed) > 0 {\n\t\treturn fmt.Errorf(\"unable to fetch logs for: %s\", strings.Join(failed, \", \"))\n\t}\n\treturn nil\n}", "func output(path string, info os.FileInfo, result map[string]interface{}) {\n\tfor i, attr := range attrs {\n\t\tif q.HasAttribute(attr) {\n\t\t\tfmt.Printf(\"%v\", result[attr])\n\t\t\tif q.HasAttribute(attrs[i+1:]...) {\n\t\t\t\tfmt.Print(\"\\t\")\n\t\t\t}\n\t\t}\n\t}\n\tfmt.Print(\"\\n\")\n}", "func TestDoesOutput(t *testing.T, logic func(w io.Writer)) {\n\tassert.NotEmpty(t, CaptureStdout(logic))\n}", "func (*text) isOutput() {\n}", "func TestEmptyPrewrite4A(t *testing.T) {\n}", "func TestFieldOutputText(t *testing.T) {\n\tfield := NewField()\n\tfield.Name = \"foo\"\n\tfield.Type = \"text\"\n\n\ttag := field.output()\n\n\tassert.Equal(t, \"<input type=\\\"text\\\" name=\\\"foo\\\" id=\\\"foo\\\" value=\\\"\\\" />\", tag)\n}", "func prt(args ...interface{}) {\n\tfmt.Println(append([]interface{}{\"-------------> DEMO\"}, args...)...)\n}", "func TestPrewriteMultiple4A(t *testing.T) {\n}", "func usageCommon(w io.Writer, line int) {\n\toutputPara(w, line, 0, usageCommonPara)\n}", "func TestEchoWithTextFormatting(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tformat string\n\t\tin []interface{}\n\t\twant string\n\t}{\n\t\t{\n\t\t\tname: \"Empty format\",\n\t\t\tformat: \"\",\n\t\t\tin: []interface{}{\"hello\", \"world\"},\n\t\t\twant: \"helloworld\", // used fmt.Print\n\t\t},\n\t\t{\n\t\t\tname: \"System formatStr\",\n\t\t\tformat: formatPrint,\n\t\t\tin: []interface{}{\"hello\", \"world\"},\n\t\t\twant: \"helloworld\", // used fmt.Print\n\t\t},\n\t\t{\n\t\t\tname: \"System formatStrLn\",\n\t\t\tformat: formatPrintln,\n\t\t\tin: []interface{}{\"hello\", \"world\"},\n\t\t\twant: \" hello world\\n\", // used fmt.Println\n\t\t},\n\t\t{\n\t\t\tname: \"Custom formats\",\n\t\t\tformat: \"[%d]-%s is %v\",\n\t\t\tin: []interface{}{777, \"message\", true},\n\t\t\twant: \"[777]-message is true\", // used fmt.Printf\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tlogger := New()\n\t\t\tlogger.SetSkipStackFrames(2)\n\t\t\tr, w, _ := os.Pipe()\n\t\t\tlogger.SetOutputs(Output{\n\t\t\t\tName: \"test\",\n\t\t\t\tWriter: w,\n\t\t\t\tLevels: level.Default,\n\t\t\t})\n\n\t\t\tlogger.echo(nil, level.Debug, tt.format, tt.in...)\n\t\t\toutC := make(chan string)\n\t\t\tgo ioCopy(r, outC)\n\t\t\tw.Close()\n\t\t\tout := <-outC\n\t\t\tif !strings.Contains(out, tt.want) {\n\t\t\t\tt.Errorf(\"Expression `%v` does not contain `%v`\", out, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}", "func helpsTestOutputSplitThreeChunksAtIndex(t *testing.T, data []byte, i int, j int) {\n\tstdOut, _, _, mock := newBufferedMockTerm()\n\n\tt.Logf(\"\\nWriting chunk[0] == %s\", string(data[:i]))\n\tt.Logf(\"\\nWriting 
chunk[1] == %s\", string(data[i:j]))\n\tt.Logf(\"\\nWriting chunk[2] == %s\", string(data[j:]))\n\tstdOut.Write(data[:i])\n\tstdOut.Write(data[i:j])\n\tstdOut.Write(data[j:])\n\n\tassertTrue(t, mock.OutputCommandSequence[0].Operation == WRITE_OPERATION, \"Operation should be Write : %#v\", mock)\n\tassertBytesEqual(t, data[:i], mock.OutputCommandSequence[0].Data, \"Write data should match\")\n\n\tassertTrue(t, mock.OutputCommandSequence[1].Operation == WRITE_OPERATION, \"Operation should be Write : %#v\", mock)\n\tassertBytesEqual(t, data[i:j], mock.OutputCommandSequence[1].Data, \"Write data should match\")\n\n\tassertTrue(t, mock.OutputCommandSequence[2].Operation == WRITE_OPERATION, \"Operation should be Write : %#v\", mock)\n\tassertBytesEqual(t, data[j:], mock.OutputCommandSequence[2].Data, \"Write data should match\")\n}", "func TestInfo(t *testing.T) {\n\tr, w, _ := os.Pipe()\n\tSetOutputs(Output{\n\t\tName: \"test\",\n\t\tWriter: w,\n\t\tLevels: level.Default,\n\t})\n\n\tInfo(\"Test info\")\n\toutC := make(chan string)\n\tgo ioCopy(r, outC)\n\tw.Close()\n\tout := <-outC\n\n\texpected := \"Test info\"\n\tn := level.Labels[level.Info]\n\tif !strings.Contains(out, expected) || !strings.Contains(out, n) {\n\t\tt.Errorf(\"Result `%s` doesn't contains `%s` and `%s`\",\n\t\t\tout, expected, n)\n\t}\n}", "func Test(t *testing.T) {\n\trw, err := NewRotateWriter(\"./logs/data.log\", RotateTimeClose)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tSetOutput(rw)\n\tInfo(\"TEST INFO\")\n}", "func printTest(args *Args, isJustTest bool) {\n\tmessage := fmt.Sprintf(`%s %s configuration file %s test is `, appname, appversion, args.ConfigFile)\n\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tfmt.Printf(\"%s wrong !!!\\n> %s\\n\", message, err)\n\t\t\tos.Exit(0)\n\t\t}\n\t}()\n\n\t// Get config file content to struct\n\tcfg := config.GetConfig(args.ConfigFile)\n\n\t// check statistic redis source\n\tif \"redis\" == strings.ToLower(cfg.Statistic.SourceType) {\n\t\toredis.GetInstancePanic(cfg.Statistic.RedisSource)\n\t}\n\n\tenabledQueueCount := 0\n\t// sort the DelayOnFailure array\n\tfor _, r := range cfg.Redis {\n\t\toredis.GetInstancePanic(r.Config)\n\t\tfor _, n := range r.Queues {\n\t\t\tif n.IsEnabled {\n\t\t\t\tenabledQueueCount++\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, r := range cfg.RabbitMQ {\n\t\tr.Config.GetConnectionPanic()\n\t\tfor _, n := range r.Queues {\n\t\t\tif n.IsEnabled {\n\t\t\t\tenabledQueueCount++\n\t\t\t}\n\t\t}\n\t}\n\n\tif enabledQueueCount < 1 {\n\t\tpanic(`There has no enabled queue, please check configure file \"IsEnabled\" fields for every queue`)\n\t}\n\n\t// if -t\n\tif isJustTest {\n\t\tfmt.Printf(\"%s ok\\n\", message)\n\t\tos.Exit(0)\n\t}\n}", "func TestNewBatchConstructor(t *testing.T) {\n\tbatch := NewBatch()\n\tif batch.Writer != os.Stdout {\n\t\tt.Fatal(\"Batch is not using stdout\")\n\t}\n}", "func TestMultiplePrewrites4A(t *testing.T) {\n}", "func colorizeOutput(result string) string {\n\tvar output string\n\tswitch result {\n\tcase \"vulnerable\":\n\t\toutput = term.Redf(result)\n\tcase \"not vulnerable\", \"secure\":\n\t\toutput = term.Greenf(result)\n\tdefault:\n\t\toutput = result\n\t}\n\treturn output\n}", "func PrintMutatedOutput(mutateLogPath string, mutateLogPathIsDir bool, yaml string, fileName string) error {\n\tvar f *os.File\n\tvar err error\n\tyaml = yaml + (\"\\n---\\n\\n\")\n\n\tmutateLogPath = filepath.Clean(mutateLogPath)\n\tif !mutateLogPathIsDir {\n\t\t// truncation for the case when mutateLogPath is a file (not a directory) is handled 
under pkg/kyverno/apply/test_command.go\n\t\tf, err = os.OpenFile(mutateLogPath, os.O_APPEND|os.O_WRONLY, 0o600) // #nosec G304\n\t} else {\n\t\tf, err = os.OpenFile(mutateLogPath+\"/\"+fileName+\".yaml\", os.O_CREATE|os.O_WRONLY, 0o600) // #nosec G304\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, err := f.Write([]byte(yaml)); err != nil {\n\t\tcloseErr := f.Close()\n\t\tif closeErr != nil {\n\t\t\tlog.Error(closeErr, \"failed to close file\")\n\t\t}\n\t\treturn err\n\t}\n\tif err := f.Close(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func TestLogger(t *testing.T) {\n\t// a := assert.New(t)\n\tr := require.New(t)\n\toutput := bytes.NewBuffer(nil)\n\tlogger := newLogger()\n\tlogger.Options(false, false, false).SetOutput(output)\n\tin := make(chan interface{}, 2)\n\tout := make(chan interface{}, 2)\n\n\tlogger.LogBegin(\"TEST\", \"POST\", \"http://test.fr\", []byte(\"QUERY\"))\n\tr.Empty(output.String())\n\toutput.Reset()\n\n\tgo logger.LogResult(false, time.Now(), in, out)\n\tin <- json.RawMessage(\"Hello world !\")\n\tin <- nil\n\t<-out\n\t<-out\n\tr.Empty(output.String())\n\toutput.Reset()\n\n\tlogger.LogError(\"ERROR\", time.Now())\n\tr.Empty(output.String())\n\toutput.Reset()\n\n\tlogger.Options(true, false, false)\n\n\tlogger.LogBegin(\"TEST\", \"POST\", \"http://test.fr\", []byte(`{\"foo\":\"bar\"}`))\n\tr.Contains(output.String(), \"TEST\")\n\tr.Contains(output.String(), \"http://test.fr\")\n\tr.NotContains(output.String(), \"foo\")\n\toutput.Reset()\n\n\tgo logger.LogResult(true, time.Now(), in, out)\n\tin <- json.RawMessage{}\n\tin <- nil\n\t<-out\n\t<-out\n\tr.NotContains(output.String(), \"[]\")\n\toutput.Reset()\n\n\tlogger.LogError(\"ERROR\", time.Now())\n\tr.Contains(output.String(), \"ERROR\")\n\toutput.Reset()\n\n\tlogger.Options(true, true, true)\n\n\tlogger.LogBegin(\"TEST\", \"POST\", \"http://test.fr\", []byte(`{\"foo\":\"bar\"}`))\n\tr.Contains(output.String(), \"TEST\")\n\tr.Contains(output.String(), \"http://test.fr\")\n\tr.Contains(output.String(), \"foo\")\n\toutput.Reset()\n\n\tgo logger.LogResult(false, time.Now(), in, out)\n\tin <- json.RawMessage(`{\"foo\":\"bar\"}`)\n\tin <- json.RawMessage{}\n\tin <- nil\n\t<-out\n\t<-out\n\t<-out\n\ttime.Sleep(time.Millisecond)\n\tr.Contains(output.String(), \"foo\")\n\toutput.Reset()\n}", "func doServerStreams(t *testing.T, prefix string, stdin io.Reader, stdout, stderr io.Writer) {\n\tif stderr != nil {\n\t\twriteExpected(t, \"server stderr\", stderr, prefix+testErr)\n\t}\n\treadExpected(t, \"server stdin\", stdin, prefix+testInput)\n\twriteExpected(t, \"server stdout\", stdout, prefix+testOutput)\n}", "func TestOutput(t *testing.T) {\n\tmsg := []byte(\"[email protected]\")\n\n\t// Generate the keys for a given msg input.\n\toutput, err := ProcessMessage(string(msg))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to generate keys %s\", err)\n\t}\n\toutput.FormatOutput()\n\n\t// Parse out the signature.\n\tder, err := base64.StdEncoding.DecodeString(output.EncodedSignature)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to get decode signature %s\", err)\n\t}\n\t// Unmarshal the R and S components of the ASN.1-encoded signature into our signature data structure\n\tsig := &ECDSASignature{}\n\t_, err = asn1.Unmarshal(der, sig)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to get signature data %s\", err)\n\t}\n\n\th := toSha256(msg)\n\tpubkey, err := loadPublicKey(output.PEMEncodedPubKey)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load public key %s %s\", output.PEMEncodedPubKey, err)\n\t}\n\n\tif valid := 
ecdsa.Verify(\n\t\tpubkey,\n\t\th,\n\t\tsig.R,\n\t\tsig.S,\n\t); !valid {\n\t\tt.Fatal(\"verify failed\")\n\t}\n\n}", "func specifyOutput() {\n\tif *outputFile == \"\" {\n\t\t// no output file, write to Stdout\n\t\toutputWriter = os.Stdout\n\t} else {\n\t\tw, err := os.Create(*outputFile)\n\t\tif err != nil {\n\t\t\t// error creating output file\n\t\t\tfmt.Printf(\"Error creating output file: %v\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\toutputWriter = w\n\t}\n}", "func mockQiNiu01(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\tif _, err := w.Write([]byte(\"success\")); err != nil {\n\t\tcommon.ErrHandler(w, err)\n\t\treturn\n\t}\n\tw.(http.Flusher).Flush()\n\n\tb, err := ioutil.ReadFile(\"ab_test.out\")\n\tif err != nil {\n\t\tcommon.ErrHandler(w, err)\n\t\treturn\n\t}\n\tif _, err := io.Copy(w, bufio.NewReader(bytes.NewReader(b))); err != nil {\n\t\tcommon.ErrHandler(w, err)\n\t}\n}", "func init() {\n\twithoutWhitespace := func(s string) string {\n\t\treturn strings.Map(func(r rune) rune {\n\t\t\tif !unicode.IsPrint(r) {\n\t\t\t\treturn -1\n\t\t\t} else {\n\t\t\t\treturn r\n\t\t\t}\n\t\t}, s)\n\t}\n\tfmt.Printf(\"fixtures:\\n\"+strings.Repeat(\"\\t%v\\t%v\\n\", 5),\n\t\tleafAlphaLnk, withoutWhitespace(string(storage[leafAlphaLnk])),\n\t\tleafBetaLnk, withoutWhitespace(string(storage[leafBetaLnk])),\n\t\tmiddleMapNodeLnk, withoutWhitespace(string(storage[middleMapNodeLnk])),\n\t\tmiddleListNodeLnk, withoutWhitespace(string(storage[middleListNodeLnk])),\n\t\trootNodeLnk, withoutWhitespace(string(storage[rootNodeLnk])),\n\t)\n}", "func main() {\n\toption := &Option{\n\t\tFormat: \"Common\",\n\t\tOutput: \"/tmp/sample.log\",\n\t\tType: \"log\",\n\t\tNumber: 10000,\n\t}\n\tGenerate(option)\n}", "func (c testCEEwriter) Debug(m string) error { return nil }", "func (s *outputs) stdout() *bytes.Buffer {\n\treturn s.out\n}", "func (m *MockecsDescriber) Outputs() (map[string]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Outputs\")\n\tret0, _ := ret[0].(map[string]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func runToolAndCollectOutput(toolPath string, args []string) (bytes.Buffer, error) {\n\tcmd := exec.Command(toolPath, args...)\n\tvar out bytes.Buffer\n\tcmd.Stdout = &out\n\terr := cmd.Run()\n\treturn out, err\n}", "func main(){\n\tcfg := setup()\n\n\t// Just output the template.\n\ttemplate, _ := ioutil.ReadAll(cfg.Template)\n\tfmt.Println(string(template))\n}", "func StdoutAs(funcName, format string, a ...interface{}) {\n\tfmt.Printf(GetOutputPrefix(funcName)+format+\"\\n\", a...)\n}", "func TestReopenableOutputFile(t *testing.T) {\n\tmsg := \"This should get written\"\n\n\tfor _, format := range []string{DefaultFormat, TextFormat, JSONFormat} {\n\t\tf, err := os.CreateTemp(\"\", \"testoutputfile\")\n\t\trequire.NoError(t, err)\n\t\ttmpfile := f.Name()\n\t\tdefer os.Remove(tmpfile)\n\n\t\treopenableFile, err := NewReopenableFile(f.Name())\n\t\trequire.NoError(t, err)\n\n\t\tlogger, err := NewLogger(WithReopenableOutputFile(reopenableFile), WithFormat(format))\n\t\trequire.NoError(t, err)\n\n\t\tlogger.Warning(msg)\n\n\t\trequire.NoError(t, logger.Close())\n\n\t\tlog, err := io.ReadAll(f)\n\t\trequire.NoError(t, err)\n\n\t\tif format == JSONFormat {\n\t\t\tvar data map[string]string\n\t\t\trequire.NoError(t, json.Unmarshal(log, &data))\n\t\t\tassert.Equal(t, data[\"level\"], \"warning\")\n\t\t\tassert.Equal(t, data[\"msg\"], msg)\n\t\t\tassert.Contains(t, data, \"time\")\n\t\t\tassert.EqualValues(t, len(data), 3, \"%q\", 
data)\n\t\t} else {\n\t\t\texpected := fmt.Sprintf(\"level=warning msg=\\\"%s\\\"\", msg)\n\t\t\trequire.Contains(t, string(log), expected)\n\t\t}\n\t}\n}", "func TestScaffoldUsecaseInteractorInterface(t *testing.T){\n\n\t// Build\n\tstatement, err := testInteractorGenerator.scaffoldUsecaseInteractorInterface(testEntity)\n\n\t// Return\n\tif err != nil {\n\t\tt.Errorf(`scaffoldUsecaseInteractorInterface() failed with error %v`, err)\n\t}\n\n\tf, err := os.Create(\"./testing/usecase/interactor/created/\" + testOutputScaffoldUsecaseInteractorInterfaceName)\n\tif err != nil {\n\t\tt.Errorf(`scaffoldUsecaseInteractorInterface() failed with error %v`, err)\n\t}\n\tbuf := &bytes.Buffer{}\n\terr = statement.Render(buf)\n\tif err != nil {\n\t\tt.Errorf(`scaffoldUsecaseInteractorInterface() failed with error %v`, err)\n\t}\n\t_, err = f.Write(buf.Bytes())\n\n\tif buf.String() != testOutputScaffoldUsecaseInteractorInterface {\n\t\tt.Errorf(`scaffoldUsecaseInteractorInterface() failed; want \"%s\", got \"%s\"`, testOutputScaffoldUsecaseInteractorInterface, buf.String())\n\t}\n\t\n}", "func TestTools(t *testing.T) { TestingT(t) }", "func (w *OutputWriter) printResult(r TestResult) {\n\tif !r.Success {\n\t\tw.fprintf(w.au.Red(w.template.testResult(r)))\n\t\treturn\n\t}\n\tw.fprintf(w.template.testResult(r))\n}", "func (self *testParser) accountForOutputWithoutNewline() {\n\tprefix := strings.Split(self.line, reporting.OpenJson)[0]\n\tif prefix != \"\" {\n\t\tself.otherLines = append(self.otherLines, prefix)\n\t}\n}", "func outputIntro() {\n\tfor _, i := range intros {\n\t\ti.docBlock.generate()\n\n\t\tfor _, c := range i.classes {\n\t\t\tc.generateOutput()\n\t\t}\n\t}\n}" ]
[ "0.5921244", "0.5917927", "0.58908236", "0.58242327", "0.5802392", "0.57774836", "0.57616454", "0.56760526", "0.567026", "0.5658049", "0.5656105", "0.5647464", "0.5606502", "0.5605456", "0.55558556", "0.55383164", "0.551986", "0.5514595", "0.55137485", "0.54991525", "0.5488504", "0.54791915", "0.5450627", "0.54474884", "0.54373074", "0.5436181", "0.54281795", "0.5404398", "0.5402326", "0.5398476", "0.53973395", "0.5395138", "0.5376817", "0.53629565", "0.53520906", "0.5336557", "0.5319203", "0.52969503", "0.52906007", "0.52897006", "0.5287567", "0.527831", "0.5272729", "0.52615947", "0.5260258", "0.5257387", "0.5252914", "0.5249345", "0.5246983", "0.52403396", "0.523879", "0.52365077", "0.5218027", "0.5214962", "0.5214172", "0.5209509", "0.51998186", "0.51906765", "0.5179818", "0.51794684", "0.51730067", "0.51664233", "0.5162226", "0.51549", "0.51536983", "0.5141801", "0.51389056", "0.5138868", "0.51380783", "0.51146424", "0.5101295", "0.5100923", "0.5092792", "0.5080579", "0.5078171", "0.506531", "0.5061791", "0.5056761", "0.50526166", "0.504872", "0.50465846", "0.50436056", "0.5043299", "0.50375825", "0.5032726", "0.502726", "0.5025347", "0.50239545", "0.5022715", "0.5018045", "0.5017626", "0.50170106", "0.501268", "0.50115305", "0.5006727", "0.50051916", "0.49996245", "0.49988562", "0.49974677", "0.49973008", "0.49923712" ]
0.0
-1
Function for generating an IMT Hash
func Generate(data []byte) ([]byte, error) { if len(data) == 0 { return nil, ErrEmptySlice } var tempHash = make([]int, len(data)*8) var hash []byte var index = 0 for _, ib := range data { for i, coefficient := range Coefficients { var temp int if i > 0 { temp = i - 1 } tempHash[index] = ((tempHash[temp] + int(ib)) * coefficient) % 255 index++ } } // []int to []byte Conversion - Is not elegant, but works fine for _, h := range tempHash { hash = append(hash, byte(h)) } return hash, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Hashit(tox string) string {\n h:= sha256.New()\n h.Write([]byte(tox))\n bs := h.Sum([]byte{})\n str := base64.StdEncoding.EncodeToString(bs)\n return str\n}", "func (tx *Tx) generateHash() [32]byte {\n\ttxInSlices := make([][]byte, 0)\n\tfor _, txIn := range tx.Inputs {\n\t\ttxInSlices = append(txInSlices, txIn.forBlkHash())\n\t}\n\ttxInSlice := helpers.ConcatByteArray(txInSlices)\n\n\ttxOutSlices := make([][]byte, 0)\n\tfor _, txOut := range tx.Outputs {\n\t\ttxOutSlices = append(txOutSlices, txOut.forBlkHash())\n\t}\n\ttxOutSlice := helpers.ConcatByteArray(txOutSlices)\n\n\ttxSlices := [][]byte{\n\t\thelpers.UInt32ToBytes(tx.Version), // 4 bytes\n\t\thelpers.UInt32ToBytes(tx.LockTime), // 4 bytes\n\t\ttxInSlice,\n\t\ttxOutSlice,\n\t}\n\n\tmsg := helpers.ConcatByteArray(txSlices)\n\treturn DHASH(msg)\n}", "func genhash(in []string) string {\n\th := md5.New()\n\tio.WriteString(h, strings.Join(in, \"\"))\n\treturn fmt.Sprintf(\"%x\", h.Sum([]byte{}))\n}", "func Hash(i interface{}) string {\n\tv := reflect.ValueOf(i)\n\tif v.Kind() != reflect.Ptr {\n\t\tif !v.CanAddr(){\n\t\t\treturn \"\"\n\t\t}\n\t\tv = v.Addr()\n\t}\n\n\tsize := unsafe.Sizeof(v.Interface())\n\tb := (*[1 << 10]uint8)(unsafe.Pointer(v.Pointer()))[:size:size]\n\n\th := md5.New()\n\treturn base64.StdEncoding.EncodeToString(h.Sum(b))\n}", "func IfaceHash(i interface{F()}, seed uintptr) uintptr", "func (v *Libravatar) genHash(email *mail.Address, openid *url.URL) string {\n\tif email != nil {\n\t\temail.Address = strings.ToLower(strings.TrimSpace(email.Address))\n\t\tsum := md5.Sum([]byte(email.Address))\n\t\treturn fmt.Sprintf(\"%x\", sum)\n\t} else if openid != nil {\n\t\topenid.Scheme = strings.ToLower(openid.Scheme)\n\t\topenid.Host = strings.ToLower(openid.Host)\n\t\tsum := sha256.Sum256([]byte(openid.String()))\n\t\treturn fmt.Sprintf(\"%x\", sum)\n\t}\n\t// panic, because this should not be reachable\n\tpanic(\"Neither Email or OpenID set\")\n}", "func new256Asm() hash.Hash { return nil }", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff) % 10\n}", "func (ig *IdGen) GetHash(data string) string {\r\n\talgorithm := md5.New()\r\n\talgorithm.Write([]byte(data))\r\n\treturn hex.EncodeToString(algorithm.Sum(nil))\r\n}", "func (c *Client) generateHash(method, path, timestamp string) string {\n\tdata := fmt.Sprintf(\"%v+%v%v?apiuserid=%v&timestamp=%v\", method, c.BaseURL.Host, path, c.APIUserID, timestamp)\n\tmac := hmac.New(sha256.New, []byte(c.APIUserKey))\n\tmac.Write([]byte(data))\n\n\treturn hex.EncodeToString(mac.Sum(nil))\n}", "func hash(i interface{}) string {\n\n var s []byte\n\n switch i.(type) {\n case []byte:\n s = reflect.ValueOf(i).Bytes()\n\n case string:\n s = []byte(reflect.ValueOf(i).String())\n\n default:\n panic(\"Cannot use interface type given in hash\")\n }\n\n h := md5.New()\n\n h.Write(s)\n\n b := h.Sum(nil)\n\n return hex.EncodeToString(b)\n}", "func hashmaker(pass string) string {\n\thash, _ := bcrypt.GenerateFromPassword([]byte(pass), bcrypt.DefaultCost)\n\treturn string(hash)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 
0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func ihash(key string) int {\n\th := fnv.New32a()\n\th.Write([]byte(key))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func (self *ResTransaction)GetHash()string{\n hb := new(utils.HashBuilder)\n hb.Add(self.Creator)\n hb.Add(self.Timestamp.Format(\"2006-01-02 15:04:05\"))\n hb.Add(self.JobBlock)\n hb.Add(self.JobTrans)\n hb.Add(self.Output)\n for i:=0;i<len(self.Inputs);i++{\n hb.Add(self.Inputs[i])\n }\n hb.Add(self.HashSol)\n hb.Add(self.Evaluation)\n 
hb.Add(self.IsMin)\n return fmt.Sprintf(\"%x\",hb.GetHash())\n}", "func GenerateUserEmailHMAC(userEmail string) string { return api.GenerateUserEmailHMAC(userEmail) }", "func computeMD5Hash(toBeEncoded int64) string {\n\tvar computedString string\n\n\tbyteArray := make([]byte, 1024)\n\tn := binary.PutVarint(byteArray, toBeEncoded)\n\n\tcomputedMD5 := md5.Sum(byteArray[0:n])\n\n\tcomputedString = fmt.Sprintf(\"%x\", computedMD5)\n\n\treturn computedString\n\n}", "func (gen *IDGenerator) Hash(string) string {\n\tid := <-gen.ch\n\treturn strconv.FormatUint(id, 10)\n}", "func hash(s string) string {\n\thash := fnv.New32a()\n\thash.Write([]byte(s))\n\tintHash := hash.Sum32()\n\tresult := fmt.Sprintf(\"%08x\", intHash)\n\treturn result\n}", "func genHash(text string) string {\n\th := sha1.New()\n\th.Write([]byte(text))\n\thashed := h.Sum(nil)\n\treturn fmt.Sprintf(\"%v\", hashed)\n}", "func GenUUID(account string) string {\n h1 := md5.New()\n io.WriteString(h1, account)\n io.WriteString(h1, UUIDkey)\n h2 := md5.New()\n io.WriteString(h2, account)\n io.WriteString(h2, MD5key)\n return fmt.Sprintf(\"%x%x\", h1.Sum(nil), h2.Sum(nil))\n}", "func generateHash(src, secret string) string {\n\tkey := []byte(secret)\n\th := hmac.New(sha256.New, key)\n\th.Write([]byte(src))\n\treturn base64.StdEncoding.EncodeToString(h.Sum(nil))\n}", "func ihash(s string) int {\n\th := fnv.New32a()\n\th.Write([]byte(s))\n\treturn int(h.Sum32() & 0x7fffffff)\n}", "func (c *HashRing) generateHash(key string) uint32 {\n\treturn crc32.ChecksumIEEE([]byte(key))\n}", "func GenHashKey() string {\n\tid := uuid.New()\n\treturn hex.EncodeToString(id[:])\n}", "func (h *hasht) hash(input string) uint64 {\n\tvar hash uint64 = FNVOffset\n\tfor _, char := range input {\n\t\thash ^= uint64(char)\n\t\thash *= FNVPrime\n\t}\n\treturn hash\n}", "func Hash(b []byte, seed uint64) uint64", "func Hash(input string) (string, error) {\n\tbytes, err := bcrypt.GenerateFromPassword([]byte(input), 14)\n\treturn string(bytes), err\n}", "func Hash(email string) string {\n\th := md5.New()\n\tio.WriteString(h, email)\n\n\treturn fmt.Sprintf(\"%x\", h.Sum(nil))\n}", "func CalculateHash(key string, iteration int) string {\n\ty := fmt.Sprintf(\"%s%d\", key, iteration)\n\tx := fmt.Sprintf(\"%x\", md5.Sum([]byte(y)))\n\treturn x\n}", "func GenSessionKey() string {\n\n b := make([]byte, 16)\n\n t := time.Now().Unix()\n tmpid := uint16(atomic.AddUint32(&uuid, 1))\n\n b[0] = byte(255)\n b[1] = byte(0)\n b[2] = byte(tmpid)\n b[3] = byte(tmpid >> 8)\n\n b[4] = byte(t)\n b[5] = byte(t >> 8)\n b[6] = byte(t >> 16)\n b[7] = byte(t >> 24)\n\n c, _ := rc4.NewCipher([]byte{0x0c, b[2], b[3], b[0]})\n c.XORKeyStream(b[8:], b[:8])\n\n guid := fmt.Sprintf(\"%x-%x-%x-%x-%x\", b[:4], b[4:6], b[6:8], b[8:12], b[12:])\n h := md5.New()\n io.WriteString(h, guid)\n io.WriteString(h, MD5key)\n\n return fmt.Sprintf(\"%x-%x-%x-%x-%x--%x\", b[:4], b[4:6], b[6:8], b[8:12], b[12:], h.Sum(nil))\n}", "func hash(key uint64) uint64 {\r\n\tkey ^= key >> 33\r\n\tkey *= 0xff51afd7ed558ccd\r\n\tkey ^= key >> 33\r\n\tkey *= 0xc4ceb9fe1a85ec53\r\n\tkey ^= key >> 33\r\n\treturn key\r\n}", "func calculateHash (block Block) string{\n h := sha256.New()\n unique := block.Data + block.PrevHash + block.TimeStamp + strconv.Itoa(block.Nonce)\n h.Write([]byte(unique))\n \n return hex.EncodeToString(h.Sum(nil))\n}", "func TestGenerateHash(t *testing.T) {\n\t// Define The TestCase Struct\n\ttype TestCase struct {\n\t\tName string\n\t\tLength int\n\t}\n\n\t// Create The TestCases\n\ttestCases := 
[]TestCase{\n\t\t{Name: \"\", Length: 32},\n\t\t{Name: \"short string\", Length: 4},\n\t\t{Name: \"long string, 8-character hash\", Length: 8},\n\t\t{Name: \"odd hash length, 13-characters\", Length: 13},\n\t\t{Name: \"very long string with 16-character hash and more than 64 characters total\", Length: 16},\n\t}\n\n\t// Run The TestCases\n\tfor _, testCase := range testCases {\n\t\thash := GenerateHash(testCase.Name, testCase.Length)\n\t\texpected := fmt.Sprintf(\"%x\", md5.Sum([]byte(testCase.Name)))[0:testCase.Length]\n\t\tassert.Equal(t, expected, hash)\n\t}\n\n}", "func createHash(key string) string {\n\thasher := md5.New()\n\thasher.Write([]byte(key))\n\treturn hex.EncodeToString(hasher.Sum(nil))\n}", "func createHash(key string) string {\n\thasher := md5.New()\n\thasher.Write([]byte(key))\n\treturn hex.EncodeToString(hasher.Sum(nil))\n}", "func createHash(key string) string {\n\thasher := md5.New()\n\thasher.Write([]byte(key))\n\treturn hex.EncodeToString(hasher.Sum(nil))\n}", "func (bc *Blockchain) Hash() {\n\n}", "func calculateBlockHash(Term int, Index int, idx int, Timestamp string,BPM string, prevHash string, validator string) string {\nrecord := string(Index) + Timestamp + string(BPM) + prevHash + string(idx) + string(Term) + validator\nreturn calculateHash(record)\n}", "func GenerateHash(base string) (string, int) {\n\t// get current time as a string\n\tnowStr := strconv.Itoa(int(time.Now().Unix()))\n\n\t// hash it\n\thasher := sha256.New()\n\thasher.Write([]byte(nowStr))\n\n\thash := hex.EncodeToString(hasher.Sum(nil))\n\n\t// create a folder named after the hash\n\terr := os.MkdirAll(filepath.Join(base, hash), 0755)\n\tif err != nil {\n\t\treturn \"\", 0\n\t}\n\n\treturn hash, 1\n}", "func EfaceHash(i interface{}, seed uintptr) uintptr", "func (u *UIA2) Hash(data []byte, blength uint64) []byte {\n\treturn u.F9(data, blength)\n}", "func new512Asm() hash.Hash { return nil }", "func (c *HashRing) generateKey(ip string, i int) string {\n\treturn ip + \"#\" + strconv.Itoa(i)\n}", "func Hashbin(tox string) []byte {\n h:= sha256.New()\n h.Write([]byte(tox))\n bs := h.Sum([]byte{})\n return bs \n}", "func ComputeHash(body []byte) string {\n\th := md5.New()\n\th.Write(body)\n\th.Write(kSecret)\n\treturn fmt.Sprintf(\"%x\", h.Sum(nil))\n}", "func generateSHA256HashInBase64Form(preimage string) string {\n\thasher := sha256.New()\n\thasher.Write([]byte(preimage))\n\tshaHash := hasher.Sum(nil)\n\tshaHashBase64 := base64.StdEncoding.EncodeToString(shaHash)\n\treturn shaHashBase64\n}", "func Hash(s string, maxKey uint64) Key {\n\th := fnv.New64a()\n\th.Write([]byte(s))\n\treturn NewKey(h.Sum64() % maxKey)\n}", "func computeNonceSecretHash(nonce string, secret string) string {\n\th := md5.New()\n\th.Write([]byte(nonce + secret))\n\tstr := hex.EncodeToString(h.Sum(nil))\n\treturn str\n}", "func GenerateHash(stringToHash string, length int) string {\n\t// Create an MD5 hash and return however many characters the caller wants (note that the max is 32 for MD5)\n\treturn fmt.Sprintf(fmt.Sprintf(\"%%.%ds\", length), fmt.Sprintf(\"%x\", md5.Sum([]byte(stringToHash))))\n}", "func Hash(str string) string {\n\thasher := fnv.New64a()\n\thasher.Write([]byte(str))\n\treturn hex.EncodeToString(hasher.Sum(nil))\n}", "func hash(s string) string {\n\th := fnv.New32a()\n\t_, err := h.Write([]byte(s))\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprint(h.Sum32())\n}", "func HashASM(k0, k1 uint64, p []byte) uint64", "func hash_func(x, y, n HashValue) (HashValue) {\n return (x*1640531513 ^ 
y*2654435789) % n\n}", "func generateAuthCode(key []byte, t uint64) string {\n\tt /= 30 // converting time for any reason\n\ttb := make([]byte, 8) // 00 00 00 00 00 00 00 00\n\tbinary.BigEndian.PutUint64(tb, t) // 00 00 00 00 xx xx xx xx\n\n\t// evaluate hash code for `tb` by key\n\tmac := hmac.New(sha1.New, key)\n\tmac.Write(tb)\n\thashcode := mac.Sum(nil)\n\n\t// last 4 bits provide initial position\n\t// len(hashcode) = 20 bytes\n\tstart := hashcode[19] & 0xf\n\n\t// extract 4 bytes at `start` and drop first bit\n\tfc32 := binary.BigEndian.Uint32(hashcode[start : start+4])\n\tfc32 &= 1<<31 - 1\n\tfullcode := int(fc32)\n\n\t// generate auth code\n\tcode := make([]byte, 5)\n\tfor i := range code {\n\t\tcode[i] = codeChars[fullcode%codeCharsLen]\n\t\tfullcode /= codeCharsLen\n\t}\n\n\treturn string(code[:])\n}", "func generatePasswordHash(password string) string {\n sum := sha256.Sum256([]byte(password))\n return hex.EncodeToString(sum[:])\n}", "func Hash(length int, key string) int64 {\n\tif key == \"\" {\n\t\treturn 0\n\t}\n\thc := hashCode(key)\n\treturn (hc ^ (hc >> 16)) % int64(length)\n}", "func generateSkey() string {\n myTime := fmt.Sprintf(\"%d\", time.Now().Unix())\n str := utils.Md5String(myTime)\n return str[0:6]\n}", "func createHMACKey() string {\n\tkey := make([]byte, 49)\n\trand.Reader.Read(key)\n\tvar cooked = base64.StdEncoding.EncodeToString(key)\n\treturn cooked\n}", "func hash(id, app string) string {\n\treturn id + \"|\" + app\n}", "func (in *Instance) hash(x, y, mu *big.Int, T uint64) *big.Int {\n\tb := sha512.New()\n\tb.Write(x.Bytes())\n\tb.Write(y.Bytes())\n\tb.Write(mu.Bytes())\n\tbits := make([]byte, 8)\n\tbinary.LittleEndian.PutUint64(bits, T)\n\tb.Write(bits)\n\tres := new(big.Int).SetBytes(b.Sum(nil))\n\tres.Mod(res, in.rsaModulus)\n\treturn res\n}", "func new224Asm() hash.Hash { return nil }", "func siphash(k0, k1, m uint64) uint64 {\n\t// Initialization.\n\tv0 := k0 ^ 0x736f6d6570736575\n\tv1 := k1 ^ 0x646f72616e646f6d\n\tv2 := k0 ^ 0x6c7967656e657261\n\tv3 := k1 ^ 0x7465646279746573\n\tt := uint64(8) << 56\n\n\t// Compression.\n\tv3 ^= m\n\n\t// Round 1.\n\tv0 += v1\n\tv1 = v1<<13 | v1>>(64-13)\n\tv1 ^= v0\n\tv0 = v0<<32 | v0>>(64-32)\n\n\tv2 += v3\n\tv3 = v3<<16 | v3>>(64-16)\n\tv3 ^= v2\n\n\tv0 += v3\n\tv3 = v3<<21 | v3>>(64-21)\n\tv3 ^= v0\n\n\tv2 += v1\n\tv1 = v1<<17 | v1>>(64-17)\n\tv1 ^= v2\n\tv2 = v2<<32 | v2>>(64-32)\n\n\t// Round 2.\n\tv0 += v1\n\tv1 = v1<<13 | v1>>(64-13)\n\tv1 ^= v0\n\tv0 = v0<<32 | v0>>(64-32)\n\n\tv2 += v3\n\tv3 = v3<<16 | v3>>(64-16)\n\tv3 ^= v2\n\n\tv0 += v3\n\tv3 = v3<<21 | v3>>(64-21)\n\tv3 ^= v0\n\n\tv2 += v1\n\tv1 = v1<<17 | v1>>(64-17)\n\tv1 ^= v2\n\tv2 = v2<<32 | v2>>(64-32)\n\n\tv0 ^= m\n\n\t// Compress last block.\n\tv3 ^= t\n\n\t// Round 1.\n\tv0 += v1\n\tv1 = v1<<13 | v1>>(64-13)\n\tv1 ^= v0\n\tv0 = v0<<32 | v0>>(64-32)\n\n\tv2 += v3\n\tv3 = v3<<16 | v3>>(64-16)\n\tv3 ^= v2\n\n\tv0 += v3\n\tv3 = v3<<21 | v3>>(64-21)\n\tv3 ^= v0\n\n\tv2 += v1\n\tv1 = v1<<17 | v1>>(64-17)\n\tv1 ^= v2\n\tv2 = v2<<32 | v2>>(64-32)\n\n\t// Round 2.\n\tv0 += v1\n\tv1 = v1<<13 | v1>>(64-13)\n\tv1 ^= v0\n\tv0 = v0<<32 | v0>>(64-32)\n\n\tv2 += v3\n\tv3 = v3<<16 | v3>>(64-16)\n\tv3 ^= v2\n\n\tv0 += v3\n\tv3 = v3<<21 | v3>>(64-21)\n\tv3 ^= v0\n\n\tv2 += v1\n\tv1 = v1<<17 | v1>>(64-17)\n\tv1 ^= v2\n\tv2 = v2<<32 | v2>>(64-32)\n\n\tv0 ^= t\n\n\t// Finalization.\n\tv2 ^= 0xff\n\n\t// Round 1.\n\tv0 += v1\n\tv1 = v1<<13 | v1>>(64-13)\n\tv1 ^= v0\n\tv0 = v0<<32 | v0>>(64-32)\n\n\tv2 += v3\n\tv3 = v3<<16 | v3>>(64-16)\n\tv3 ^= v2\n\n\tv0 += 
v3\n\tv3 = v3<<21 | v3>>(64-21)\n\tv3 ^= v0\n\n\tv2 += v1\n\tv1 = v1<<17 | v1>>(64-17)\n\tv1 ^= v2\n\tv2 = v2<<32 | v2>>(64-32)\n\n\t// Round 2.\n\tv0 += v1\n\tv1 = v1<<13 | v1>>(64-13)\n\tv1 ^= v0\n\tv0 = v0<<32 | v0>>(64-32)\n\n\tv2 += v3\n\tv3 = v3<<16 | v3>>(64-16)\n\tv3 ^= v2\n\n\tv0 += v3\n\tv3 = v3<<21 | v3>>(64-21)\n\tv3 ^= v0\n\n\tv2 += v1\n\tv1 = v1<<17 | v1>>(64-17)\n\tv1 ^= v2\n\tv2 = v2<<32 | v2>>(64-32)\n\n\t// Round 3.\n\tv0 += v1\n\tv1 = v1<<13 | v1>>(64-13)\n\tv1 ^= v0\n\tv0 = v0<<32 | v0>>(64-32)\n\n\tv2 += v3\n\tv3 = v3<<16 | v3>>(64-16)\n\tv3 ^= v2\n\n\tv0 += v3\n\tv3 = v3<<21 | v3>>(64-21)\n\tv3 ^= v0\n\n\tv2 += v1\n\tv1 = v1<<17 | v1>>(64-17)\n\tv1 ^= v2\n\tv2 = v2<<32 | v2>>(64-32)\n\n\t// Round 4.\n\tv0 += v1\n\tv1 = v1<<13 | v1>>(64-13)\n\tv1 ^= v0\n\tv0 = v0<<32 | v0>>(64-32)\n\n\tv2 += v3\n\tv3 = v3<<16 | v3>>(64-16)\n\tv3 ^= v2\n\n\tv0 += v3\n\tv3 = v3<<21 | v3>>(64-21)\n\tv3 ^= v0\n\n\tv2 += v1\n\tv1 = v1<<17 | v1>>(64-17)\n\tv1 ^= v2\n\tv2 = v2<<32 | v2>>(64-32)\n\n\treturn v0 ^ v1 ^ v2 ^ v3\n}", "func Hash(t *Token) (hash []byte) {\n var sum []byte\n\n // Compute the SHA1 sum of the Token\n {\n shasum := sha1.Sum([]byte(salt+string(*t)))\n copy(sum[:], shasum[:20])\n }\n\n // Encode the sum to hexadecimal\n hex.Encode(sum, sum)\n\n return\n}", "func computeHash(nstObj megav1.NamespaceTemplate) uint64 {\n\thash, err := hashstructure.Hash(nstObj, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Printf(\"computeHash: %d\\n\", hash)\n\treturn hash\n}", "func Generate() []byte {\n\tt := make([]byte, TOKEN_SIZE)\n\n\t//32-64 is pure random...\n\trand.Read(t[32:])\n\n\thash := createHash(t[32:])\n\n\t//\tlogx.D(\"hash:\", base64.URLEncoding.EncodeToString(hash))\n\n\t//copy hash protection to first 32bytes\n\tcopy(t[0:32], hash)\n\n\t//\tlogx.D(\"token:\", base64.URLEncoding.EncodeToString(t))\n\n\treturn t\n}", "func GenHashStr(data interface{}) string {\n\ts := fmt.Sprintf(\"%d\", genHash(data))\n\treturn s\n}", "func (pssOpts *PSSOptions) HashFunc() crypto.Hash", "func (dtk *DcmTagKey) Hash() uint32 {\n\treturn ((uint32(int(dtk.group)<<16) & 0xffff0000) | (uint32(int(dtk.element) & 0xffff)))\n}", "func Hash(src string, secret string) string {\n key := []byte(secret)\n h := hmac.New(sha256.New, key)\n h.Write([]byte(src))\n return base64.StdEncoding.EncodeToString(h.Sum(nil))\n}", "func GenPassword(account string, passwd string) string {\n h := md5.New()\n io.WriteString(h, account)\n io.WriteString(h, passwd)\n io.WriteString(h, UUIDkey)\n return fmt.Sprintf(\"%x\", h.Sum(nil))\n}", "func htkey_hash_str(k1 voidptr, len int) usize {\n\tvar k1p byteptr = (byteptr)(k1)\n\tvar hash usize\n\n\thash = 0 + 5381 + len + 1\n\tfor i := 0; i < len; i++ {\n\t\tc := k1p[i]\n\t\thash = ((hash << 5) + hash) ^ usize(c)\n\t}\n\n\treturn hash\n}" ]
[ "0.682205", "0.63708305", "0.6320564", "0.62342554", "0.6228108", "0.62187105", "0.6194701", "0.61638", "0.6110786", "0.6078033", "0.6066673", "0.60372865", "0.6024374", "0.6024374", "0.6024374", "0.6024374", "0.6024374", "0.6024374", "0.6024374", "0.6024374", "0.6024374", "0.6024374", "0.6024374", "0.6024374", "0.6024374", "0.6024374", "0.6024374", "0.6024374", "0.6024374", "0.6024374", "0.6024374", "0.6024374", "0.6024374", "0.6024374", "0.6024374", "0.6024374", "0.6024374", "0.6024374", "0.6024374", "0.6024374", "0.6024374", "0.6024374", "0.6024374", "0.60106754", "0.60035646", "0.59922016", "0.5975482", "0.5969103", "0.59649575", "0.5963625", "0.5953858", "0.59137917", "0.59017414", "0.58928597", "0.5880091", "0.58677554", "0.58658373", "0.5852951", "0.5839644", "0.5833071", "0.5822261", "0.5812235", "0.5809927", "0.58094853", "0.58094853", "0.58094853", "0.5807519", "0.58001286", "0.579818", "0.57785946", "0.57624054", "0.57497364", "0.5732602", "0.5726931", "0.5721738", "0.5715009", "0.571458", "0.57076776", "0.57064617", "0.5702777", "0.57005227", "0.5698609", "0.5696944", "0.5687296", "0.5670907", "0.566478", "0.56642205", "0.5658694", "0.5653518", "0.5652771", "0.5652027", "0.5633697", "0.56300944", "0.56214744", "0.5620175", "0.5617132", "0.56166285", "0.56082547", "0.56028455", "0.5602672", "0.5599705" ]
0.0
-1
Test that Pool does not hold pointers to previously cached resources.
func TestPoolGC(t *testing.T) { testPool(t, true) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestPool(t *testing.T, p pool.Pool) {\n\tt.Helper()\n\tctx := context.Background()\n\toffers, err := p.Offers(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, want := len(offers), 1; got != want {\n\t\tt.Fatalf(\"got %v, want %v\", got, want)\n\t}\n\t// We accept half the memory and disk; we use 0 CPUs.\n\to := offers[0]\n\tr := o.Available()\n\tvar orig reflow.Resources\n\torig.Set(r)\n\tr[\"cpu\"] = 0\n\tr[\"mem\"] /= 2\n\tr[\"disk\"] /= 2\n\talloc, err := o.Accept(ctx, pool.AllocMeta{Want: r, Owner: \"test\", Labels: nil})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\toffers, err = p.Offers(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, want := len(offers), 1; got != want {\n\t\tt.Fatalf(\"got %v, want %v\", got, want)\n\t}\n\to = offers[0]\n\tlog.Printf(\"offer received %v\", o.Available())\n\tif got, want := o.Available()[\"mem\"], (orig[\"mem\"] - orig[\"mem\"]/2); got != want {\n\t\tt.Fatalf(\"got %v, want %v\", got, want)\n\t}\n\n\tid := reflow.Digester.FromString(\"alloctest\")\n\texec, err := alloc.Put(ctx, id, reflow.ExecConfig{\n\t\tType: \"exec\",\n\t\tImage: bashImage,\n\t\tCmd: \"echo logthis; echo foobar > $out\",\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Give it some time to fetch the image, etc.\n\tctx, cancel := context.WithTimeout(ctx, 1*time.Minute)\n\tdefer cancel()\n\terr = exec.Wait(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tres, err := exec.Result(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif res.Err != nil {\n\t\tt.Fatal(res.Err)\n\t}\n\torigres := res\n\n\t// Now we force expiry to see that we can grab everything.\n\t// We grab a new alloc, and check that our old alloc died;\n\t// there should now be zero offers.\n\tintv := 1 * time.Nanosecond\n\td, err := alloc.Keepalive(ctx, intv)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, want := d, intv; got != want {\n\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t}\n\ttime.Sleep(d)\n\toffers, err = p.Offers(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, want := len(offers), 1; got != want {\n\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t}\n\to = offers[0]\n\tif got, want := o.Available(), orig; !got.Equal(want) {\n\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t}\n\talloc1, err := o.Accept(ctx, pool.AllocMeta{Want: o.Available(), Owner: \"test\", Labels: nil})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, want := alloc1.Resources(), o.Available(); !got.Equal(want) {\n\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t}\n\t// Look it up again to get its zombie.\n\t// Note: in client-server testing we're interacting directly with a client\n\t// not through a cluster implementation, so we'll need to strip off the\n\t// hostname ourselves.\n\tallocID := alloc.ID()\n\tif idx := strings.Index(allocID, \"/\"); idx > 0 {\n\t\tallocID = allocID[idx+1:]\n\t}\n\talloc, err = p.Alloc(ctx, allocID)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\texec, err = alloc.Get(ctx, id)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tres, err = exec.Result(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, want := res, origres; !reflect.DeepEqual(got, want) {\n\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t}\n\trc, err := exec.Logs(ctx, true, false, false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer rc.Close()\n\tb, err := ioutil.ReadAll(rc)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, want := string(b), \"logthis\\n\"; got != want {\n\t\tt.Errorf(\"got %q, want %q\", got, want)\n\t}\n\n\t// We shouldn't have any offers now.\n\toffers, err = 
p.Offers(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, want := len(offers), 0; got != want {\n\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t}\n}", "func TestPool(t *testing.T) {\n\tdefer leaktest.Check(t)()\n\n\tgo func() {\n\t\tfor {\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t}()\n}", "func DontUsePool() {\n\thabbo.Lock()\n\tusePool = false\n\thabbo.Unlock()\n}", "func (ft *FacadeUnitTest) Test_PoolCacheEditPool(c *C) {\n\tft.setupMockDFSLocking()\n\n\tpc := NewPoolCacheEnv()\n\n\tft.hostStore.On(\"FindHostsWithPoolID\", ft.ctx, pc.resourcePool.ID).\n\t\tReturn([]host.Host{pc.firstHost, pc.secondHost}, nil)\n\n\tft.poolStore.On(\"GetResourcePools\", ft.ctx).\n\t\tReturn([]pool.ResourcePool{pc.resourcePool}, nil).Once()\n\n\tft.serviceStore.On(\"GetServicesByPool\", ft.ctx, pc.resourcePool.ID).\n\t\tReturn([]service.Service{pc.firstService, pc.secondService}, nil)\n\n\tft.serviceStore.On(\"GetServiceDetails\", ft.ctx, pc.firstService.ID).\n\t\tReturn(&service.ServiceDetails{\n\t\t\tID: pc.firstService.ID,\n\t\t\tRAMCommitment: pc.firstService.RAMCommitment,\n\t\t}, nil)\n\n\tpools, err := ft.Facade.GetReadPools(ft.ctx)\n\tc.Assert(err, IsNil)\n\tc.Assert(pools, Not(IsNil))\n\tc.Assert(len(pools), Equals, 1)\n\n\tp := pools[0]\n\n\tc.Assert(p.ID, Equals, pc.resourcePool.ID)\n\tc.Assert(p.CreatedAt, TimeEqual, pc.resourcePool.CreatedAt)\n\tc.Assert(p.UpdatedAt, TimeEqual, pc.resourcePool.UpdatedAt)\n\tc.Assert(p.Permissions, Equals, pc.resourcePool.Permissions)\n\n\tpc.resourcePool.Permissions = pool.AdminAccess & pool.DFSAccess\n\n\tft.poolStore.On(\"GetResourcePools\", ft.ctx).\n\t\tReturn([]pool.ResourcePool{pc.resourcePool}, nil).Once()\n\n\tft.poolStore.On(\"Get\", ft.ctx, pool.Key(pc.resourcePool.ID), mock.AnythingOfType(\"*pool.ResourcePool\")).\n\t\tReturn(nil).\n\t\tRun(func(args mock.Arguments) {\n\t\t\t*args.Get(2).(*pool.ResourcePool) = pc.resourcePool\n\t\t})\n\n\tft.poolStore.On(\"Put\", ft.ctx, pool.Key(pc.resourcePool.ID), mock.AnythingOfType(\"*pool.ResourcePool\")).\n\t\tReturn(nil)\n\n\tft.zzk.On(\"UpdateResourcePool\", mock.AnythingOfType(\"*pool.ResourcePool\")).\n\t\tReturn(nil)\n\n\tft.Facade.UpdateResourcePool(ft.ctx, &pc.resourcePool)\n\n\t// GetReadPools should see that the cache is dirty, and update itself\n\tpools, err = ft.Facade.GetReadPools(ft.ctx)\n\tc.Assert(err, IsNil)\n\tc.Assert(pools, Not(IsNil))\n\tc.Assert(len(pools), Equals, 1)\n\n\tp = pools[0]\n\tc.Assert(p.ID, Equals, pc.resourcePool.ID)\n\tc.Assert(p.CreatedAt, TimeEqual, pc.resourcePool.CreatedAt)\n\tc.Assert(p.UpdatedAt, Not(TimeEqual), pc.resourcePool.UpdatedAt)\n\tc.Assert(p.Permissions, Equals, (pool.AdminAccess & pool.DFSAccess))\n}", "func TestPoolRelease(t *testing.T) {\n\ttestPool(t, false)\n}", "func TestPools(t *testing.T) {\n\ttc := testutil.SystemTest(t)\n\tbuf := &bytes.Buffer{}\n\n\t// Tests\n\n\t// Get the pool.\n\ttestutil.Retry(t, 3, 2*time.Second, func(r *testutil.R) {\n\t\tpoolName := fmt.Sprintf(\"projects/%s/locations/%s/pools/%s\", tc.ProjectID, location, poolID)\n\t\tif err := getPool(buf, tc.ProjectID, location, poolID); err != nil {\n\t\t\tr.Errorf(\"getPool got err: %v\", err)\n\t\t}\n\t\tif got := buf.String(); !strings.Contains(got, poolName) {\n\t\t\tr.Errorf(\"getPool got\\n----\\n%v\\n----\\nWant to contain:\\n----\\n%v\\n----\\n\", got, poolName)\n\t\t}\n\t})\n\tbuf.Reset()\n\n\t// Update an existing pool. 
Set the updated peer network to \"\", which\n\t// is the same as the default otherwise the test will take a long time\n\t// to complete.\n\ttestutil.Retry(t, 3, 2*time.Second, func(r *testutil.R) {\n\t\tpoolName := fmt.Sprintf(\"projects/%s/locations/%s/pools/%s\", tc.ProjectID, location, poolID)\n\t\tif err := updatePool(buf, tc.ProjectID, location, poolID, \"\"); err != nil {\n\t\t\tr.Errorf(\"updatePool got err: %v\", err)\n\t\t}\n\t\tif got := buf.String(); !strings.Contains(got, poolName) {\n\t\t\tr.Errorf(\"updatePool got\\n----\\n%v\\n----\\nWant to contain:\\n----\\n%v\\n----\\n\", got, poolName)\n\t\t}\n\t})\n\tbuf.Reset()\n\tt.Logf(\"\\nTestPools() completed\\n\")\n}", "func (this *PoolTestSuite) TestNoInstanceOverlap() {\n\tmaxTotal := 5\n\tnumGoroutines := 100\n\tdelay := 1\n\titerations := 1000\n\tthis.pool.Config.MaxTotal = maxTotal\n\tthis.pool.Config.MaxIdle = maxTotal\n\tthis.pool.Config.TestOnBorrow = true\n\tthis.pool.Config.BlockWhenExhausted = true\n\tthis.pool.Config.MaxWaitMillis = int64(-1)\n\trunTestGoroutines(this.T(), numGoroutines, iterations, delay, this.pool)\n\tthis.Equal(0, this.pool.GetDestroyedByBorrowValidationCount())\n}", "func (this *PoolTestSuite) TestMutable() {\n\tpool := NewObjectPoolWithDefaultConfig(NewPooledObjectFactorySimple(func() (interface{}, error) {\n\t\treturn make(map[string]string), nil\n\t}))\n\tm1 := this.NoErrorWithResult(pool.BorrowObject()).(map[string]string)\n\tm2 := this.NoErrorWithResult(pool.BorrowObject()).(map[string]string)\n\tm1[\"k1\"] = \"v1\"\n\tm2[\"k2\"] = \"v2\"\n\tthis.NoError(pool.ReturnObject(m1))\n\tthis.NoError(pool.ReturnObject(m2))\n\tthis.Equal(2, pool.GetNumIdle())\n\tpool.Close()\n}", "func TestPoolDoDoesNotBlock(t *T) {\n\tsize := 10\n\trequestTimeout := 200 * time.Millisecond\n\tredialInterval := 100 * time.Millisecond\n\n\tconnFunc := PoolConnFunc(func(string, string) (Conn, error) {\n\t\treturn dial(DialTimeout(requestTimeout)), nil\n\t})\n\tpool := testPool(size,\n\t\tPoolOnEmptyCreateAfter(redialInterval),\n\t\tPoolPipelineWindow(0, 0),\n\t\tconnFunc,\n\t)\n\n\tassertPoolConns := func(exp int) {\n\t\tassert.Equal(t, exp, pool.NumAvailConns())\n\t}\n\tassertPoolConns(size)\n\n\tvar wg sync.WaitGroup\n\tvar timeExceeded uint32\n\n\t// here we try to imitate external requests which come one at a time\n\t// and exceed the number of connections in pool\n\tfor i := 0; i < 5*size; i++ {\n\t\twg.Add(1)\n\t\tgo func(i int) {\n\t\t\ttime.Sleep(time.Duration(i*10) * time.Millisecond)\n\n\t\t\ttimeStart := time.Now()\n\t\t\terr := pool.Do(WithConn(\"\", func(conn Conn) error {\n\t\t\t\ttime.Sleep(requestTimeout)\n\t\t\t\tconn.(*ioErrConn).lastIOErr = errors.New(\"i/o timeout\")\n\t\t\t\treturn nil\n\t\t\t}))\n\t\t\tassert.NoError(t, err)\n\n\t\t\tif time.Since(timeStart)-requestTimeout-redialInterval > 20*time.Millisecond {\n\t\t\t\tatomic.AddUint32(&timeExceeded, 1)\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(i)\n\t}\n\n\twg.Wait()\n\tassert.True(t, timeExceeded == 0)\n}", "func TestPoolInternalConflict(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\", \"10.0.10.64/28\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, false, nil)\n\n\tawait := fixture.AwaitPool(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != poolResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tpool := fixture.PatchedPool(action)\n\n\t\treturn !isPoolConflicting(pool)\n\t}, time.Second)\n\n\tgo 
fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected pool to be marked conflicting\")\n\t}\n\n\tawait = fixture.AwaitPool(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != poolResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tpool := fixture.PatchedPool(action)\n\n\t\treturn !isPoolConflicting(pool)\n\t}, 2*time.Second)\n\n\tpool, err := fixture.poolClient.Get(context.Background(), \"pool-a\", meta_v1.GetOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tpool.Spec.Cidrs = []cilium_api_v2alpha1.CiliumLoadBalancerIPPoolCIDRBlock{\n\t\t{\n\t\t\tCidr: \"10.0.10.0/24\",\n\t\t},\n\t}\n\n\t_, err = fixture.poolClient.Update(context.Background(), pool, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected pool to be un-marked conflicting\")\n\t}\n}", "func (ft *FacadeUnitTest) Test_PoolCacheRemoveHost(c *C) {\n\tft.setupMockDFSLocking()\n\n\tpc := NewPoolCacheEnv()\n\n\tft.hostStore.On(\"FindHostsWithPoolID\", ft.ctx, pc.resourcePool.ID).\n\t\tReturn([]host.Host{pc.firstHost, pc.secondHost}, nil).Once()\n\n\tft.poolStore.On(\"GetResourcePools\", ft.ctx).\n\t\tReturn([]pool.ResourcePool{pc.resourcePool}, nil)\n\n\tft.serviceStore.On(\"GetServicesByPool\", ft.ctx, pc.resourcePool.ID).\n\t\tReturn([]service.Service{pc.firstService, pc.secondService}, nil)\n\n\tft.serviceStore.On(\"GetServiceDetails\", ft.ctx, pc.firstService.ID).\n\t\tReturn(&service.ServiceDetails{\n\t\t\tID: pc.firstService.ID,\n\t\t\tRAMCommitment: pc.firstService.RAMCommitment,\n\t\t}, nil)\n\n\tft.hostStore.On(\"Get\", ft.ctx, host.HostKey(pc.secondHost.ID), mock.AnythingOfType(\"*host.Host\")).\n\t\tReturn(nil).\n\t\tRun(func(args mock.Arguments) {\n\t\t\t*args.Get(2).(*host.Host) = pc.secondHost\n\t\t})\n\n\tft.zzk.On(\"RemoveHost\", &pc.secondHost).Return(nil)\n\tft.zzk.On(\"UnregisterDfsClients\", []host.Host{pc.secondHost}).Return(nil)\n\n\tft.hostkeyStore.On(\"Delete\", ft.ctx, pc.secondHost.ID).Return(nil)\n\tft.hostStore.On(\"Delete\", ft.ctx, host.HostKey(pc.secondHost.ID)).Return(nil)\n\n\tpools, err := ft.Facade.GetReadPools(ft.ctx)\n\tc.Assert(err, IsNil)\n\tc.Assert(pools, Not(IsNil))\n\tc.Assert(len(pools), Equals, 1)\n\n\tp := pools[0]\n\n\tc.Assert(p.ID, Equals, pc.resourcePool.ID)\n\tc.Assert(p.CoreCapacity, Equals, 14)\n\tc.Assert(p.MemoryCapacity, Equals, uint64(22000))\n\n\terr = ft.Facade.RemoveHost(ft.ctx, pc.secondHost.ID)\n\tc.Assert(err, IsNil)\n\n\tft.hostStore.On(\"FindHostsWithPoolID\", ft.ctx, pc.resourcePool.ID).\n\t\tReturn([]host.Host{pc.firstHost}, nil).Once()\n\n\tpools, err = ft.Facade.GetReadPools(ft.ctx)\n\tc.Assert(err, IsNil)\n\tc.Assert(pools, Not(IsNil))\n\tc.Assert(len(pools), Equals, 1)\n\n\tp = pools[0]\n\tc.Assert(p.ID, Equals, pc.resourcePool.ID)\n\tc.Assert(p.CoreCapacity, Equals, 6)\n\tc.Assert(p.MemoryCapacity, Equals, uint64(12000))\n}", "func (this *PoolTestSuite) TestEqualsIndiscernible() {\n\tpool := NewObjectPoolWithDefaultConfig(NewPooledObjectFactorySimple(func() (interface{}, error) {\n\t\treturn make(map[string]string), nil\n\t}))\n\tm1 := this.NoErrorWithResult(pool.BorrowObject())\n\tm2 := this.NoErrorWithResult(pool.BorrowObject())\n\tthis.NoError(pool.ReturnObject(m1))\n\tthis.NoError(pool.ReturnObject(m2))\n\tpool.Close()\n}", "func (this *PoolTestSuite) TestInvalidateFreesCapacity() {\n\tthis.pool.Config.MaxTotal = 2\n\tthis.pool.Config.MaxWaitMillis = 
500\n\tthis.pool.Config.BlockWhenExhausted = true\n\t// Borrow an instance and hold if for 5 seconds\n\tch1 := waitTestGoroutine(this.pool, 5000)\n\t// Borrow another instance\n\tobj := this.NoErrorWithResult(this.pool.BorrowObject())\n\t// Launch another goroutine - will block, but fail in 500 ms\n\tch2 := waitTestGoroutine(this.pool, 100)\n\t// Invalidate the object borrowed by this goroutine - should allow goroutine2 to create\n\tsleep(20)\n\tthis.NoError(this.pool.InvalidateObject(obj))\n\tsleep(600) // Wait for goroutine2 to timeout\n\tresult2 := <-ch2\n\tclose(ch2)\n\tif result2.error != nil {\n\t\tthis.Fail(result2.error.Error())\n\t}\n\t<-ch1\n\tclose(ch1)\n}", "func (ft *FacadeUnitTest) Test_PoolCacheAddHost(c *C) {\n\tft.setupMockDFSLocking()\n\n\tpc := NewPoolCacheEnv()\n\n\tft.hostStore.On(\"FindHostsWithPoolID\", ft.ctx, pc.resourcePool.ID).\n\t\tReturn([]host.Host{pc.firstHost}, nil).Once()\n\n\tft.poolStore.On(\"GetResourcePools\", ft.ctx).\n\t\tReturn([]pool.ResourcePool{pc.resourcePool}, nil)\n\n\tft.serviceStore.On(\"GetServicesByPool\", ft.ctx, pc.resourcePool.ID).\n\t\tReturn([]service.Service{pc.firstService, pc.secondService}, nil)\n\n\tft.serviceStore.On(\"GetServiceDetails\", ft.ctx, pc.firstService.ID).\n\t\tReturn(&service.ServiceDetails{\n\t\t\tID: pc.firstService.ID,\n\t\t\tRAMCommitment: pc.firstService.RAMCommitment,\n\t\t}, nil)\n\n\tft.hostStore.On(\"Get\", ft.ctx, host.HostKey(pc.secondHost.ID), mock.AnythingOfType(\"*host.Host\")).\n\t\tReturn(datastore.ErrNoSuchEntity{}).\n\t\tOnce()\n\n\tpools, err := ft.Facade.GetReadPools(ft.ctx)\n\tc.Assert(err, IsNil)\n\tc.Assert(pools, Not(IsNil))\n\tc.Assert(len(pools), Equals, 1)\n\n\tp := pools[0]\n\n\tc.Assert(p.ID, Equals, pc.resourcePool.ID)\n\tc.Assert(p.CoreCapacity, Equals, 6)\n\tc.Assert(p.MemoryCapacity, Equals, uint64(12000))\n\n\tft.poolStore.On(\"Get\", ft.ctx, pool.Key(pc.resourcePool.ID), mock.AnythingOfType(\"*pool.ResourcePool\")).\n\t\tReturn(nil).\n\t\tRun(func(args mock.Arguments) {\n\t\t\t*args.Get(2).(*pool.ResourcePool) = pc.resourcePool\n\t\t})\n\n\tft.hostStore.On(\"FindHostsWithPoolID\", ft.ctx, pc.resourcePool.ID).\n\t\tReturn([]host.Host{pc.firstHost, pc.secondHost}, nil)\n\n\tft.hostkeyStore.On(\"Put\", ft.ctx, pc.secondHost.ID, mock.AnythingOfType(\"*hostkey.HostKey\")).\n\t\tReturn(nil)\n\n\tft.hostStore.On(\"Put\", ft.ctx, host.HostKey(pc.secondHost.ID), &pc.secondHost).\n\t\tReturn(nil)\n\n\tft.zzk.On(\"AddHost\", &pc.secondHost).Return(nil)\n\n\t_, err = ft.Facade.AddHost(ft.ctx, &pc.secondHost)\n\tc.Assert(err, IsNil)\n\n\tpools, err = ft.Facade.GetReadPools(ft.ctx)\n\tc.Assert(err, IsNil)\n\tc.Assert(pools, Not(IsNil))\n\tc.Assert(len(pools), Equals, 1)\n\n\tp = pools[0]\n\tc.Assert(p.ID, Equals, pc.resourcePool.ID)\n\tc.Assert(p.CoreCapacity, Equals, 14)\n\tc.Assert(p.MemoryCapacity, Equals, uint64(22000))\n}", "func TestPoolGet(t *T) {\n\tgetBlock := func(p *Pool) (time.Duration, error) {\n\t\tstart := time.Now()\n\t\t_, err := p.get()\n\t\treturn time.Since(start), err\n\t}\n\n\t// this one is a bit weird, cause it would block infinitely if we let it\n\tt.Run(\"onEmptyWait\", func(t *T) {\n\t\tpool := testPool(1, PoolOnEmptyWait())\n\t\tconn, err := pool.get()\n\t\tassert.NoError(t, err)\n\n\t\tgo func() {\n\t\t\ttime.Sleep(2 * time.Second)\n\t\t\tpool.put(conn)\n\t\t}()\n\t\ttook, err := getBlock(pool)\n\t\tassert.NoError(t, err)\n\t\tassert.True(t, took-2*time.Second < 20*time.Millisecond)\n\t})\n\n\t// the rest are pretty straightforward\n\tgen := func(mkOpt 
func(time.Duration) PoolOpt, d time.Duration, expErr error) func(*T) {\n\t\treturn func(t *T) {\n\t\t\tpool := testPool(0, PoolOnFullClose(), mkOpt(d))\n\t\t\ttook, err := getBlock(pool)\n\t\t\tassert.Equal(t, expErr, err)\n\t\t\tassert.True(t, took-d < 20*time.Millisecond)\n\t\t}\n\t}\n\n\tt.Run(\"onEmptyCreate\", gen(PoolOnEmptyCreateAfter, 0, nil))\n\tt.Run(\"onEmptyCreateAfter\", gen(PoolOnEmptyCreateAfter, 1*time.Second, nil))\n\tt.Run(\"onEmptyErr\", gen(PoolOnEmptyErrAfter, 0, ErrPoolEmpty))\n\tt.Run(\"onEmptyErrAfter\", gen(PoolOnEmptyErrAfter, 1*time.Second, ErrPoolEmpty))\n}", "func getNewPool(cfg *config.Pool) *pool {\n\tvar p pool\n\n\tp.lockDuration = cfg.LockDuration\n\n\tp.locks = make(map[*config.Resource]*ResourceLock)\n\tfor _, resource := range cfg.Resources {\n\t\tp.locks[resource] = nil\n\t}\n\n\tkeys, _ := storage.GetKeys(storageKey)\n\tfor _, key := range keys {\n\t\tvar lock ResourceLock\n\t\tif err := storage.Read(storageKey, key, &lock); err != nil {\n\t\t\tlog.Errorf(\"[Pool] unable to restore lock for '%s': %s\", key, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor k := range p.locks {\n\t\t\tif k.Name == key {\n\t\t\t\tlock.Resource = *k\n\t\t\t\tp.locks[k] = &lock\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn &p\n}", "func PutResponseIntoPool(r *Response) { r.Reset(nil); responsePool.Put(r) }", "func TestPoolDelete(t *testing.T) {\n\tinitDone := make(chan struct{})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t\tmkPool(poolBUID, \"pool-b\", []string{\"10.0.20.0/24\"}),\n\t}, true, true, func() {\n\t\tclose(initDone)\n\t})\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t},\n\t\t},\n\t)\n\n\tvar allocPool string\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif strings.HasPrefix(svc.Status.LoadBalancer.Ingress[0].IP, \"10.0.10\") {\n\t\t\tallocPool = \"pool-a\"\n\t\t} else {\n\t\t\tallocPool = \"pool-b\"\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\t<-initDone\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif strings.HasPrefix(svc.Status.LoadBalancer.Ingress[0].IP, \"10.0.10\") {\n\t\t\tif allocPool == \"pool-a\" {\n\t\t\t\tt.Error(\"New IP was allocated from deleted pool\")\n\t\t\t\treturn true\n\t\t\t}\n\t\t} else {\n\t\t\tif allocPool == \"pool-b\" {\n\t\t\t\tt.Error(\"New IP was allocated from deleted 
pool\")\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\terr := fixture.poolClient.Delete(context.Background(), allocPool, meta_v1.DeleteOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func (s *WorkSuite) TestZeroed(c *check.C) {\n\tctx, cancel := context.WithCancel(context.TODO())\n\tdefer cancel()\n\tp := NewPool(ctx)\n\tkey := \"some-key\"\n\tp.Set(key, 1)\n\tvar l Lease\n\tselect {\n\tcase l = <-p.Acquire():\n\t\tc.Assert(l.Key().(string), check.Equals, key)\n\t\tl.Release()\n\tcase <-time.After(time.Millisecond * 128):\n\t\tc.Errorf(\"timeout waiting for lease grant\")\n\t}\n\tp.Set(key, 0)\n\t// modifications to counts are *ordered*, but asynchronous,\n\t// so we could actually receieve a lease here if we don't sleep\n\t// briefly. if we opted for condvars instead of channels, this\n\t// issue could be avoided at the cost of more cumbersome\n\t// composition/cancellation.\n\ttime.Sleep(time.Millisecond * 10)\n\tselect {\n\tcase l := <-p.Acquire():\n\t\tc.Errorf(\"unexpected lease grant: %+v\", l)\n\tcase <-time.After(time.Millisecond * 128):\n\t}\n}", "func TestSourcePool_connsReturning(t *testing.T) {\n\tsrc := &testSource{}\n\texpires := time.Millisecond\n\tpool := store.Pool(src, 1, expires)\n\tpool.Open()\n\n\tgetConn := func() <-chan store.Conn {\n\t\tout := make(chan store.Conn)\n\t\tgo func() {\n\t\t\tconn, _ := pool.Open()\n\t\t\tout <- conn\n\t\t\tclose(out)\n\t\t}()\n\t\treturn out\n\t}\n\n\tselect {\n\tcase <-getConn():\n\t\t// skip\n\tcase <-time.After((expires * 15) / 10):\n\t\t// wait some more to see if get new connection\n\t\tt.Errorf(\"failed to get connection after conn expires\")\n\t}\n}", "func TestZeroed(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tt.Cleanup(cancel)\n\tp := NewPool(ctx)\n\tp.Set(1)\n\tvar l Lease\n\tselect {\n\tcase l = <-p.Acquire():\n\t\tl.Release()\n\tcase <-time.After(time.Millisecond * 128):\n\t\tt.Errorf(\"timeout waiting for lease grant\")\n\t}\n\tp.Set(0)\n\t// modifications to counts are *ordered*, but asynchronous,\n\t// so we could actually receive a lease here if we don't sleep\n\t// briefly. 
if we opted for condvars instead of channels, this\n\t// issue could be avoided at the cost of more cumbersome\n\t// composition/cancellation.\n\ttime.Sleep(time.Millisecond * 10)\n\tselect {\n\tcase l := <-p.Acquire():\n\t\tt.Errorf(\"unexpected lease grant: %+v\", l)\n\tcase <-time.After(time.Millisecond * 128):\n\t}\n}", "func Test_Static_Pool_Handle_Dead(t *testing.T) {\n\tctx := context.Background()\n\tp, err := Initialize(\n\t\tcontext.Background(),\n\t\tfunc() *exec.Cmd { return exec.Command(\"php\", \"../tests/slow-destroy.php\", \"echo\", \"pipes\") },\n\t\tpipe.NewPipeFactory(),\n\t\t&Config{\n\t\t\tNumWorkers: 5,\n\t\t\tAllocateTimeout: time.Second * 100,\n\t\t\tDestroyTimeout: time.Second,\n\t\t},\n\t)\n\tassert.NoError(t, err)\n\tassert.NotNil(t, p)\n\n\ttime.Sleep(time.Second)\n\tfor i := range p.Workers() {\n\t\tp.Workers()[i].State().Set(worker.StateErrored)\n\t}\n\n\t_, err = p.Exec(&payload.Payload{Body: []byte(\"hello\")})\n\tassert.NoError(t, err)\n\tp.Destroy(ctx)\n}", "func NewPool(t mockConstructorTestingTNewPool) *Pool {\n\tmock := &Pool{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func (cacheableStoreMock) Discard() {}", "func (p *ResourcePool) getAvailable(timeout <-chan time.Time) (ResourceWrapper, error) {\n\n\t//Wait for an object, or a timeout\n\tselect {\n\tcase <-timeout:\n\t\treturn ResourceWrapper{p: p, e: ResourceTimeoutError}, ResourceTimeoutError\n\n\tcase wrapper, ok := <-p.resources:\n\n\t\t//pool is closed\n\t\tif !ok {\n\t\t\treturn ResourceWrapper{p: p, e: PoolClosedError}, PoolClosedError\n\t\t}\n\n\t\t//decriment the number of available resources\n\t\tatomic.AddUint32(&p.nAvailable, ^uint32(0))\n\n\t\t//if the resource fails the test, close it and wait to get another resource\n\t\tif p.resTest(wrapper.Resource) != nil {\n\t\t\tp.resClose(wrapper.Resource)\n\t\t\twrapper.Close()\n\t\t\treturn ResourceWrapper{p: p, e: ResourceTestError}, ResourceTestError\n\t\t}\n\n\t\t//we got a valid resource to return\n\t\t//signal the filler that we need to fill\n\t\treturn wrapper, wrapper.e\n\n\t//we don't have a resource available\n\t//lets create one if we can\n\tdefault:\n\n\t\t//try to obtain a lock for a new resource\n\t\tif n_open := atomic.AddUint32(&p.open, 1); n_open > p.Cap() {\n\t\t\t//decriment\n\t\t\tatomic.AddUint32(&p.open, ^uint32(0))\n\t\t\treturn ResourceWrapper{p: p, e: ResourceExhaustedError}, ResourceExhaustedError\n\t\t}\n\n\t\tresource, err := p.resOpen()\n\t\tif err != nil {\n\t\t\t//decriment\n\t\t\tatomic.AddUint32(&p.open, ^uint32(0))\n\t\t\treturn ResourceWrapper{p: p, e: ResourceCreationError}, ResourceCreationError\n\t\t}\n\n\t\treturn ResourceWrapper{p: p, Resource: resource}, nil\n\t}\n}", "func (mio *Mio) InPool(pool string) bool {\n if mio.obj == nil {\n return false\n }\n id1, err := ScanID(pool)\n if err != nil {\n return false\n }\n p := mio.objPool\n id2 := C.struct_m0_uint128{p.f_container, p.f_key}\n\n return C.m0_uint128_cmp(&id1, &id2) == 0\n}", "func MustUnmarshalPool(cdc *wire.Codec, value []byte) Pool {\n\tpool, err := UnmarshalPool(cdc, value)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn pool\n}", "func (rp *resourcePool) Maintain() {\n\trp.Lock()\n\tdefer rp.Unlock()\n\n\tif rp.closed {\n\t\treturn\n\t}\n\n\tfor curr := rp.end; curr != nil; curr = curr.prev {\n\t\tif rp.expiredFn(curr.value) {\n\t\t\trp.remove(curr)\n\t\t\trp.closeFn(curr.value)\n\t\t\trp.totalSize--\n\t\t}\n\t}\n\n\tfor rp.totalSize < rp.minSize 
{\n\t\trp.add(nil)\n\t\trp.totalSize++\n\t}\n\n\t// reset the timer for the background cleanup routine\n\tif rp.maintainTimer == nil {\n\t\trp.maintainTimer = time.AfterFunc(rp.maintainInterval, rp.Maintain)\n\t}\n\tif !rp.maintainTimer.Stop() {\n\t\trp.maintainTimer = time.AfterFunc(rp.maintainInterval, rp.Maintain)\n\t\treturn\n\t}\n\trp.maintainTimer.Reset(rp.maintainInterval)\n}", "func ensureNoTxs(t *testing.T, reactor *Reactor, timeout time.Duration) {\n\ttime.Sleep(timeout) // wait for the txs in all mempools\n\tassert.Zero(t, reactor.mempool.Size())\n}", "func UsePool() {\n\thabbo.Lock()\n\tusePool = true\n\thabbo.Unlock()\n}", "func TestFull(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tt.Cleanup(cancel)\n\tp := NewPool(ctx)\n\tvar wg sync.WaitGroup\n\t// signal channel to cause the first group of workers to\n\t// release their leases.\n\tg1done := make(chan struct{})\n\t// timeout channel indicating all of group one should\n\t// have acquired their leases.\n\tg1timeout := make(chan struct{})\n\tgo func() {\n\t\ttime.Sleep(time.Millisecond * 500)\n\t\tclose(g1timeout)\n\t}()\n\tp.Set(200)\n\t// spawn first group of workers.\n\tfor i := 0; i < 200; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tselect {\n\t\t\tcase l := <-p.Acquire():\n\t\t\t\t<-g1done\n\t\t\t\tl.Release()\n\t\t\tcase <-g1timeout:\n\t\t\t\tt.Errorf(\"Timeout waiting for lease\")\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t}\n\t<-g1timeout\n\t// no additional leases should exist\n\tselect {\n\tcase l := <-p.Acquire():\n\t\tt.Errorf(\"unexpected lease: %+v\", l)\n\tdefault:\n\t}\n\t// spawn a second group of workers that won't be able to\n\t// acquire their leases until the first group is done.\n\tfor i := 0; i < 200; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tselect {\n\t\t\tcase <-p.Acquire():\n\t\t\t\t// leak deliberately\n\t\t\tcase <-time.After(time.Millisecond * 512):\n\t\t\t\tt.Errorf(\"Timeout waiting for lease\")\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t}\n\t// signal first group is done\n\tclose(g1done)\n\t// wait for second group to acquire leases.\n\twg.Wait()\n\t// no additional leases should exist\n\tselect {\n\tcase l := <-p.Acquire():\n\t\tcounts := l.loadCounts()\n\t\tt.Errorf(\"unexpected lease grant: %+v, counts=%+v\", l, counts)\n\tcase <-time.After(time.Millisecond * 128):\n\t}\n\t// make one additional lease available\n\tp.Set(201)\n\tselect {\n\tcase l := <-p.Acquire():\n\t\tl.Release()\n\tcase <-time.After(time.Millisecond * 128):\n\t\tt.Errorf(\"timeout waiting for lease grant\")\n\t}\n}", "func TestPool(t *testing.T) {\n\n\tvar res []WaitFunc\n\n\tpool := NewLimited(4)\n\tdefer pool.Close()\n\n\tnewFunc := func(d time.Duration) WorkFunc {\n\t\treturn func(context.Context) (interface{}, error) {\n\t\t\ttime.Sleep(d)\n\t\t\treturn nil, nil\n\t\t}\n\t}\n\n\treportCount := int64(0)\n\treport := func(v interface{}, err error) {\n\t\tatomic.AddInt64(&reportCount, 1)\n\t}\n\n\tfor i := 0; i < 4; i++ {\n\t\twu := pool.Queue(context.Background(), newFunc(time.Second*1), report)\n\t\tres = append(res, wu)\n\t}\n\n\tvar count int\n\n\tfor i, wu := range res {\n\t\tfmt.Println(i)\n\t\tv, e := wu()\n\t\trequire.Equal(t, e, nil)\n\t\trequire.Equal(t, v, nil)\n\t\tcount++\n\t}\n\n\trequire.Equal(t, count, 4)\n\trequire.Equal(t, reportCount, int64(4))\n\n\tpool.Close() // testing no error occurs as Close will be called twice once defer pool.Close() fires\n}", "func TestPoolTimeout(t *testing.T) {\n\tdefer leaktest.CheckTimeout(t, time.Second)()\n\n\tgo func() {\n\t\tfor 
{\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t}()\n}", "func (ft *FacadeUnitTest) Test_PoolCacheEditService(c *C) {\n\tft.setupMockDFSLocking()\n\n\tpc := NewPoolCacheEnv()\n\n\tft.hostStore.On(\"FindHostsWithPoolID\", ft.ctx, pc.resourcePool.ID).\n\t\tReturn([]host.Host{pc.firstHost, pc.secondHost}, nil)\n\n\tft.poolStore.On(\"GetResourcePools\", ft.ctx).\n\t\tReturn([]pool.ResourcePool{pc.resourcePool}, nil)\n\n\tft.serviceStore.On(\"GetServicesByPool\", ft.ctx, pc.resourcePool.ID).\n\t\tReturn([]service.Service{pc.firstService, pc.secondService}, nil).Once()\n\n\tft.serviceStore.On(\"GetServiceDetails\", ft.ctx, pc.firstService.ID).\n\t\tReturn(&service.ServiceDetails{\n\t\t\tID: pc.firstService.ID,\n\t\t\tRAMCommitment: pc.firstService.RAMCommitment,\n\t\t}, nil)\n\n\tft.serviceStore.On(\"Get\", ft.ctx, pc.firstService.ID).\n\t\tReturn(&pc.firstService, nil)\n\n\tft.serviceStore.On(\"Put\", ft.ctx, mock.AnythingOfType(\"*service.Service\")).\n\t\tReturn(nil)\n\n\tft.serviceStore.On(\"GetServiceDetailsByParentID\", ft.ctx, mock.AnythingOfType(\"string\"), mock.AnythingOfType(\"time.Duration\")).\n\t\tReturn([]service.ServiceDetails{}, nil)\n\n\tft.configStore.On(\"GetConfigFiles\", ft.ctx, mock.AnythingOfType(\"string\"), mock.AnythingOfType(\"string\")).\n\t\tReturn([]*serviceconfigfile.SvcConfigFile{}, nil)\n\n\temptyMap := []*servicetemplate.ServiceTemplate{}\n\tft.templateStore.On(\"GetServiceTemplates\", ft.ctx).Return(emptyMap, nil)\n\n\tft.zzk.On(\"UpdateService\", ft.ctx, mock.AnythingOfType(\"string\"), mock.AnythingOfType(\"*service.Service\"), false, false).\n\t\tReturn(nil)\n\n\n\tpools, err := ft.Facade.GetReadPools(ft.ctx)\n\tc.Assert(err, IsNil)\n\tc.Assert(pools, Not(IsNil))\n\tc.Assert(len(pools), Equals, 1)\n\n\tp := pools[0]\n\n\tc.Assert(p.ID, Equals, pc.resourcePool.ID)\n\tc.Assert(p.MemoryCommitment, Equals, uint64(3000))\n\n\tpc.firstService.RAMCommitment = utils.EngNotation{\n\t\tValue: uint64(2000),\n\t}\n\n\terr = ft.Facade.UpdateService(ft.ctx, pc.firstService)\n\tc.Assert(err, IsNil)\n\n\t// Make sure that we return the new secondService with the updated RAMCommitment\n\tft.serviceStore.On(\"GetServicesByPool\", ft.ctx, pc.resourcePool.ID).\n\t\tReturn([]service.Service{pc.firstService, pc.secondService}, nil).Once()\n\n\t// GetReadPools should see that the cache is dirty, and update itself\n\tpools, err = ft.Facade.GetReadPools(ft.ctx)\n\tc.Assert(err, IsNil)\n\tc.Assert(pools, Not(IsNil))\n\tc.Assert(len(pools), Equals, 1)\n\n\tp = pools[0]\n\tc.Assert(p.ID, Equals, pc.resourcePool.ID)\n\tc.Assert(p.MemoryCommitment, Equals, uint64(4000))\n}", "func TestDisablePool(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn 
true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}, 500*time.Millisecond)\n\n\tpoolA.Spec.Disabled = true\n\n\t_, err := fixture.poolClient.Update(context.Background(), poolA, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !await.Block() {\n\t\tt.Fatal(\"Unexpected service status update\")\n\t}\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].externallyDisabled {\n\t\tt.Fatal(\"The range has not been externally disabled\")\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"service-b\" {\n\t\t\tt.Error(\"Expected service status update to occur on service-b\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to receive no ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tserviceB := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-b\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceBUID,\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t},\n\t}\n\n\t_, err = fixture.svcClient.Services(\"default\").Create(context.Background(), serviceB, meta_v1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif svc.Name != \"service-b\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tpoolA.Spec.Disabled = false\n\n\t_, err = fixture.poolClient.Update(context.Background(), poolA, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func Test_NewTxPool(t *testing.T) {\n\tvar slot uint64 = 64\n\tassert := assert.New(t)\n\n\tmock_config := mock_txpool_config(slot)\n\ttxpool := NewTxPool(mock_config)\n\tassert.NotNil(txpool)\n\tinstance := txpool.(*TxPool)\n\tassert.Equal(slot, instance.config.GlobalSlots, \"they should be equal\")\n\tassert.NotNil(instance.all)\n\tassert.NotNil(instance.process)\n\tassert.NotNil(instance.txsQueue)\n\n\tmock_config = mock_txpool_config(uint64(0))\n\ttxpool = NewTxPool(mock_config)\n\tinstance = txpool.(*TxPool)\n\tassert.Equal(uint64(4096), instance.config.GlobalSlots, \"they should be equal\")\n\n}", "func TestPool(ctx context.Context, pool *redis.Pool) error {\n\tctx, cancel := context.WithCancel(ctx)\n\tconn, err := pool.GetContext(ctx)\n\tcancel()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"redis.TestPool: getting connection from pool failed\")\n\t}\n\n\t_, err = conn.Do(\"PING\")\n\t_ = conn.Close()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"redis.TestPool: performing PING 
failed\")\n\t}\n\n\treturn nil\n}", "func (impl *IPv4Pool) NeedCheckDuplication() bool {\n\treturn true\n}", "func BenchmarkPoolStarvation(b *testing.B) {\n\tvar p Pool\n\tcount := 100\n\t// Reduce number of putted objects by 33 %. It creates objects starvation\n\t// that force P-local storage to steal objects from other Ps.\n\tcountStarved := count - int(float32(count)*0.33)\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\tfor b := 0; b < countStarved; b++ {\n\t\t\t\tp.Put(1)\n\t\t\t}\n\t\t\tfor b := 0; b < count; b++ {\n\t\t\t\tp.Get()\n\t\t\t}\n\t\t}\n\t})\n}", "func PoolVerification(t *testing.T) bool {\n\tt.Logf(\"Verifying pools...\")\n\tpools := lib.FetchPools(t, AviClients[0])\n\tif ingressType == MULTIHOST && (len(pools) < ((len(ingressesCreated) * 2) + initialNumOfPools)) {\n\t\treturn false\n\t} else if len(pools) < len(ingressesCreated)+initialNumOfPools {\n\t\treturn false\n\t}\n\tvar ingressPoolList []string\n\tvar poolList []string\n\tif ingressType == INSECURE {\n\t\tfor i := 0; i < len(ingressHostNames); i++ {\n\t\t\tingressPoolName := clusterName + \"--\" + ingressHostNames[i] + \"_-\" + namespace + \"-\" + ingressesCreated[i]\n\t\t\tingressPoolList = append(ingressPoolList, ingressPoolName)\n\t\t}\n\t} else if ingressType == SECURE {\n\t\tfor i := 0; i < len(ingressHostNames); i++ {\n\t\t\tingressPoolName := clusterName + \"--\" + namespace + \"-\" + ingressHostNames[i] + \"_-\" + ingressesCreated[i]\n\t\t\tingressPoolList = append(ingressPoolList, ingressPoolName)\n\t\t}\n\t} else if ingressType == MULTIHOST {\n\t\tfor i := 0; i < len(ingressSecureHostNames); i++ {\n\t\t\tingressPoolName := clusterName + \"--\" + namespace + \"-\" + ingressSecureHostNames[i] + \"_-\" + ingressesCreated[i]\n\t\t\tingressPoolList = append(ingressPoolList, ingressPoolName)\n\t\t\tingressPoolName = clusterName + \"--\" + ingressInsecureHostNames[i] + \"_-\" + namespace + \"-\" + ingressesCreated[i]\n\t\t\tingressPoolList = append(ingressPoolList, ingressPoolName)\n\t\t}\n\t}\n\tfor _, pool := range pools {\n\t\tpoolList = append(poolList, *pool.Name)\n\t}\n\tdiffNum := len(DiffOfLists(ingressPoolList, poolList))\n\tif diffNum == initialNumOfPools {\n\t\treturn true\n\t}\n\treturn false\n}", "func (cp *Pool) InUse() int64 {\n\tp := cp.pool()\n\tif p == nil {\n\t\treturn 0\n\t}\n\treturn p.InUse()\n}", "func (m *TrxMgr) shrinkPoolMemories() {\n\tif atomic.LoadUint64(&m.shrinkCounter) > sShrinkCountWaterMark {\n\t\tatomic.StoreUint64(&m.shrinkCounter, 0)\n\n\t\twaiting, fetched := make(map[string]*TrxEntry), make(map[string]*TrxEntry)\n\t\tfor k, e := range m.waiting {\n\t\t\twaiting[k] = e\n\t\t}\n\t\tfor k, e := range m.fetched {\n\t\t\tfetched[k] = e\n\t\t}\n\t\tm.waiting, m.fetched = waiting, fetched\n\t}\n}", "func initPool() {\n\tpool = &sync.Pool{\n\t\tNew: func() interface{} {\n\t\t\tfmt.Println(\"Returning new A\")\n\t\t\treturn new(A)\n\t\t},\n\t}\n}", "func (c Cache) gc(shutdown <-chan struct{}, tickerCh <-chan time.Time) bool {\n\tselect {\n\tcase <-shutdown:\n\t\treturn false\n\tcase <-tickerCh:\n\t\t// garbage collect the numberCache\n\t\tfor id, point := range c.numberCache {\n\t\t\tif point.used {\n\t\t\t\t// for points that have been used, mark them as unused\n\t\t\t\tpoint.used = false\n\t\t\t\tc.numberCache[id] = point\n\t\t\t} else {\n\t\t\t\t// for points that have not been used, delete points\n\t\t\t\tdelete(c.numberCache, id)\n\t\t\t}\n\t\t}\n\t\t// garbage collect the summaryCache\n\t\tfor id, point := range c.summaryCache {\n\t\t\tif point.used 
{\n\t\t\t\t// for points that have been used, mark them as unused\n\t\t\t\tpoint.used = false\n\t\t\t\tc.summaryCache[id] = point\n\t\t\t} else {\n\t\t\t\t// for points that have not been used, delete points\n\t\t\t\tdelete(c.summaryCache, id)\n\t\t\t}\n\t\t}\n\t\t// garbage collect the histogramCache\n\t\tfor id, point := range c.histogramCache {\n\t\t\tif point.used {\n\t\t\t\t// for points that have been used, mark them as unused\n\t\t\t\tpoint.used = false\n\t\t\t\tc.histogramCache[id] = point\n\t\t\t} else {\n\t\t\t\t// for points that have not been used, delete points\n\t\t\t\tdelete(c.histogramCache, id)\n\t\t\t}\n\t\t}\n\t\t// garbage collect the exponentialHistogramCache\n\t\tfor id, point := range c.exponentialHistogramCache {\n\t\t\tif point.used {\n\t\t\t\t// for points that have been used, mark them as unused\n\t\t\t\tpoint.used = false\n\t\t\t\tc.exponentialHistogramCache[id] = point\n\t\t\t} else {\n\t\t\t\t// for points that have not been used, delete points\n\t\t\t\tdelete(c.exponentialHistogramCache, id)\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}", "func (p *ResourcePool) destroy(wrapper *ResourceWrapper) {\n\n\t//you can destroy a resource if the pool is closed, no harm no foul\n\tp.resClose(wrapper.Resource)\n\tatomic.AddUint32(&p.open, ^uint32(0))\n\twrapper.p = nil\n}", "func registerPoolCleanup(cleanup func()) {\n\t// Ignore.\n}", "func TestFastAccess(t *testing.T) {\n\tflushDb()\n\ts := getLocalStorage()\n\tbucket, err := s.Create(\"testbucket\", 10, time.Millisecond)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\thold := make(chan struct{})\n\twg := sync.WaitGroup{}\n\tfor i := 0; i < 50; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\t<-hold\n\t\t\tif _, err := bucket.Add(1); err != nil && err != leakybucket.ErrorFull {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}()\n\t}\n\tclose(hold) // Let all concurrent requests start\n\twg.Wait() // Wait for all concurrent requests to finish\n\n\tconn := s.pool.Get()\n\tdefer conn.Close()\n\n\tif exists, err := conn.Do(\"GET\", \"testbucket\"); err != nil {\n\t\tt.Fatal(err)\n\t} else if exists == nil {\n\t\treturn\n\t}\n\tttl, err := conn.Do(\"PTTL\", \"testbucket\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif ttl.(int64) == -1 {\n\t\tt.Fatal(\"no ttl set on bucket\")\n\t}\n}", "func TestTreeWalkPoolBasic(t *testing.T) {\n\t// Create a treeWalkPool\n\ttw := NewTreeWalkPool(1 * time.Second)\n\n\t// Create sample params\n\tparams := listParams{\n\t\tbucket: \"test-bucket\",\n\t}\n\n\t// Add a treeWalk to the pool\n\tresultCh := make(chan TreeWalkResult)\n\tendWalkCh := make(chan struct{})\n\ttw.Set(params, resultCh, endWalkCh)\n\n\t// Wait for treeWalkPool timeout to happen\n\t<-time.After(2 * time.Second)\n\tif c1, _ := tw.Release(params); c1 != nil {\n\t\tt.Error(\"treeWalk go-routine must have been freed\")\n\t}\n\n\t// Add the treeWalk back to the pool\n\ttw.Set(params, resultCh, endWalkCh)\n\n\t// Release the treeWalk before timeout\n\tselect {\n\tcase <-time.After(1 * time.Second):\n\t\tbreak\n\tdefault:\n\t\tif c1, _ := tw.Release(params); c1 == nil {\n\t\t\tt.Error(\"treeWalk go-routine got freed before timeout\")\n\t\t}\n\t}\n}", "func initPool() {\n\tChopsticks = sync.Pool{\n\t\tNew: func() interface{} {\n\t\t\treturn new(Chopstick)\n\t\t},\n\t}\n}", "func initCachePool(addr string) *redis.Pool {\n\treturn &redis.Pool{\n\t\tMaxIdle: 3,\n\t\tIdleTimeout: 300 * time.Second,\n\t\tDial: func() (redis.Conn, error) {\n\t\t\tc, err := redis.Dial(\"tcp\", addr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, 
err\n\t\t\t}\n\t\t\treturn c, nil\n\t\t},\n\t}\n}", "func (p *connPool) gc() {\n\tp.openMu.Lock()\n\tdefer p.openMu.Unlock()\n\n\tp.mapMu.Lock()\n\tdefer p.mapMu.Unlock()\n\n\tvar activeRefs int64\n\tfor params, conn := range p.conns {\n\t\t// We hold the openMu write lock, so no one is trying to open a connection.\n\t\t// The only thing we might race with is callers decrementing the refCount,\n\t\t// which is fine. What matters is that no one will race to increment it,\n\t\t// which could reverse a decision we had already made to close the connection.\n\t\tconn.mu.Lock()\n\t\tactiveRefs += conn.refCount\n\t\tif conn.failed() {\n\t\t\t// The connection attempt failed, so remove it without trying to close it.\n\t\t\tdelete(p.conns, params)\n\t\t} else if conn.refCount <= 0 && time.Since(conn.lastOpened) > idleTTL {\n\t\t\tlog.WithFields(logrus.Fields{\n\t\t\t\t\"implementation\": params.Implementation,\n\t\t\t\t\"address\": params.Address,\n\t\t\t\t\"rootPath\": params.RootPath,\n\t\t\t}).Info(\"closing connection to Vitess topology server due to idle TTL\")\n\t\t\tdisconnects.WithLabelValues(reasonIdle).Inc()\n\n\t\t\tconn.Server.Close()\n\t\t\tdelete(p.conns, params)\n\t\t}\n\t\tconn.mu.Unlock()\n\t}\n\tconnCount.WithLabelValues(connStateActive).Set(float64(len(p.conns)))\n\tconnRefCount.WithLabelValues(connStateActive).Set(float64(activeRefs))\n\n\t// Clean up bad conns once they're no longer being used.\n\t// Make a list of bad conns that still have refs (we need to keep waiting).\n\tvar deadRefs int64\n\tstillUsed := make([]*Conn, 0, len(p.deadConns))\n\tfor _, conn := range p.deadConns {\n\t\tconn.mu.Lock()\n\t\tdeadRefs += conn.refCount\n\t\tif conn.refCount <= 0 {\n\t\t\tlog.WithFields(logrus.Fields{\n\t\t\t\t\"implementation\": conn.params.Implementation,\n\t\t\t\t\"address\": conn.params.Address,\n\t\t\t\t\"rootPath\": conn.params.RootPath,\n\t\t\t}).Info(\"closing connection to Vitess topology server due to liveness check failure\")\n\t\t\tdisconnects.WithLabelValues(reasonDead).Inc()\n\n\t\t\tconn.Server.Close()\n\t\t} else {\n\t\t\tlog.WithFields(logrus.Fields{\n\t\t\t\t\"implementation\": conn.params.Implementation,\n\t\t\t\t\"address\": conn.params.Address,\n\t\t\t\t\"rootPath\": conn.params.RootPath,\n\t\t\t}).Warning(\"cached connection to Vitess topology server failed liveness check but is still in use\")\n\n\t\t\tstillUsed = append(stillUsed, conn)\n\t\t}\n\t\tconn.mu.Unlock()\n\t}\n\tp.deadConns = stillUsed\n\tconnCount.WithLabelValues(connStateDead).Set(float64(len(p.deadConns)))\n\tconnRefCount.WithLabelValues(connStateDead).Set(float64(deadRefs))\n}", "func (cp *Pool) Exhausted() int64 {\n\tp := cp.pool()\n\tif p == nil {\n\t\treturn 0\n\t}\n\treturn p.Exhausted()\n}", "func Test_Static_Pool_Slow_Destroy(t *testing.T) {\n\tp, err := Initialize(\n\t\tcontext.Background(),\n\t\tfunc() *exec.Cmd { return exec.Command(\"php\", \"../tests/slow-destroy.php\", \"echo\", \"pipes\") },\n\t\tpipe.NewPipeFactory(),\n\t\t&Config{\n\t\t\tNumWorkers: 5,\n\t\t\tAllocateTimeout: time.Second,\n\t\t\tDestroyTimeout: time.Second,\n\t\t},\n\t)\n\n\tassert.NoError(t, err)\n\tassert.NotNil(t, p)\n\n\tp.Destroy(context.Background())\n}", "func newPool(addr string) (*pool, error) {\n\tp := pool{redis.Pool{\n\t\tMaxActive: 100,\n\t\tWait: true,\n\t\tMaxIdle: 10,\n\t\tIdleTimeout: 240 * time.Second,\n\t\tDial: func() (redis.Conn, error) { return redis.Dial(\"tcp\", addr) },\n\t}}\n\n\t// Test connection\n\tconn := p.Get()\n\tdefer conn.Close()\n\n\t_, err := conn.Do(\"PING\")\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\treturn &p, nil\n}", "func (cr *ChunkIterator) Retain() {\n\tatomic.AddInt64(&cr.refCount, 1)\n}", "func (n *NoOpAllocator) PoolExists(poolID types.PoolID) bool {\n\treturn false\n}", "func TestPoolConn_storeConn(t *testing.T) {\n\tvar conn store.Conn = &store.PoolConn{}\n\t_ = conn\n}", "func (this *PoolTestSuite) TestValidationFailureOnReturnFreesCapacity() {\n\tthis.factory.setValid(false) // Validate will always fail\n\tthis.factory.enableValidation = true\n\tthis.pool.Config.MaxTotal = 2\n\tthis.pool.Config.MaxWaitMillis = int64(1500)\n\tthis.pool.Config.TestOnReturn = true\n\tthis.pool.Config.TestOnBorrow = false\n\t// Borrow an instance and hold if for 5 seconds\n\tch1 := waitTestGoroutine(this.pool, 5000)\n\t// Borrow another instance and return it after 500 ms (validation will fail)\n\tch2 := waitTestGoroutine(this.pool, 500)\n\tsleep(50)\n\t// Try to borrow an object\n\tobj := this.NoErrorWithResult(this.pool.BorrowObject())\n\tthis.NoError(this.pool.ReturnObject(obj))\n\t<-ch1\n\tclose(ch1)\n\t<-ch2\n\tclose(ch2)\n}", "func (s *WorkSuite) TestFull(c *check.C) {\n\tctx, cancel := context.WithCancel(context.TODO())\n\tdefer cancel()\n\tp := NewPool(ctx)\n\tkey := \"some-key\"\n\tvar wg sync.WaitGroup\n\t// signal channel to cause the first group of workers to\n\t// release their leases.\n\tg1done := make(chan struct{})\n\t// timeout channel indicating all of group one should\n\t// have acquired their leases.\n\tg1timeout := make(chan struct{})\n\tgo func() {\n\t\ttime.Sleep(time.Millisecond * 500)\n\t\tclose(g1timeout)\n\t}()\n\tp.Set(key, 200)\n\t// spawn first group of workers.\n\tfor i := 0; i < 200; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tselect {\n\t\t\tcase l := <-p.Acquire():\n\t\t\t\t<-g1done\n\t\t\t\tl.Release()\n\t\t\tcase <-g1timeout:\n\t\t\t\tc.Errorf(\"Timeout waiting for lease\")\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t}\n\t<-g1timeout\n\t// no additional leases should exist\n\tselect {\n\tcase l := <-p.Acquire():\n\t\tc.Errorf(\"unexpected lease: %+v\", l)\n\tdefault:\n\t}\n\t// spawn a second group of workers that won't be able to\n\t// acquire their leases until the first group is done.\n\tfor i := 0; i < 200; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tselect {\n\t\t\tcase <-p.Acquire():\n\t\t\t\t// leak deliberately\n\t\t\tcase <-time.After(time.Millisecond * 512):\n\t\t\t\tc.Errorf(\"Timeout waiting for lease\")\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t}\n\t// signal first group is done\n\tclose(g1done)\n\t// wait for second group to acquire leases.\n\twg.Wait()\n\t// no additional leases should exist\n\tselect {\n\tcase l := <-p.Acquire():\n\t\tcounts := l.loadCounts()\n\t\tc.Errorf(\"unexpected lease grant: %+v, counts=%+v\", l, counts)\n\tcase <-time.After(time.Millisecond * 128):\n\t}\n\t// make one additional lease available\n\tp.Set(key, 201)\n\tselect {\n\tcase l := <-p.Acquire():\n\t\tc.Assert(l.Key().(string), check.Equals, key)\n\t\tl.Release()\n\tcase <-time.After(time.Millisecond * 128):\n\t\tc.Errorf(\"timeout waiting for lease grant\")\n\t}\n}", "func (p *unlimitedPool) Reset() {\n\n\tp.m.Lock()\n\n\tif !p.closed {\n\t\tp.m.Unlock()\n\t\treturn\n\t}\n\n\t// cancelled the pool, not closed it, pool will be usable after calling initialize().\n\tp.initialize()\n\tp.m.Unlock()\n}", "func TestConflictResolution(t *testing.T) {\n\tpoolB := mkPool(poolBUID, \"pool-b\", []string{\"10.0.10.0/24\", \"FF::0/48\"})\n\tpoolB.CreationTimestamp = meta_v1.Date(2022, 10, 16, 13, 30, 00, 0, time.UTC)\n\tfixture := 
mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t\tpoolB,\n\t}, true, false, nil)\n\n\tawait := fixture.AwaitPool(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != poolResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tpool := fixture.PatchedPool(action)\n\n\t\tif pool.Name != \"pool-b\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif !isPoolConflicting(pool) {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Pool B has not been marked conflicting\")\n\t}\n\n\t// All ranges of a conflicting pool must be disabled\n\tpoolBRanges, _ := fixture.lbIPAM.rangesStore.GetRangesForPool(\"pool-b\")\n\tfor _, r := range poolBRanges {\n\t\tif !r.internallyDisabled {\n\t\t\tt.Fatalf(\"Range '%s' from pool B hasn't been disabled\", ipNetStr(r.allocRange.CIDR()))\n\t\t}\n\t}\n\n\t// Phase 2, resolving the conflict\n\n\tawait = fixture.AwaitPool(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != poolResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tpool := fixture.PatchedPool(action)\n\n\t\tif pool.Name != \"pool-b\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif isPoolConflicting(pool) {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tpoolB, err := fixture.poolClient.Get(context.Background(), \"pool-b\", meta_v1.GetOptions{})\n\tif err != nil {\n\t\tt.Fatal(poolB)\n\t}\n\n\t// Remove the conflicting range\n\tpoolB.Spec.Cidrs = []cilium_api_v2alpha1.CiliumLoadBalancerIPPoolCIDRBlock{\n\t\t{\n\t\t\tCidr: cilium_api_v2alpha1.IPv4orIPv6CIDR(\"FF::0/48\"),\n\t\t},\n\t}\n\n\t_, err = fixture.poolClient.Update(context.Background(), poolB, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Pool b has not de-conflicted\")\n\t}\n}", "func (pph *PortPoolHandler) ensurePortPool(pool *networkextensionv1.PortPool) (bool, error) {\n\tdefItemMap := make(map[string]*networkextensionv1.PortPoolItem)\n\tfor _, poolItem := range pool.Spec.PoolItems {\n\t\tdefItemMap[poolItem.GetKey()] = poolItem\n\t}\n\tactiveItemMap := make(map[string]*networkextensionv1.PortPoolItemStatus)\n\tfor _, poolItemStatus := range pool.Status.PoolItemStatuses {\n\t\tactiveItemMap[poolItemStatus.GetKey()] = poolItemStatus\n\t}\n\n\t// item to delete\n\tvar delItemsStatus []*networkextensionv1.PortPoolItemStatus\n\tfor k := range activeItemMap {\n\t\tif _, ok := defItemMap[k]; !ok {\n\t\t\tdelItemsStatus = append(delItemsStatus, activeItemMap[k])\n\t\t}\n\t}\n\n\tpoolItemHandler := &PortPoolItemHandler{\n\t\tPortPoolName: pool.Name,\n\t\tNamespace: pph.namespace,\n\t\tDefaultRegion: pph.region,\n\t\tLbClient: pph.lbClient,\n\t\tK8sClient: pph.k8sClient,\n\t\tListenerAttr: pool.Spec.ListenerAttribute}\n\n\tpph.poolCache.Lock()\n\tdefer pph.poolCache.Unlock()\n\n\t// try to delete\n\tsuccessDeletedKeyMap := make(map[string]struct{})\n\tfailedDeletedKeyMap := make(map[string]struct{})\n\tfor _, delItemStatus := range delItemsStatus {\n\t\terr := poolItemHandler.checkPortPoolItemDeletion(delItemStatus)\n\t\tif err != nil {\n\t\t\tblog.Warnf(\"cannot delete active item %s, err %s\", delItemStatus.ItemName, err.Error())\n\t\t\tfailedDeletedKeyMap[delItemStatus.GetKey()] = struct{}{}\n\t\t} else {\n\t\t\tsuccessDeletedKeyMap[delItemStatus.GetKey()] = 
struct{}{}\n\t\t}\n\t}\n\t// delete from port pool status\n\ttmpItemsStatus := pool.Status.PoolItemStatuses\n\tpool.Status.PoolItemStatuses = make([]*networkextensionv1.PortPoolItemStatus, 0)\n\tfor _, itemStatus := range tmpItemsStatus {\n\t\tif _, ok := successDeletedKeyMap[itemStatus.GetKey()]; !ok {\n\t\t\tif _, inOk := failedDeletedKeyMap[itemStatus.GetKey()]; inOk {\n\t\t\t\titemStatus.Status = constant.PortPoolItemStatusDeleting\n\t\t\t}\n\t\t\tpool.Status.PoolItemStatuses = append(pool.Status.PoolItemStatuses, itemStatus)\n\t\t}\n\t}\n\n\tshouldRetry := false\n\t// try to add or update port pool item\n\tnewItemStatusList := make([]*networkextensionv1.PortPoolItemStatus, 0)\n\tupdateItemStatusMap := make(map[string]*networkextensionv1.PortPoolItemStatus)\n\tfor _, tmpItem := range pool.Spec.PoolItems {\n\t\tvar updateItemStatus *networkextensionv1.PortPoolItemStatus\n\t\tvar retry bool\n\t\ttmpItemStatus, ok := activeItemMap[tmpItem.GetKey()]\n\t\tif !ok {\n\t\t\tupdateItemStatus, retry = poolItemHandler.ensurePortPoolItem(tmpItem, nil)\n\t\t\tnewItemStatusList = append(newItemStatusList, updateItemStatus)\n\t\t} else {\n\t\t\tupdateItemStatus, retry = poolItemHandler.ensurePortPoolItem(tmpItem, tmpItemStatus)\n\t\t\tupdateItemStatusMap[updateItemStatus.GetKey()] = updateItemStatus\n\t\t}\n\t\tif retry {\n\t\t\tshouldRetry = true\n\t\t}\n\t}\n\tfor i, ts := range pool.Status.PoolItemStatuses {\n\t\tif _, ok := updateItemStatusMap[ts.GetKey()]; ok {\n\t\t\tpool.Status.PoolItemStatuses[i] = updateItemStatusMap[ts.GetKey()]\n\t\t}\n\t}\n\tfor _, ts := range newItemStatusList {\n\t\tpool.Status.PoolItemStatuses = append(pool.Status.PoolItemStatuses, ts)\n\t}\n\n\terr := pph.k8sClient.Status().Update(context.Background(), pool, &client.UpdateOptions{})\n\tif err != nil {\n\t\treturn true, fmt.Errorf(\"update %s/%s status failed, err %s\", pool.GetNamespace(), pool.GetName(), err.Error())\n\t}\n\n\t// delete item from pool cache\n\tpoolKey := ingresscommon.GetNamespacedNameKey(pool.GetName(), pool.GetNamespace())\n\tfor _, itemStatus := range tmpItemsStatus {\n\t\titemKey := itemStatus.GetKey()\n\t\tif _, ok := successDeletedKeyMap[itemKey]; !ok {\n\t\t\tif _, inOk := failedDeletedKeyMap[itemKey]; inOk {\n\t\t\t\tpph.poolCache.SetPortPoolItemStatus(poolKey, itemStatus)\n\t\t\t\tblog.Infof(\"set port pool %s item %s status to %s\",\n\t\t\t\t\tpoolKey, itemStatus.ItemName, constant.PortPoolItemStatusDeleting)\n\t\t\t}\n\t\t} else {\n\t\t\tpph.poolCache.DeletePortPoolItem(poolKey, itemKey)\n\t\t\tblog.Infof(\"delete port pool %s item %s\", poolKey, itemStatus.ItemName)\n\t\t}\n\t}\n\t// add item to pool cache\n\tfor _, itemStatus := range newItemStatusList {\n\t\tif err := pph.poolCache.AddPortPoolItem(poolKey, itemStatus); err != nil {\n\t\t\tblog.Warnf(\"failed to add port pool %s item %v to cache, err %s\", poolKey, itemStatus, err.Error())\n\t\t} else {\n\t\t\tblog.Infof(\"add port pool %s item %v to cache\", poolKey, itemStatus)\n\t\t}\n\t}\n\t// update item status\n\tfor _, itemStatus := range updateItemStatusMap {\n\t\tpph.poolCache.SetPortPoolItemStatus(poolKey, itemStatus)\n\t\tblog.Infof(\"set port pool %s item %s status to %s\", poolKey, itemStatus.ItemName, itemStatus.Status)\n\t}\n\n\tif len(failedDeletedKeyMap) != 0 || shouldRetry {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}", "func (p *limitedPool) Reset() {\n\n\tp.m.Lock()\n\n\tif !p.closed {\n\t\tp.m.Unlock()\n\t\treturn\n\t}\n\n\t// cancelled the pool, not closed it, pool will be usable after calling 
initialize().\n\tp.initialize()\n\tp.m.Unlock()\n}", "func TestCache(t *testing.T) {\n\tt.Parallel()\n\n\tConvey(`An locking LRU cache with size heuristic 3`, t, func() {\n\t\tcache := New(3)\n\n\t\tConvey(`A Get() returns nil.`, func() {\n\t\t\tSo(cache.Get(\"test\"), ShouldBeNil)\n\t\t})\n\n\t\t// Adds values to the cache sequentially, blocking on the values being\n\t\t// processed.\n\t\taddCacheValues := func(values ...string) {\n\t\t\tfor _, v := range values {\n\t\t\t\tisPresent := (cache.Peek(v) != nil)\n\t\t\t\tSo(cache.Put(v, v+\"v\"), ShouldEqual, isPresent)\n\t\t\t}\n\t\t}\n\n\t\tshouldHaveValues := func(actual interface{}, expected ...interface{}) string {\n\t\t\tcache := actual.(*Cache)\n\n\t\t\tactualSnapshot := cache.snapshot()\n\n\t\t\texpectedSnapshot := snapshot{}\n\t\t\tfor _, k := range expected {\n\t\t\t\texpectedSnapshot[k] = k.(string) + \"v\"\n\t\t\t}\n\t\t\treturn ShouldResemble(actualSnapshot, expectedSnapshot)\n\t\t}\n\n\t\tConvey(`With three values, {a, b, c}`, func() {\n\t\t\taddCacheValues(\"a\", \"b\", \"c\")\n\t\t\tSo(cache.Len(), ShouldEqual, 3)\n\n\t\t\tConvey(`Is empty after a purge.`, func() {\n\t\t\t\tcache.Purge()\n\t\t\t\tSo(cache.Len(), ShouldEqual, 0)\n\t\t\t})\n\n\t\t\tConvey(`Can retrieve each of those values.`, func() {\n\t\t\t\tSo(cache.Get(\"a\"), ShouldEqual, \"av\")\n\t\t\t\tSo(cache.Get(\"b\"), ShouldEqual, \"bv\")\n\t\t\t\tSo(cache.Get(\"c\"), ShouldEqual, \"cv\")\n\t\t\t})\n\n\t\t\tConvey(`Get()ting \"a\", then adding \"d\" will cause \"b\" to be evicted.`, func() {\n\t\t\t\tSo(cache.Get(\"a\"), ShouldEqual, \"av\")\n\t\t\t\taddCacheValues(\"d\")\n\t\t\t\tSo(cache, shouldHaveValues, \"a\", \"c\", \"d\")\n\t\t\t})\n\n\t\t\tConvey(`Peek()ing \"a\", then adding \"d\" will cause \"a\" to be evicted.`, func() {\n\t\t\t\tSo(cache.Peek(\"a\"), ShouldEqual, \"av\")\n\t\t\t\taddCacheValues(\"d\")\n\t\t\t\tSo(cache, shouldHaveValues, \"b\", \"c\", \"d\")\n\t\t\t})\n\t\t})\n\n\t\tConvey(`When adding {a, b, c, d}, \"a\" will be evicted.`, func() {\n\t\t\taddCacheValues(\"a\", \"b\", \"c\", \"d\")\n\t\t\tSo(cache.Len(), ShouldEqual, 3)\n\n\t\t\tSo(cache, shouldHaveValues, \"b\", \"c\", \"d\")\n\n\t\t\tConvey(`Requests for \"a\" will be nil.`, func() {\n\t\t\t\tSo(cache.Get(\"a\"), ShouldBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(`When adding {a, b, c, a, d}, \"b\" will be evicted.`, func() {\n\t\t\taddCacheValues(\"a\", \"b\", \"c\", \"a\", \"d\")\n\t\t\tSo(cache.Len(), ShouldEqual, 3)\n\n\t\t\tSo(cache, shouldHaveValues, \"a\", \"c\", \"d\")\n\n\t\t\tConvey(`When removing \"c\", will contain {a, d}.`, func() {\n\t\t\t\tSo(cache.Remove(\"c\"), ShouldEqual, \"cv\")\n\t\t\t\tSo(cache, shouldHaveValues, \"a\", \"d\")\n\n\t\t\t\tConvey(`When adding {e, f}, \"a\" will be evicted.`, func() {\n\t\t\t\t\taddCacheValues(\"e\", \"f\")\n\t\t\t\t\tSo(cache, shouldHaveValues, \"d\", \"e\", \"f\")\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\n\t\tConvey(`When removing a value that isn't there, returns nil.`, func() {\n\t\t\tSo(cache.Remove(\"foo\"), ShouldBeNil)\n\t\t})\n\t})\n}", "func getRawPool(\n\thandle string, context ServerContext, access Access,\n) ([]byte, error) {\n\tloaded := &pool{handle: handle}\n\tbuffer, err := access.LoadRaw(loaded, context)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Deserialize (unfortunately has to be done) for cleaning\n\terr = loaded.UnmarshalBinary(buffer)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// If modified after clean, save and reserialize\n\tif loaded.clean(context, access) {\n\t\terr = access.Save(loaded, true, 
context)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Reserialize\n\t\tbuffer, err = loaded.MarshalBinary()\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn buffer, nil\n}", "func Test_Static_Pool_WrongCommand1(t *testing.T) {\n\tp, err := Initialize(\n\t\tcontext.Background(),\n\t\tfunc() *exec.Cmd { return exec.Command(\"phg\", \"../tests/slow-destroy.php\", \"echo\", \"pipes\") },\n\t\tpipe.NewPipeFactory(),\n\t\t&Config{\n\t\t\tNumWorkers: 5,\n\t\t\tAllocateTimeout: time.Second,\n\t\t\tDestroyTimeout: time.Second,\n\t\t},\n\t)\n\n\tassert.Error(t, err)\n\tassert.Nil(t, p)\n}", "func (o *MacpoolLeaseAllOf) HasPool() bool {\n\tif o != nil && o.Pool != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func NewOrEmptyPool(network, addr string, size int) *Pool {\n\tpool, err := NewPool(network, addr, size, 0, \"\", 0)\n\tif err != nil {\n\t\tpool = &Pool{\n\t\t\tnetwork: network,\n\t\t\taddr: addr,\n\t\t\tpool: make(chan *redisClient, size),\n\t\t\tdf: redis.Dial,\n\t\t}\n\t}\n\treturn pool\n}", "func ensureCacheEmpty(t *testing.T, testName string, imagePath string, h *cache.Handle) {\n\tshasum, err := client.ImageHash(imagePath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\t// We may not have the image yet and in that case, we check if the library cache directory is empty\n\t\t\terr := ensureDirEmpty(t, testName, h.Library)\n\t\t\tif err != nil {\n\t\t\t\t// The library directory of the cache is not there, checking the root (we want to make sure that the cache is still coherent)\n\t\t\t\tensureDirEmpty(t, testName, filepath.Join(h.GetBasedir(), \"root\"))\n\t\t\t}\n\t\t} else {\n\t\t\tt.Fatalf(\"failed to compute shasum for %s: %s\", imgName, err)\n\t\t}\n\t}\n\n\tpath := h.LibraryImage(shasum, imgName)\n\tif e2e.PathExists(t, path) {\n\t\tt.Fatalf(\"%s failed: %s is still in the cache (%s)\", testName, imgName, path)\n\t}\n}", "func TestPoolContext(t *testing.T) {\n\tctx, _ := context.WithTimeout(context.Background(), time.Second)\n\tdefer leaktest.CheckContext(ctx, t)()\n\n\tgo func() {\n\t\tfor {\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t}()\n}", "func (s *spoolNode) spooled() {}", "func TestUpdateLeaks(t *testing.T) {\n\t// Create an empty state database\n\tdb := database.NewMemDatabase()\n\tstate, _ := New(types.Hash32{}, NewDatabase(db))\n\n\t// Update it with some accounts\n\tfor i := byte(0); i < 255; i++ {\n\t\taddr := types.BytesToAddress([]byte{i})\n\t\tstate.AddBalance(addr, uint64(11*i))\n\t\tstate.SetNonce(addr, uint64(42*i))\n\t\tstate.IntermediateRoot(false)\n\t}\n\t// Ensure that no data was leaked into the database\n\titer := db.Find(nil)\n\tdefer iter.Release()\n\tfor iter.Next() {\n\t\trequire.FailNowf(t, \"leaked key\", \"%x\", iter.Key())\n\t}\n}", "func TestOnEvictCacheNoOnEvictionError(t *testing.T) {\n\trequire := require.New(t)\n\n\tevicted := []int{}\n\tonEviction := func(n int) error {\n\t\tevicted = append(evicted, n)\n\t\treturn nil\n\t}\n\tmaxSize := 3\n\n\tcache := newOnEvictCache[int](maxSize, onEviction)\n\n\t// Get non-existent key\n\t_, ok := cache.Get(0)\n\trequire.False(ok)\n\n\t// Put key\n\trequire.NoError(cache.Put(0, 0))\n\trequire.Equal(1, cache.fifo.Len())\n\n\t// Get key\n\tval, ok := cache.Get(0)\n\trequire.True(ok)\n\trequire.Zero(val)\n\n\t// Get non-existent key\n\t_, ok = cache.Get(1)\n\trequire.False(ok)\n\n\t// Fill the cache\n\tfor i := 1; i < maxSize; i++ {\n\t\trequire.NoError(cache.Put(i, i))\n\t\trequire.Equal(i+1, cache.fifo.Len())\n\t}\n\trequire.Empty(evicted)\n\n\t// Cache has 
[0,1,2]\n\n\t// Put another key. This should evict the oldest inserted key (0).\n\trequire.NoError(cache.Put(maxSize, maxSize))\n\trequire.Equal(maxSize, cache.fifo.Len())\n\trequire.Len(evicted, 1)\n\trequire.Zero(evicted[0])\n\n\t// Cache has [1,2,3]\n\titer := cache.fifo.NewIterator()\n\trequire.True(iter.Next())\n\trequire.Equal(1, iter.Key())\n\trequire.Equal(1, iter.Value())\n\trequire.True(iter.Next())\n\trequire.Equal(2, iter.Key())\n\trequire.Equal(2, iter.Value())\n\trequire.True(iter.Next())\n\trequire.Equal(3, iter.Key())\n\trequire.Equal(3, iter.Value())\n\trequire.False(iter.Next())\n\n\t// 0 should no longer be in the cache\n\t_, ok = cache.Get(0)\n\trequire.False(ok)\n\n\t// Other keys should still be in the cache\n\tfor i := maxSize; i >= 1; i-- {\n\t\tval, ok := cache.Get(i)\n\t\trequire.True(ok)\n\t\trequire.Equal(i, val)\n\t}\n\n\t// Cache has [1,2,3]\n\titer = cache.fifo.NewIterator()\n\trequire.True(iter.Next())\n\trequire.Equal(1, iter.Key())\n\trequire.Equal(1, iter.Value())\n\trequire.True(iter.Next())\n\trequire.Equal(2, iter.Key())\n\trequire.Equal(2, iter.Value())\n\trequire.True(iter.Next())\n\trequire.Equal(3, iter.Key())\n\trequire.Equal(3, iter.Value())\n\trequire.False(iter.Next())\n\n\t// Put another key to evict the oldest inserted key (1).\n\trequire.NoError(cache.Put(maxSize+1, maxSize+1))\n\trequire.Equal(maxSize, cache.fifo.Len())\n\trequire.Len(evicted, 2)\n\trequire.Equal(1, evicted[1])\n\n\t// Cache has [2,3,4]\n\titer = cache.fifo.NewIterator()\n\trequire.True(iter.Next())\n\trequire.Equal(2, iter.Key())\n\trequire.Equal(2, iter.Value())\n\trequire.True(iter.Next())\n\trequire.Equal(3, iter.Key())\n\trequire.Equal(3, iter.Value())\n\trequire.True(iter.Next())\n\trequire.Equal(4, iter.Key())\n\trequire.Equal(4, iter.Value())\n\trequire.False(iter.Next())\n\n\t// 1 should no longer be in the cache\n\t_, ok = cache.Get(1)\n\trequire.False(ok)\n\n\trequire.NoError(cache.Flush())\n\n\t// Cache should be empty\n\trequire.Zero(cache.fifo.Len())\n\trequire.Len(evicted, 5)\n\trequire.Equal([]int{0, 1, 2, 3, 4}, evicted)\n\trequire.Zero(cache.fifo.Len())\n\trequire.Equal(maxSize, cache.maxSize) // Should be unchanged\n}", "func (img *Image) Pool() *Pool {\n\treturn img.pool\n}", "func (liqoIPAM *IPAM) reservePoolInHalves(pool string) error {\n\tklog.Infof(\"Network %s is equal to a network pool, acquiring first half..\", pool)\n\tmask, err := GetMask(pool)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot retrieve mask lenght from cidr:%w\", err)\n\t}\n\tmask += 1\n\t_, err = liqoIPAM.ipam.AcquireChildPrefix(pool, mask)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot acquire first half of pool %s\", pool)\n\t}\n\tklog.Infof(\"Acquiring second half..\")\n\t_, err = liqoIPAM.ipam.AcquireChildPrefix(pool, mask)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot acquire second half of pool %s\", pool)\n\t}\n\tklog.Infof(\"Network %s has successfully been reserved\", pool)\n\treturn nil\n}", "func ValidateMemoryStoreAndCreatePool() {\n\tif cacheFailed {\n\t\tinstance, err := FindCacheInstance()\n\t\tif err != nil || instance.Host == \"\" || instance.Port == 0 {\n\t\t\tlog.Println(\"FAILED TO GET REDIS!\", err)\n\t\t\tcacheFailed = true\n\t\t} else {\n\t\t\tlog.Println(\"REDIS IS LIVE!\")\n\t\t\tcacheFailed = false\n\t\t\tredisAddr := fmt.Sprintf(\"%s:%d\", instance.Host, instance.Port)\n\t\t\tlog.Println(\"using redis address:\", redisAddr)\n\t\t\tpool = NewPool(redisAddr)\n\t\t}\n\t}\n}", "func (p *DownloadPool) reserve() *Downloader 
{\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\tvar d *Downloader\n\tif p.count == p.Capacity {\n\t\treturn <-p.resource\n\t}\n\tselect {\n\tcase d = <-p.resource:\n\tdefault:\n\t\te.DebugPrint(\"Generating new resource\")\n\t\td = newDownloader(p.endpoint)\n\t\tp.count++\n\t}\n\treturn d\n}", "func TestSourcePool_storeSource(t *testing.T) {\n\tvar src store.Source = &store.SourcePool{}\n\t_ = src\n}", "func PeriodicallyPollForCache() {\n\tgo func() {\n\t\tfor {\n\t\t\ttime.Sleep(time.Second * 30)\n\t\t\tValidateMemoryStoreAndCreatePool()\n\t\t}\n\t}()\n}", "func (p *ArgsEnvsCacheEntry) Retain() {\n\tp.refCount++\n}", "func TestNewCache(t *testing.T) {\n\tcache := NewCache(time.Minute)\n\tif _, ok := cache.Get(cacheKey); ok {\n\t\tt.Fatalf(\"Expected empty cache\")\n\t}\n}", "func (_m *MockOptions) BytesPool() pool.CheckedBytesPool {\n\tret := _m.ctrl.Call(_m, \"BytesPool\")\n\tret0, _ := ret[0].(pool.CheckedBytesPool)\n\treturn ret0\n}", "func (c *cache) isExhausted() bool {\n\treturn c.ptr == len(c.data)\n}", "func NewPool() Pool {\n\treturn Pool{\n\t\tBalanceRune: cosmos.ZeroUint(),\n\t\tBalanceAsset: cosmos.ZeroUint(),\n\t\tPoolUnits: cosmos.ZeroUint(),\n\t\tStatus: Enabled,\n\t}\n}", "func BenchmarkSetSize1NoReuse(b *testing.B) {\n\tl := NewCache(uint32(3))\n\tfor i := 0; i < b.N; i++ {\n\t\tl.Set(string(i), i)\n\t}\n}", "func (c *connPool) DelFromPool(in interface{}) bool {\n\tdefer c.nextMutex.Unlock()\n\tc.nextMutex.Lock()\n\tfor i := range c.pool {\n\t\tif c.pool[i] == in {\n\t\t\tc.pool[i] = c.pool[len(c.pool)-1]\n\t\t\tc.pool = c.pool[:len(c.pool)-1]\n\t\t\tc.total = len(c.pool)\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (p *Pool) Release(){\n if(p.availablePool != nil){\n for _,dbCon := range p.availablePool{\n dbCon.Close()\n }\n }else{\n p.availablePool=nil\n }\n}", "func (p *ResourcePool) releaseAtomic(wrapper *ResourceWrapper) {\n\n\tp.fMutex.RLock()\n\tdefer p.fMutex.RUnlock()\n\n\t//if this pool is closed when trying to release this resource\n\t//just close the resource\n\tif p.closed == true {\n\t\tp.resClose(wrapper.Resource)\n\t\tatomic.AddUint32(&p.open, ^uint32(0))\n\t\twrapper.p = nil\n\t\treturn\n\t}\n\n\t//obtain a lock to return the resource to the pool\n\t//if we end up not needing to, lets undo our lock\n\t//and close the resource\n\tif nAvailable := atomic.AddUint32(&p.nAvailable, 1); nAvailable > p.min {\n\t\t//decriment\n\t\tatomic.AddUint32(&p.nAvailable, ^uint32(0))\n\t\tp.resClose(wrapper.Resource)\n\t\tatomic.AddUint32(&p.open, ^uint32(0))\n\t\treturn\n\t}\n\n\tp.resources <- *wrapper\n}", "func (_BridgeRegistry *BridgeRegistryCaller) StakingPool(opts *bind.CallOpts) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _BridgeRegistry.contract.Call(opts, out, \"stakingPool\")\n\treturn *ret0, err\n}", "func (pm *peerManager) tryFillPool(metas *[]PeerMeta) {\n\tadded := make([]PeerMeta, 0, len(*metas))\n\tinvalid := make([]string, 0)\n\tfor _, meta := range *metas {\n\t\tif string(meta.ID) == \"\" {\n\t\t\tinvalid = append(invalid, meta.String())\n\t\t\tcontinue\n\t\t}\n\t\t_, found := pm.peerPool[meta.ID]\n\t\tif !found {\n\t\t\t// change some properties\n\t\t\tmeta.Outbound = true\n\t\t\tmeta.Designated = false\n\t\t\tpm.peerPool[meta.ID] = meta\n\t\t\tadded = append(added, meta)\n\t\t}\n\t}\n\tif len(invalid) > 0 {\n\t\tpm.logger.Warn().Strs(\"metas\", invalid).Msg(\"invalid meta list was come\")\n\t}\n\tpm.logger.Debug().Int(\"added_cnt\", len(added)).Msg(\"Filled unknown peer addresses to 
peerpool\")\n\tpm.tryConnectPeers()\n}", "func (p *connPool) checkConn(params planetscalev2.VitessLockserverParams) {\n\t// Take the usual locks as if we are opening the connection like anyone else.\n\tp.openMu.RLock()\n\tdefer pool.openMu.RUnlock()\n\n\tp.mapMu.Lock()\n\tconn := p.conns[params]\n\tp.mapMu.Unlock()\n\n\tif conn == nil {\n\t\t// The conn we were asked to check was removed anyway.\n\t\treturn\n\t}\n\tif !conn.succeeded() {\n\t\t// We only check conns that claim to be good.\n\t\treturn\n\t}\n\tif !conn.shouldCheck() {\n\t\t// It hasn't been long enough since the last liveness check.\n\t\treturn\n\t}\n\n\t// Try a simple read operation from global topo.\n\tctx, cancel := context.WithTimeout(context.Background(), livenessCheckTimeout)\n\tdefer cancel()\n\n\t_, err := conn.Server.GetCellInfoNames(ctx)\n\tif err == nil || topo.IsErrType(err, topo.NoNode) {\n\t\t// The check passed. Nothing to do.\n\t\tcheckSuccesses.Inc()\n\t\treturn\n\t}\n\n\t// The connection is bad. Remove it from the cache so a new one will be created.\n\tlog.WithFields(logrus.Fields{\n\t\t\"implementation\": conn.params.Implementation,\n\t\t\"address\": conn.params.Address,\n\t\t\"rootPath\": conn.params.RootPath,\n\t}).Info(\"cached connection to Vitess topology server failed liveness check\")\n\tcheckErrors.Inc()\n\n\tp.mapMu.Lock()\n\tdefer p.mapMu.Unlock()\n\n\t// Now that we have the map lock, confirm that the entry in the cache is\n\t// still the same one we checked.\n\tif p.conns[params] != conn {\n\t\t// Someone else already removed or replaced it.\n\t\treturn\n\t}\n\n\t// Send it to the deadConns list so the GC will close it while holding the\n\t// openMu write lock.\n\tdelete(p.conns, params)\n\tp.deadConns = append(p.deadConns, conn)\n}", "func (p *Pool) Close(){\n p.availablePool[con]=p.usedPool[con]\n\tdelete(p.usedPool,con)\n}", "func (fup *folderUpdatePrepper) updateResolutionUsageAndPointersLockedCache(\n\tctx context.Context, lState *kbfssync.LockState, md *RootMetadata,\n\tbps blockPutState, unmergedChains, mergedChains *crChains,\n\tmostRecentUnmergedMD, mostRecentMergedMD ImmutableRootMetadata,\n\tisLocalSquash bool) (\n\tblocksToDelete []kbfsblock.ID, err error) {\n\n\t// Track the refs and unrefs in a set, to ensure no duplicates\n\trefs := make(map[data.BlockPointer]bool)\n\tunrefs := make(map[data.BlockPointer]bool)\n\tfor _, op := range md.data.Changes.Ops {\n\t\t// Iterate in reverse since we may be deleting references as we go.\n\t\tfor i := len(op.Refs()) - 1; i >= 0; i-- {\n\t\t\tptr := op.Refs()[i]\n\t\t\t// Don't add usage if it's an unembedded block change\n\t\t\t// pointer. 
Also, we shouldn't be referencing this\n\t\t\t// anymore!\n\t\t\tif unmergedChains.blockChangePointers[ptr] {\n\t\t\t\tfup.vlog.CLogf(\n\t\t\t\t\tctx, libkb.VLog1, \"Ignoring block change ptr %v\", ptr)\n\t\t\t\top.DelRefBlock(ptr)\n\t\t\t} else {\n\t\t\t\trefs[ptr] = true\n\t\t\t}\n\t\t}\n\t\t// Iterate in reverse since we may be deleting unrefs as we go.\n\t\tfor i := len(op.Unrefs()) - 1; i >= 0; i-- {\n\t\t\tptr := op.Unrefs()[i]\n\t\t\tunrefs[ptr] = true\n\t\t\tdelete(refs, ptr)\n\t\t\tif _, isCreateOp := op.(*createOp); isCreateOp {\n\t\t\t\t// The only way a create op should have unref blocks\n\t\t\t\t// is if it was created during conflict resolution.\n\t\t\t\t// In that case, we should move the unref to a final\n\t\t\t\t// resolution op, so it doesn't confuse future\n\t\t\t\t// resolutions.\n\t\t\t\top.DelUnrefBlock(ptr)\n\t\t\t\tmd.data.Changes.Ops =\n\t\t\t\t\taddUnrefToFinalResOp(\n\t\t\t\t\t\tmd.data.Changes.Ops, ptr, unmergedChains.doNotUnrefPointers)\n\t\t\t}\n\t\t}\n\t\tfor _, update := range op.allUpdates() {\n\t\t\tif update.Unref != update.Ref {\n\t\t\t\tunrefs[update.Unref] = true\n\t\t\t\tdelete(refs, update.Unref)\n\t\t\t\trefs[update.Ref] = true\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, resOp := range unmergedChains.resOps {\n\t\tfor _, ptr := range resOp.CommittedUnrefs() {\n\t\t\toriginal, err := unmergedChains.originalFromMostRecentOrSame(ptr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif !unmergedChains.isCreated(original) {\n\t\t\t\tfup.vlog.CLogf(\n\t\t\t\t\tctx, libkb.VLog1, \"Unref'ing %v from old resOp\", ptr)\n\t\t\t\tunrefs[ptr] = true\n\t\t\t}\n\t\t}\n\t}\n\n\t// Unreference (and decrement the size) of any to-unref blocks\n\t// that weren't created in the unmerged branch. (Example: non-top\n\t// dir blocks that were changed during the CR process.)\n\tfor ptr := range unmergedChains.toUnrefPointers {\n\t\toriginal, err := unmergedChains.originalFromMostRecentOrSame(ptr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif !unmergedChains.isCreated(original) {\n\t\t\tunrefs[ptr] = true\n\t\t}\n\t}\n\n\tif isLocalSquash {\n\t\t// Collect any references made in previous resolution ops that\n\t\t// are being squashed together. These must be re-referenced in\n\t\t// the MD object to survive the squash.\n\t\tresToRef := make(map[data.BlockPointer]bool)\n\t\tfor _, resOp := range unmergedChains.resOps {\n\t\t\tfor _, ptr := range resOp.Refs() {\n\t\t\t\tif !unrefs[ptr] {\n\t\t\t\t\tresToRef[ptr] = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, ptr := range resOp.Unrefs() {\n\t\t\t\tdelete(resToRef, ptr)\n\t\t\t}\n\t\t\tfor _, update := range resOp.allUpdates() {\n\t\t\t\tdelete(resToRef, update.Unref)\n\t\t\t}\n\t\t}\n\t\tfor ptr := range resToRef {\n\t\t\tfup.vlog.CLogf(ctx, libkb.VLog1, \"Ref'ing %v from old resOp\", ptr)\n\t\t\trefs[ptr] = true\n\t\t\tmd.data.Changes.Ops[0].AddRefBlock(ptr)\n\t\t}\n\n\t\tunmergedUsage := mostRecentUnmergedMD.DiskUsage()\n\t\tmergedUsage := mostRecentMergedMD.DiskUsage()\n\n\t\t// Local squashes can just use the bytes and usage from the\n\t\t// latest unmerged MD, and we can avoid all the block fetching\n\t\t// done by `updateResolutionUsage()`.\n\t\tmd.SetDiskUsage(unmergedUsage)\n\t\t// TODO: it might be better to add up all the ref bytes, and\n\t\t// all the unref bytes, from all unmerged MDs, instead of just\n\t\t// calculating the difference between the usages. 
But that's\n\t\t// not quite right either since it counts blocks that are\n\t\t// ref'd and unref'd within the squash.\n\t\tif md.DiskUsage() > mergedUsage {\n\t\t\tmd.SetRefBytes(md.DiskUsage() - mergedUsage)\n\t\t\tmd.SetUnrefBytes(0)\n\t\t} else {\n\t\t\tmd.SetRefBytes(0)\n\t\t\tmd.SetUnrefBytes(mergedUsage - md.DiskUsage())\n\t\t}\n\n\t\tmergedMDUsage := mostRecentMergedMD.MDDiskUsage()\n\t\tif md.MDDiskUsage() < mergedMDUsage {\n\t\t\treturn nil, fmt.Errorf(\"MD disk usage went down on unmerged \"+\n\t\t\t\t\"branch: %d vs %d\", md.MDDiskUsage(), mergedMDUsage)\n\t\t}\n\n\t\t// Additional MD disk usage will be determined entirely by the\n\t\t// later `unembedBlockChanges()` call.\n\t\tmd.SetMDDiskUsage(mergedMDUsage)\n\t\tmd.SetMDRefBytes(0)\n\t} else {\n\t\terr = fup.updateResolutionUsageLockedCache(\n\t\t\tctx, lState, md, bps, unmergedChains, mergedChains,\n\t\t\tmostRecentMergedMD, refs, unrefs)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Any blocks that were created on the unmerged branch and have\n\t// been flushed, but didn't survive the resolution, should be\n\t// marked as unreferenced in the resolution.\n\ttoUnref := make(map[data.BlockPointer]bool)\n\tfor ptr := range unmergedChains.originals {\n\t\tif !refs[ptr] && !unrefs[ptr] {\n\t\t\ttoUnref[ptr] = true\n\t\t}\n\t}\n\tfor ptr := range unmergedChains.createdOriginals {\n\t\tif !refs[ptr] && !unrefs[ptr] && unmergedChains.byOriginal[ptr] != nil {\n\t\t\ttoUnref[ptr] = true\n\t\t} else if unmergedChains.blockChangePointers[ptr] {\n\t\t\ttoUnref[ptr] = true\n\t\t}\n\t}\n\tfor ptr := range unmergedChains.toUnrefPointers {\n\t\ttoUnref[ptr] = true\n\t}\n\tfor _, resOp := range unmergedChains.resOps {\n\t\tfor _, ptr := range resOp.Refs() {\n\t\t\tif !isLocalSquash && !refs[ptr] && !unrefs[ptr] {\n\t\t\t\ttoUnref[ptr] = true\n\t\t\t}\n\t\t}\n\t\tfor _, ptr := range resOp.Unrefs() {\n\t\t\tif !refs[ptr] && !unrefs[ptr] {\n\t\t\t\ttoUnref[ptr] = true\n\t\t\t}\n\t\t}\n\t}\n\tdeletedRefs := make(map[data.BlockPointer]bool)\n\tdeletedUnrefs := make(map[data.BlockPointer]bool)\n\tfor ptr := range toUnref {\n\t\tif ptr == data.ZeroPtr || unmergedChains.doNotUnrefPointers[ptr] {\n\t\t\t// A zero pointer can sneak in from the unrefs field of a\n\t\t\t// syncOp following a failed syncOp, via\n\t\t\t// `unmergedChains.toUnrefPointers` after a chain collapse.\n\t\t\tcontinue\n\t\t}\n\t\tisUnflushed, err := fup.config.BlockServer().IsUnflushed(\n\t\t\tctx, fup.id(), ptr.ID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif isUnflushed {\n\t\t\tblocksToDelete = append(blocksToDelete, ptr.ID)\n\t\t\tdeletedUnrefs[ptr] = true\n\t\t\t// No need to unreference this since we haven't flushed it yet.\n\t\t\tcontinue\n\t\t}\n\n\t\tdeletedRefs[ptr] = true\n\t\t// Put the unrefs in a new resOp after the final operation, to\n\t\t// cancel out any stray refs in earlier ops.\n\t\tfup.vlog.CLogf(ctx, libkb.VLog1, \"Unreferencing dropped block %v\", ptr)\n\t\tmd.data.Changes.Ops = addUnrefToFinalResOp(\n\t\t\tmd.data.Changes.Ops, ptr, unmergedChains.doNotUnrefPointers)\n\t}\n\n\t// Scrub all refs and unrefs of blocks that never made it to the\n\t// server, for smaller updates and to make things easier on the\n\t// StateChecker. 
We scrub the refs too because in some cases\n\t// (e.g., on a copied conflict file), we add an unref without\n\t// removing the original ref, and if we remove the unref, the ref\n\t// must go too.\n\tif len(deletedRefs) > 0 || len(deletedUnrefs) > 0 {\n\t\tfor _, op := range md.data.Changes.Ops {\n\t\t\tvar toDelRef []data.BlockPointer\n\t\t\tfor _, ref := range op.Refs() {\n\t\t\t\tif deletedRefs[ref] || deletedUnrefs[ref] {\n\t\t\t\t\ttoDelRef = append(toDelRef, ref)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, ref := range toDelRef {\n\t\t\t\tfup.vlog.CLogf(ctx, libkb.VLog1, \"Scrubbing ref %v\", ref)\n\t\t\t\top.DelRefBlock(ref)\n\t\t\t}\n\t\t\tvar toDelUnref []data.BlockPointer\n\t\t\tfor _, unref := range op.Unrefs() {\n\t\t\t\tif deletedUnrefs[unref] {\n\t\t\t\t\ttoDelUnref = append(toDelUnref, unref)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, unref := range toDelUnref {\n\t\t\t\tfup.vlog.CLogf(ctx, libkb.VLog1, \"Scrubbing unref %v\", unref)\n\t\t\t\top.DelUnrefBlock(unref)\n\t\t\t}\n\t\t}\n\t\tfor _, resOp := range unmergedChains.resOps {\n\t\t\tfor _, unref := range resOp.Unrefs() {\n\t\t\t\tif deletedUnrefs[unref] {\n\t\t\t\t\tfup.vlog.CLogf(\n\t\t\t\t\t\tctx, libkb.VLog1, \"Scrubbing resOp unref %v\", unref)\n\t\t\t\t\tresOp.DelUnrefBlock(unref)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tfup.log.CDebugf(ctx, \"New md byte usage: %d ref, %d unref, %d total usage \"+\n\t\t\"(previously %d)\", md.RefBytes(), md.UnrefBytes(), md.DiskUsage(),\n\t\tmostRecentMergedMD.DiskUsage())\n\treturn blocksToDelete, nil\n}", "func (iter *ldbCacheIter) Release() {\n}", "func (p *ResourcePool) Available() reflow.Resources {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\tvar reserved reflow.Resources\n\tfor _, alloc := range p.allocs {\n\t\tif !AllocExpired(alloc) {\n\t\t\treserved.Add(reserved, alloc.Resources())\n\t\t}\n\t}\n\tvar avail reflow.Resources\n\tavail.Sub(p.resources, reserved)\n\treturn avail\n}", "func TestBytesPool_Put(t *testing.T) {\n\tpool := NewBytesPool(1, size)\n\tbts1 := make([]byte, size)\n\tpool.Put(bts1)\n\tbts2 := make([]byte, size)\n\tpool.Put(bts2)\n\n\tpool.Get()\n}", "func initElementPool() {\n\tdefaultElementPool = newIDElementPool(nil)\n}", "func (s *ServiceFactory) initPool() {\n\tc := s.Config\n\n\tif c.IdleTimeout <= 0 {\n\t\tc.IdleTimeout = time.Duration(5) * time.Minute\n\t}\n\n\tPool := &redis.Pool{IdleTimeout: time.Duration(5) * time.Minute, MaxIdle: c.MaxIdle, MaxActive: c.MaxActive}\n\tPool.TestOnBorrow = func(c redis.Conn, t time.Time) error {\n\t\t_, err := c.Do(\"PING\")\n\t\treturn err\n\t}\n\n\tPool.Dial = func() (redis.Conn, error) {\n\t\treturn redis.DialURL(c.Url)\n\t}\n\ts.Pool = Pool\n}", "func TestEmpty(t *testing.T) {\n\t_, storage, _, _, destroyFunc := newStorage(t)\n\tdefer destroyFunc()\n\tif err := storage.Allocate(31000); !strings.Contains(err.Error(), \"cannot allocate resources of type servicenodeportallocations at this time\") {\n\t\tt.Fatal(err)\n\t}\n}", "func initPool(config *Config) *redis.Pool {\n\treturn newRedisPoool(config)\n}" ]
[ "0.66354775", "0.65084225", "0.64718693", "0.6448212", "0.64091086", "0.6374555", "0.61580724", "0.61445475", "0.6142347", "0.6084883", "0.6079117", "0.60385364", "0.5989658", "0.5920169", "0.5905672", "0.58007514", "0.5796426", "0.5788675", "0.57364124", "0.57043797", "0.5678826", "0.5671868", "0.56416416", "0.5634575", "0.56304455", "0.5624465", "0.56228673", "0.56026584", "0.5597811", "0.55862427", "0.5574164", "0.55731964", "0.55512387", "0.55476564", "0.5536449", "0.55233413", "0.5509956", "0.55096006", "0.5504609", "0.5498096", "0.5460773", "0.5451802", "0.54499334", "0.5447587", "0.54307085", "0.54226315", "0.541257", "0.5411109", "0.5404873", "0.539158", "0.53866637", "0.53837866", "0.53819", "0.5375341", "0.5373067", "0.535579", "0.53465736", "0.5342536", "0.53396386", "0.5330663", "0.53089166", "0.52975076", "0.52923036", "0.52895534", "0.5288949", "0.528098", "0.52711654", "0.5269742", "0.5261481", "0.5255782", "0.52503556", "0.5242382", "0.52406114", "0.5232021", "0.5231095", "0.52303886", "0.52295935", "0.52186126", "0.52164644", "0.521293", "0.5200653", "0.52000153", "0.51988137", "0.5191605", "0.5188634", "0.51804984", "0.5174765", "0.51741004", "0.51544714", "0.5153557", "0.5150018", "0.51468605", "0.5140494", "0.51396334", "0.5126426", "0.51215434", "0.51124656", "0.51069903", "0.5106282", "0.5099334" ]
0.58948064
15
Test that Pool releases resources on GC.
func TestPoolRelease(t *testing.T) { testPool(t, false) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestPoolGC(t *testing.T) {\n\ttestPool(t, true)\n}", "func TestPool(t *testing.T) {\n\tdefer leaktest.Check(t)()\n\n\tgo func() {\n\t\tfor {\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t}()\n}", "func (this *PoolTestSuite) TestInvalidateFreesCapacity() {\n\tthis.pool.Config.MaxTotal = 2\n\tthis.pool.Config.MaxWaitMillis = 500\n\tthis.pool.Config.BlockWhenExhausted = true\n\t// Borrow an instance and hold if for 5 seconds\n\tch1 := waitTestGoroutine(this.pool, 5000)\n\t// Borrow another instance\n\tobj := this.NoErrorWithResult(this.pool.BorrowObject())\n\t// Launch another goroutine - will block, but fail in 500 ms\n\tch2 := waitTestGoroutine(this.pool, 100)\n\t// Invalidate the object borrowed by this goroutine - should allow goroutine2 to create\n\tsleep(20)\n\tthis.NoError(this.pool.InvalidateObject(obj))\n\tsleep(600) // Wait for goroutine2 to timeout\n\tresult2 := <-ch2\n\tclose(ch2)\n\tif result2.error != nil {\n\t\tthis.Fail(result2.error.Error())\n\t}\n\t<-ch1\n\tclose(ch1)\n}", "func Test_Static_Pool_Destroy_And_Close(t *testing.T) {\n\tctx := context.Background()\n\tp, err := Initialize(\n\t\tctx,\n\t\tfunc() *exec.Cmd { return exec.Command(\"php\", \"../tests/client.php\", \"delay\", \"pipes\") },\n\t\tpipe.NewPipeFactory(),\n\t\t&Config{\n\t\t\tNumWorkers: 1,\n\t\t\tAllocateTimeout: time.Second,\n\t\t\tDestroyTimeout: time.Second,\n\t\t},\n\t)\n\n\tassert.NotNil(t, p)\n\tassert.NoError(t, err)\n\n\tp.Destroy(ctx)\n\t_, err = p.Exec(&payload.Payload{Body: []byte(\"100\")})\n\tassert.Error(t, err)\n}", "func TestPool(t *testing.T, p pool.Pool) {\n\tt.Helper()\n\tctx := context.Background()\n\toffers, err := p.Offers(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, want := len(offers), 1; got != want {\n\t\tt.Fatalf(\"got %v, want %v\", got, want)\n\t}\n\t// We accept half the memory and disk; we use 0 CPUs.\n\to := offers[0]\n\tr := o.Available()\n\tvar orig reflow.Resources\n\torig.Set(r)\n\tr[\"cpu\"] = 0\n\tr[\"mem\"] /= 2\n\tr[\"disk\"] /= 2\n\talloc, err := o.Accept(ctx, pool.AllocMeta{Want: r, Owner: \"test\", Labels: nil})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\toffers, err = p.Offers(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, want := len(offers), 1; got != want {\n\t\tt.Fatalf(\"got %v, want %v\", got, want)\n\t}\n\to = offers[0]\n\tlog.Printf(\"offer received %v\", o.Available())\n\tif got, want := o.Available()[\"mem\"], (orig[\"mem\"] - orig[\"mem\"]/2); got != want {\n\t\tt.Fatalf(\"got %v, want %v\", got, want)\n\t}\n\n\tid := reflow.Digester.FromString(\"alloctest\")\n\texec, err := alloc.Put(ctx, id, reflow.ExecConfig{\n\t\tType: \"exec\",\n\t\tImage: bashImage,\n\t\tCmd: \"echo logthis; echo foobar > $out\",\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Give it some time to fetch the image, etc.\n\tctx, cancel := context.WithTimeout(ctx, 1*time.Minute)\n\tdefer cancel()\n\terr = exec.Wait(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tres, err := exec.Result(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif res.Err != nil {\n\t\tt.Fatal(res.Err)\n\t}\n\torigres := res\n\n\t// Now we force expiry to see that we can grab everything.\n\t// We grab a new alloc, and check that our old alloc died;\n\t// there should now be zero offers.\n\tintv := 1 * time.Nanosecond\n\td, err := alloc.Keepalive(ctx, intv)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, want := d, intv; got != want {\n\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t}\n\ttime.Sleep(d)\n\toffers, err = p.Offers(ctx)\n\tif err != nil 
{\n\t\tt.Fatal(err)\n\t}\n\tif got, want := len(offers), 1; got != want {\n\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t}\n\to = offers[0]\n\tif got, want := o.Available(), orig; !got.Equal(want) {\n\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t}\n\talloc1, err := o.Accept(ctx, pool.AllocMeta{Want: o.Available(), Owner: \"test\", Labels: nil})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, want := alloc1.Resources(), o.Available(); !got.Equal(want) {\n\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t}\n\t// Look it up again to get its zombie.\n\t// Note: in client-server testing we're interacting directly with a client\n\t// not through a cluster implementation, so we'll need to strip off the\n\t// hostname ourselves.\n\tallocID := alloc.ID()\n\tif idx := strings.Index(allocID, \"/\"); idx > 0 {\n\t\tallocID = allocID[idx+1:]\n\t}\n\talloc, err = p.Alloc(ctx, allocID)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\texec, err = alloc.Get(ctx, id)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tres, err = exec.Result(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, want := res, origres; !reflect.DeepEqual(got, want) {\n\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t}\n\trc, err := exec.Logs(ctx, true, false, false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer rc.Close()\n\tb, err := ioutil.ReadAll(rc)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, want := string(b), \"logthis\\n\"; got != want {\n\t\tt.Errorf(\"got %q, want %q\", got, want)\n\t}\n\n\t// We shouldn't have any offers now.\n\toffers, err = p.Offers(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, want := len(offers), 0; got != want {\n\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t}\n}", "func (p *Pool) Release(){\n if(p.availablePool != nil){\n for _,dbCon := range p.availablePool{\n dbCon.Close()\n }\n }else{\n p.availablePool=nil\n }\n}", "func TestCollectGarbage(t *testing.T) {\n\tCollectGarbage()\n}", "func Test_Static_Pool_Destroy_And_Close_While_Wait(t *testing.T) {\n\tctx := context.Background()\n\tp, err := Initialize(\n\t\tctx,\n\t\tfunc() *exec.Cmd { return exec.Command(\"php\", \"../tests/client.php\", \"delay\", \"pipes\") },\n\t\tpipe.NewPipeFactory(),\n\t\t&Config{\n\t\t\tNumWorkers: 1,\n\t\t\tAllocateTimeout: time.Second,\n\t\t\tDestroyTimeout: time.Second,\n\t\t},\n\t)\n\n\tassert.NotNil(t, p)\n\tassert.NoError(t, err)\n\n\tgo func() {\n\t\t_, errP := p.Exec(&payload.Payload{Body: []byte(\"100\")})\n\t\tif errP != nil {\n\t\t\tt.Errorf(\"error executing payload: error %v\", err)\n\t\t}\n\t}()\n\ttime.Sleep(time.Millisecond * 100)\n\n\tp.Destroy(ctx)\n\t_, err = p.Exec(&payload.Payload{Body: []byte(\"100\")})\n\tassert.Error(t, err)\n}", "func TestPoolTimeout(t *testing.T) {\n\tdefer leaktest.CheckTimeout(t, time.Second)()\n\n\tgo func() {\n\t\tfor {\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t}()\n}", "func (p *Pool) Release() {\n\tp.once.Do(func() {\n\t\tatomic.StoreInt32(&p.release, 1)\n\t\tp.lock.Lock()\n\t\tp.workers.reset()\n\t\tp.lock.Unlock()\n\t\tdelete(PoolRecords, p.name)\n\t})\n}", "func (c *ChannelPool) Release() {\n\tc.mu.Lock()\n\tconns := c.conns\n\tc.conns = nil\n\tc.factory = nil\n\tcloseFun := c.close\n\tc.close = nil\n\tc.mu.Unlock()\n\n\tif conns == nil {\n\t\treturn\n\t}\n\n\tclose(conns)\n\tfor wrapConn := range conns {\n\t\tcloseFun(wrapConn.conn)\n\t}\n}", "func Release() {\n\tdefaultRoutinePool.Release()\n}", "func Test_Static_Pool_Handle_Dead(t *testing.T) {\n\tctx := context.Background()\n\tp, err := Initialize(\n\t\tcontext.Background(),\n\t\tfunc() 
*exec.Cmd { return exec.Command(\"php\", \"../tests/slow-destroy.php\", \"echo\", \"pipes\") },\n\t\tpipe.NewPipeFactory(),\n\t\t&Config{\n\t\t\tNumWorkers: 5,\n\t\t\tAllocateTimeout: time.Second * 100,\n\t\t\tDestroyTimeout: time.Second,\n\t\t},\n\t)\n\tassert.NoError(t, err)\n\tassert.NotNil(t, p)\n\n\ttime.Sleep(time.Second)\n\tfor i := range p.Workers() {\n\t\tp.Workers()[i].State().Set(worker.StateErrored)\n\t}\n\n\t_, err = p.Exec(&payload.Payload{Body: []byte(\"hello\")})\n\tassert.NoError(t, err)\n\tp.Destroy(ctx)\n}", "func (p *WorkerPool[T, R]) Release() {\n\tif p.resChan != nil {\n\t\tclose(p.resChan)\n\t}\n}", "func (p *connPool) gc() {\n\tp.openMu.Lock()\n\tdefer p.openMu.Unlock()\n\n\tp.mapMu.Lock()\n\tdefer p.mapMu.Unlock()\n\n\tvar activeRefs int64\n\tfor params, conn := range p.conns {\n\t\t// We hold the openMu write lock, so no one is trying to open a connection.\n\t\t// The only thing we might race with is callers decrementing the refCount,\n\t\t// which is fine. What matters is that no one will race to increment it,\n\t\t// which could reverse a decision we had already made to close the connection.\n\t\tconn.mu.Lock()\n\t\tactiveRefs += conn.refCount\n\t\tif conn.failed() {\n\t\t\t// The connection attempt failed, so remove it without trying to close it.\n\t\t\tdelete(p.conns, params)\n\t\t} else if conn.refCount <= 0 && time.Since(conn.lastOpened) > idleTTL {\n\t\t\tlog.WithFields(logrus.Fields{\n\t\t\t\t\"implementation\": params.Implementation,\n\t\t\t\t\"address\": params.Address,\n\t\t\t\t\"rootPath\": params.RootPath,\n\t\t\t}).Info(\"closing connection to Vitess topology server due to idle TTL\")\n\t\t\tdisconnects.WithLabelValues(reasonIdle).Inc()\n\n\t\t\tconn.Server.Close()\n\t\t\tdelete(p.conns, params)\n\t\t}\n\t\tconn.mu.Unlock()\n\t}\n\tconnCount.WithLabelValues(connStateActive).Set(float64(len(p.conns)))\n\tconnRefCount.WithLabelValues(connStateActive).Set(float64(activeRefs))\n\n\t// Clean up bad conns once they're no longer being used.\n\t// Make a list of bad conns that still have refs (we need to keep waiting).\n\tvar deadRefs int64\n\tstillUsed := make([]*Conn, 0, len(p.deadConns))\n\tfor _, conn := range p.deadConns {\n\t\tconn.mu.Lock()\n\t\tdeadRefs += conn.refCount\n\t\tif conn.refCount <= 0 {\n\t\t\tlog.WithFields(logrus.Fields{\n\t\t\t\t\"implementation\": conn.params.Implementation,\n\t\t\t\t\"address\": conn.params.Address,\n\t\t\t\t\"rootPath\": conn.params.RootPath,\n\t\t\t}).Info(\"closing connection to Vitess topology server due to liveness check failure\")\n\t\t\tdisconnects.WithLabelValues(reasonDead).Inc()\n\n\t\t\tconn.Server.Close()\n\t\t} else {\n\t\t\tlog.WithFields(logrus.Fields{\n\t\t\t\t\"implementation\": conn.params.Implementation,\n\t\t\t\t\"address\": conn.params.Address,\n\t\t\t\t\"rootPath\": conn.params.RootPath,\n\t\t\t}).Warning(\"cached connection to Vitess topology server failed liveness check but is still in use\")\n\n\t\t\tstillUsed = append(stillUsed, conn)\n\t\t}\n\t\tconn.mu.Unlock()\n\t}\n\tp.deadConns = stillUsed\n\tconnCount.WithLabelValues(connStateDead).Set(float64(len(p.deadConns)))\n\tconnRefCount.WithLabelValues(connStateDead).Set(float64(deadRefs))\n}", "func (p *Pool) Destroy() {\n\tp.Lock()\n\tdefer p.Unlock()\n\n\tp.factory = nil\n\tif p.conns == nil {\n\t\treturn\n\t}\n\n\tfor v := range p.conns {\n\t\tif v != nil {\n\t\t\tp.Close(v)\n\t\t}\n\t}\n\tp.conns = nil\n\n}", "func Test_Static_Pool_Slow_Destroy(t *testing.T) {\n\tp, err := Initialize(\n\t\tcontext.Background(),\n\t\tfunc() *exec.Cmd { 
return exec.Command(\"php\", \"../tests/slow-destroy.php\", \"echo\", \"pipes\") },\n\t\tpipe.NewPipeFactory(),\n\t\t&Config{\n\t\t\tNumWorkers: 5,\n\t\t\tAllocateTimeout: time.Second,\n\t\t\tDestroyTimeout: time.Second,\n\t\t},\n\t)\n\n\tassert.NoError(t, err)\n\tassert.NotNil(t, p)\n\n\tp.Destroy(context.Background())\n}", "func (pool *Pool) Close() {\n\tpool.mutex.Lock()\n\tpool.freelist = nil\n\tpool.mutex.Unlock()\n}", "func (p *Pool) Close(){\n p.availablePool[con]=p.usedPool[con]\n\tdelete(p.usedPool,con)\n}", "func (p *Pool) Release() {\n\tp.dispatcher.stop <- true\n\t<-p.dispatcher.stop\n}", "func registerPoolCleanup(cleanup func()) {\n\t// Ignore.\n}", "func (s *MockManagedThread) Release() {}", "func (c *channelPool) Release() {\n\tc.mu.Lock()\n\tfor _, servConn := range c.servConnsMap {\n\t\tfor ic := range servConn.idleConns {\n\t\t\tic.connWrap.CloseConnWrap()\n\t\t}\n\t\tclose(servConn.idleConns)\n\t\tservConn.openingConnNum = 0\n\t}\n\n\tc.servConnsMap = nil\n\tc.servAddrList = nil\n\n\tc.mu.Unlock()\n}", "func (p *connPool) Purge() {\n\tdpiPool := p.dpiPool\n\tp.dpiPool = nil\n\tif dpiPool != nil {\n\t\tC.dpiPool_close(dpiPool, C.DPI_MODE_POOL_CLOSE_FORCE)\n\t}\n}", "func TestFull(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tt.Cleanup(cancel)\n\tp := NewPool(ctx)\n\tvar wg sync.WaitGroup\n\t// signal channel to cause the first group of workers to\n\t// release their leases.\n\tg1done := make(chan struct{})\n\t// timeout channel indicating all of group one should\n\t// have acquired their leases.\n\tg1timeout := make(chan struct{})\n\tgo func() {\n\t\ttime.Sleep(time.Millisecond * 500)\n\t\tclose(g1timeout)\n\t}()\n\tp.Set(200)\n\t// spawn first group of workers.\n\tfor i := 0; i < 200; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tselect {\n\t\t\tcase l := <-p.Acquire():\n\t\t\t\t<-g1done\n\t\t\t\tl.Release()\n\t\t\tcase <-g1timeout:\n\t\t\t\tt.Errorf(\"Timeout waiting for lease\")\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t}\n\t<-g1timeout\n\t// no additional leases should exist\n\tselect {\n\tcase l := <-p.Acquire():\n\t\tt.Errorf(\"unexpected lease: %+v\", l)\n\tdefault:\n\t}\n\t// spawn a second group of workers that won't be able to\n\t// acquire their leases until the first group is done.\n\tfor i := 0; i < 200; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tselect {\n\t\t\tcase <-p.Acquire():\n\t\t\t\t// leak deliberately\n\t\t\tcase <-time.After(time.Millisecond * 512):\n\t\t\t\tt.Errorf(\"Timeout waiting for lease\")\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t}\n\t// signal first group is done\n\tclose(g1done)\n\t// wait for second group to acquire leases.\n\twg.Wait()\n\t// no additional leases should exist\n\tselect {\n\tcase l := <-p.Acquire():\n\t\tcounts := l.loadCounts()\n\t\tt.Errorf(\"unexpected lease grant: %+v, counts=%+v\", l, counts)\n\tcase <-time.After(time.Millisecond * 128):\n\t}\n\t// make one additional lease available\n\tp.Set(201)\n\tselect {\n\tcase l := <-p.Acquire():\n\t\tl.Release()\n\tcase <-time.After(time.Millisecond * 128):\n\t\tt.Errorf(\"timeout waiting for lease grant\")\n\t}\n}", "func (*waitForBoundPVCsMeasurement) Dispose() {}", "func TestTreeWalkPoolBasic(t *testing.T) {\n\t// Create a treeWalkPool\n\ttw := NewTreeWalkPool(1 * time.Second)\n\n\t// Create sample params\n\tparams := listParams{\n\t\tbucket: \"test-bucket\",\n\t}\n\n\t// Add a treeWalk to the pool\n\tresultCh := make(chan TreeWalkResult)\n\tendWalkCh := make(chan struct{})\n\ttw.Set(params, resultCh, endWalkCh)\n\n\t// Wait for treeWalkPool 
timeout to happen\n\t<-time.After(2 * time.Second)\n\tif c1, _ := tw.Release(params); c1 != nil {\n\t\tt.Error(\"treeWalk go-routine must have been freed\")\n\t}\n\n\t// Add the treeWalk back to the pool\n\ttw.Set(params, resultCh, endWalkCh)\n\n\t// Release the treeWalk before timeout\n\tselect {\n\tcase <-time.After(1 * time.Second):\n\t\tbreak\n\tdefault:\n\t\tif c1, _ := tw.Release(params); c1 == nil {\n\t\t\tt.Error(\"treeWalk go-routine got freed before timeout\")\n\t\t}\n\t}\n}", "func TestPool(t *testing.T) {\n\n\tvar res []WaitFunc\n\n\tpool := NewLimited(4)\n\tdefer pool.Close()\n\n\tnewFunc := func(d time.Duration) WorkFunc {\n\t\treturn func(context.Context) (interface{}, error) {\n\t\t\ttime.Sleep(d)\n\t\t\treturn nil, nil\n\t\t}\n\t}\n\n\treportCount := int64(0)\n\treport := func(v interface{}, err error) {\n\t\tatomic.AddInt64(&reportCount, 1)\n\t}\n\n\tfor i := 0; i < 4; i++ {\n\t\twu := pool.Queue(context.Background(), newFunc(time.Second*1), report)\n\t\tres = append(res, wu)\n\t}\n\n\tvar count int\n\n\tfor i, wu := range res {\n\t\tfmt.Println(i)\n\t\tv, e := wu()\n\t\trequire.Equal(t, e, nil)\n\t\trequire.Equal(t, v, nil)\n\t\tcount++\n\t}\n\n\trequire.Equal(t, count, 4)\n\trequire.Equal(t, reportCount, int64(4))\n\n\tpool.Close() // testing no error occurs as Close will be called twice once defer pool.Close() fires\n}", "func TestPoolDelete(t *testing.T) {\n\tinitDone := make(chan struct{})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t\tmkPool(poolBUID, \"pool-b\", []string{\"10.0.20.0/24\"}),\n\t}, true, true, func() {\n\t\tclose(initDone)\n\t})\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t},\n\t\t},\n\t)\n\n\tvar allocPool string\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif strings.HasPrefix(svc.Status.LoadBalancer.Ingress[0].IP, \"10.0.10\") {\n\t\t\tallocPool = \"pool-a\"\n\t\t} else {\n\t\t\tallocPool = \"pool-b\"\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\t<-initDone\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif strings.HasPrefix(svc.Status.LoadBalancer.Ingress[0].IP, \"10.0.10\") {\n\t\t\tif allocPool == \"pool-a\" {\n\t\t\t\tt.Error(\"New IP was allocated from deleted pool\")\n\t\t\t\treturn true\n\t\t\t}\n\t\t} else {\n\t\t\tif allocPool == \"pool-b\" {\n\t\t\t\tt.Error(\"New IP 
was allocated from deleted pool\")\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\terr := fixture.poolClient.Delete(context.Background(), allocPool, meta_v1.DeleteOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n}", "func (rp *resourcePool) Close() {\n\trp.Lock()\n\tdefer rp.Unlock()\n\n\t// Clear the resources in the pool.\n\tfor ; rp.start != nil; rp.start = rp.start.next {\n\t\trp.closeFn(rp.start.value)\n\t\trp.totalSize--\n\t}\n\tatomic.StoreUint64(&rp.size, 0)\n\trp.end = nil\n\n\t// Stop the maintenance timer. If it's already fired, a call to Maintain might be waiting for the lock to be\n\t// released, so we set closed to make that call a no-op.\n\trp.closed = true\n\t_ = rp.maintainTimer.Stop()\n}", "func TestPools(t *testing.T) {\n\ttc := testutil.SystemTest(t)\n\tbuf := &bytes.Buffer{}\n\n\t// Tests\n\n\t// Get the pool.\n\ttestutil.Retry(t, 3, 2*time.Second, func(r *testutil.R) {\n\t\tpoolName := fmt.Sprintf(\"projects/%s/locations/%s/pools/%s\", tc.ProjectID, location, poolID)\n\t\tif err := getPool(buf, tc.ProjectID, location, poolID); err != nil {\n\t\t\tr.Errorf(\"getPool got err: %v\", err)\n\t\t}\n\t\tif got := buf.String(); !strings.Contains(got, poolName) {\n\t\t\tr.Errorf(\"getPool got\\n----\\n%v\\n----\\nWant to contain:\\n----\\n%v\\n----\\n\", got, poolName)\n\t\t}\n\t})\n\tbuf.Reset()\n\n\t// Update an existing pool. Set the updated peer network to \"\", which\n\t// is the same as the default otherwise the test will take a long time\n\t// to complete.\n\ttestutil.Retry(t, 3, 2*time.Second, func(r *testutil.R) {\n\t\tpoolName := fmt.Sprintf(\"projects/%s/locations/%s/pools/%s\", tc.ProjectID, location, poolID)\n\t\tif err := updatePool(buf, tc.ProjectID, location, poolID, \"\"); err != nil {\n\t\t\tr.Errorf(\"updatePool got err: %v\", err)\n\t\t}\n\t\tif got := buf.String(); !strings.Contains(got, poolName) {\n\t\t\tr.Errorf(\"updatePool got\\n----\\n%v\\n----\\nWant to contain:\\n----\\n%v\\n----\\n\", got, poolName)\n\t\t}\n\t})\n\tbuf.Reset()\n\tt.Logf(\"\\nTestPools() completed\\n\")\n}", "func (p *ResourcePool) destroy(wrapper *ResourceWrapper) {\n\n\t//you can destroy a resource if the pool is closed, no harm no foul\n\tp.resClose(wrapper.Resource)\n\tatomic.AddUint32(&p.open, ^uint32(0))\n\twrapper.p = nil\n}", "func (p *Pool) release() {\n\tif p.closed {\n\t\treturn\n\t}\n\tp.active--\n\tif p.cond != nil {\n\t\tp.cond.Signal()\n\t}\n\n}", "func TestPoolContext(t *testing.T) {\n\tctx, _ := context.WithTimeout(context.Background(), time.Second)\n\tdefer leaktest.CheckContext(ctx, t)()\n\n\tgo func() {\n\t\tfor {\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t}()\n}", "func NewPool(t mockConstructorTestingTNewPool) *Pool {\n\tmock := &Pool{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func Close() {\n\tpool.Close()\n}", "func (i *Instance) dispose() {\n\ti.pool.Close()\n}", "func (p *Pool) Close() error {\n\treturn p.cleanup()\n}", "func (p *Pool) Cleanup() {\n\tp.Stop()\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\tp.baseCancel()\n}", "func (ft *FacadeUnitTest) Test_PoolCacheRemoveHost(c *C) {\n\tft.setupMockDFSLocking()\n\n\tpc := NewPoolCacheEnv()\n\n\tft.hostStore.On(\"FindHostsWithPoolID\", ft.ctx, pc.resourcePool.ID).\n\t\tReturn([]host.Host{pc.firstHost, pc.secondHost}, nil).Once()\n\n\tft.poolStore.On(\"GetResourcePools\", ft.ctx).\n\t\tReturn([]pool.ResourcePool{pc.resourcePool}, 
nil)\n\n\tft.serviceStore.On(\"GetServicesByPool\", ft.ctx, pc.resourcePool.ID).\n\t\tReturn([]service.Service{pc.firstService, pc.secondService}, nil)\n\n\tft.serviceStore.On(\"GetServiceDetails\", ft.ctx, pc.firstService.ID).\n\t\tReturn(&service.ServiceDetails{\n\t\t\tID: pc.firstService.ID,\n\t\t\tRAMCommitment: pc.firstService.RAMCommitment,\n\t\t}, nil)\n\n\tft.hostStore.On(\"Get\", ft.ctx, host.HostKey(pc.secondHost.ID), mock.AnythingOfType(\"*host.Host\")).\n\t\tReturn(nil).\n\t\tRun(func(args mock.Arguments) {\n\t\t\t*args.Get(2).(*host.Host) = pc.secondHost\n\t\t})\n\n\tft.zzk.On(\"RemoveHost\", &pc.secondHost).Return(nil)\n\tft.zzk.On(\"UnregisterDfsClients\", []host.Host{pc.secondHost}).Return(nil)\n\n\tft.hostkeyStore.On(\"Delete\", ft.ctx, pc.secondHost.ID).Return(nil)\n\tft.hostStore.On(\"Delete\", ft.ctx, host.HostKey(pc.secondHost.ID)).Return(nil)\n\n\tpools, err := ft.Facade.GetReadPools(ft.ctx)\n\tc.Assert(err, IsNil)\n\tc.Assert(pools, Not(IsNil))\n\tc.Assert(len(pools), Equals, 1)\n\n\tp := pools[0]\n\n\tc.Assert(p.ID, Equals, pc.resourcePool.ID)\n\tc.Assert(p.CoreCapacity, Equals, 14)\n\tc.Assert(p.MemoryCapacity, Equals, uint64(22000))\n\n\terr = ft.Facade.RemoveHost(ft.ctx, pc.secondHost.ID)\n\tc.Assert(err, IsNil)\n\n\tft.hostStore.On(\"FindHostsWithPoolID\", ft.ctx, pc.resourcePool.ID).\n\t\tReturn([]host.Host{pc.firstHost}, nil).Once()\n\n\tpools, err = ft.Facade.GetReadPools(ft.ctx)\n\tc.Assert(err, IsNil)\n\tc.Assert(pools, Not(IsNil))\n\tc.Assert(len(pools), Equals, 1)\n\n\tp = pools[0]\n\tc.Assert(p.ID, Equals, pc.resourcePool.ID)\n\tc.Assert(p.CoreCapacity, Equals, 6)\n\tc.Assert(p.MemoryCapacity, Equals, uint64(12000))\n}", "func ClosePool() error {\n\tif enable {\n\t\treturn pool.Close()\n\t}\n\treturn nil\n}", "func (s *WorkSuite) TestFull(c *check.C) {\n\tctx, cancel := context.WithCancel(context.TODO())\n\tdefer cancel()\n\tp := NewPool(ctx)\n\tkey := \"some-key\"\n\tvar wg sync.WaitGroup\n\t// signal channel to cause the first group of workers to\n\t// release their leases.\n\tg1done := make(chan struct{})\n\t// timeout channel indicating all of group one should\n\t// have acquired their leases.\n\tg1timeout := make(chan struct{})\n\tgo func() {\n\t\ttime.Sleep(time.Millisecond * 500)\n\t\tclose(g1timeout)\n\t}()\n\tp.Set(key, 200)\n\t// spawn first group of workers.\n\tfor i := 0; i < 200; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tselect {\n\t\t\tcase l := <-p.Acquire():\n\t\t\t\t<-g1done\n\t\t\t\tl.Release()\n\t\t\tcase <-g1timeout:\n\t\t\t\tc.Errorf(\"Timeout waiting for lease\")\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t}\n\t<-g1timeout\n\t// no additional leases should exist\n\tselect {\n\tcase l := <-p.Acquire():\n\t\tc.Errorf(\"unexpected lease: %+v\", l)\n\tdefault:\n\t}\n\t// spawn a second group of workers that won't be able to\n\t// acquire their leases until the first group is done.\n\tfor i := 0; i < 200; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tselect {\n\t\t\tcase <-p.Acquire():\n\t\t\t\t// leak deliberately\n\t\t\tcase <-time.After(time.Millisecond * 512):\n\t\t\t\tc.Errorf(\"Timeout waiting for lease\")\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t}\n\t// signal first group is done\n\tclose(g1done)\n\t// wait for second group to acquire leases.\n\twg.Wait()\n\t// no additional leases should exist\n\tselect {\n\tcase l := <-p.Acquire():\n\t\tcounts := l.loadCounts()\n\t\tc.Errorf(\"unexpected lease grant: %+v, counts=%+v\", l, counts)\n\tcase <-time.After(time.Millisecond * 128):\n\t}\n\t// make one additional lease available\n\tp.Set(key, 
201)\n\tselect {\n\tcase l := <-p.Acquire():\n\t\tc.Assert(l.Key().(string), check.Equals, key)\n\t\tl.Release()\n\tcase <-time.After(time.Millisecond * 128):\n\t\tc.Errorf(\"timeout waiting for lease grant\")\n\t}\n}", "func TestPool(ctx context.Context, pool *redis.Pool) error {\n\tctx, cancel := context.WithCancel(ctx)\n\tconn, err := pool.GetContext(ctx)\n\tcancel()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"redis.TestPool: getting connection from pool failed\")\n\t}\n\n\t_, err = conn.Do(\"PING\")\n\t_ = conn.Close()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"redis.TestPool: performing PING failed\")\n\t}\n\n\treturn nil\n}", "func (t *tOps) close() {\n\tt.bpool.Close()\n\tt.cache.Close()\n\tif t.bcache != nil {\n\t\tt.bcache.CloseWeak()\n\t}\n}", "func (s WorkerSnapshot) Release() {}", "func (pool *Pool) Close() {\n\tif pool.list != nil {\n\t\tC.zpool_list_close(pool.list)\n\t\tpool.list = nil\n\t}\n}", "func (p *Pool) Close() {\n\t// fine to loop through the buckets unlocked\n\t// locking happens at the bucket level\n\tfor b, _ := range p.BucketMap {\n\n\t\t// MB-33208 defer closing connection pools until the bucket is no longer used\n\t\tbucket := p.BucketMap[b]\n\t\tbucket.Lock()\n\t\tbucket.closed = true\n\t\tbucket.Unlock()\n\t}\n}", "func cleanUp() {\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt)\n\tsignal.Notify(c, syscall.SIGTERM)\n\tsignal.Notify(c, syscall.SIGKILL)\n\tgo func() {\n\t\t<-c\n\t\t_ = pool.Close()\n\t\tos.Exit(0)\n\t}()\n}", "func PoolCloseAll(pools []Pool) {\n\tfor _, p := range pools {\n\t\tp.Close()\n\t}\n}", "func (bcp *boltConnPool) Close() error {\n\tst := bcp.pool.Stats()\n\tif st.TotalConns != st.FreeConns {\n\n\t\tlog.Errorf(\n\t\t\t\"connection leaking detected: total_conns=%d free_conns=%d\",\n\t\t\tst.TotalConns, st.FreeConns,\n\t\t)\n\t}\n\treturn bcp.pool.Close()\n}", "func (p *connPool) Release(client *mcc.Client) {\n\t//reset connection deadlines\n\tconn := client.Hijack()\n\n\tconn.(net.Conn).SetReadDeadline(time.Date(1, time.January, 0, 0, 0, 0, 0, time.UTC))\n\tconn.(net.Conn).SetWriteDeadline(time.Date(1, time.January, 0, 0, 0, 0, 0, time.UTC))\n\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\tif p.clients != nil {\n\t\tselect {\n\t\tcase p.clients <- client:\n\t\t\treturn\n\t\tdefault:\n\t\t\t//the pool reaches its capacity, drop the client on the floor\n\t\t\tclient.Close()\n\t\t\treturn\n\t\t}\n\t}\n}", "func (conductor *conductor) Release() {\n\tconductor.peersMutex.Lock()\n\tdefer conductor.peersMutex.Unlock()\n\tfor _, pc := range conductor.peers {\n\t\tglog.Infoln(\"Delete pc\")\n\t\tpc.Delete()\n\t\tdelete(conductor.peers, pc.(*peerConn).Pointer)\n\t}\n\tC.release_shared(conductor.shared)\n\tconductor.shared = nil\n}", "func (r *ResponsePool) Release(resp *Response) {\n\tresp.Reset()\n\tr.pool.Put(resp)\n}", "func (t *Tik) Release() {\n\tt.client.Close()\n}", "func TestPool(t *testing.T) {\n\n\t// All sub-tests to run. 
All of these tests will be run with a postgres\n\t// database and a bolt database.\n\ttests := map[string]func(*testing.T){\n\t\t\"testCSRFSecret\": testCSRFSecret,\n\t\t\"testLastPaymentInfo\": testLastPaymentInfo,\n\t\t\"testLastPaymentCreatedOn\": testLastPaymentCreatedOn,\n\t\t\"testPoolMode\": testPoolMode,\n\t\t\"testAcceptedWork\": testAcceptedWork,\n\t\t\"testAccount\": testAccount,\n\t\t\"testJob\": testJob,\n\t\t\"testDeleteJobsBeforeHeight\": testDeleteJobsBeforeHeight,\n\t\t\"testShares\": testShares,\n\t\t\"testPPSEligibleShares\": testPPSEligibleShares,\n\t\t\"testPPLNSEligibleShares\": testPPLNSEligibleShares,\n\t\t\"testPruneShares\": testPruneShares,\n\t\t\"testPayment\": testPayment,\n\t\t\"testPaymentAccessors\": testPaymentAccessors,\n\t\t\"testEndpoint\": testEndpoint,\n\t\t\"testClient\": testClient,\n\t\t\"testPaymentMgrPPS\": testPaymentMgrPPS,\n\t\t\"testPaymentMgrPPLNS\": testPaymentMgrPPLNS,\n\t\t\"testPaymentMgrMaturity\": testPaymentMgrMaturity,\n\t\t\"testPaymentMgrPayment\": testPaymentMgrPayment,\n\t\t\"testPaymentMgrDust\": testPaymentMgrDust,\n\t\t\"testChainState\": testChainState,\n\t\t\"testHub\": testHub,\n\t}\n\n\t// Run all tests with bolt DB.\n\tfor testName, test := range tests {\n\t\tboltDB, err := setupBoltDB()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"setupBoltDB error: %v\", err)\n\t\t}\n\n\t\tdb = boltDB\n\n\t\tt.Run(testName+\"_Bolt\", test)\n\n\t\terr = teardownBoltDB(boltDB, testDB)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"bolt teardown error: %v\", err)\n\t\t}\n\t}\n\n\t// Run all tests with postgres DB.\n\tfor testName, test := range tests {\n\t\tpostgresDB, err := setupPostgresDB()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"setupPostgresDB error: %v\", err)\n\t\t}\n\n\t\tdb = postgresDB\n\n\t\tt.Run(testName+\"_Postgres\", test)\n\n\t\terr = teardownPostgresDB(postgresDB)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"postgres teardown error: %v\", err)\n\t\t}\n\t}\n\n}", "func (p *DownloadPool) release(d *Downloader) {\n\tp.resource <- d // can never fail ...\n}", "func garbageCleaner() {\n\tfor _ = range time.Tick(1 * time.Minute) {\n\t\tstorage.Cleanup()\n\t}\n}", "func (pool Pool) Close() error {\n\treturn pool.Pool.Close()\n}", "func TestFreelist_release(t *testing.T) {\n\tf := newFreelist()\n\tf.free(100, &page{id: 12, overflow: 1})\n\tf.free(100, &page{id: 9})\n\tf.free(102, &page{id: 39})\n\tf.release(100)\n\tf.release(101)\n\tif exp := []pgid{9, 12, 13}; !reflect.DeepEqual(exp, f.ids) {\n\t\tt.Fatalf(\"exp=%v; got=%v\", exp, f.ids)\n\t}\n\n\tf.release(102)\n\tif exp := []pgid{9, 12, 13, 39}; !reflect.DeepEqual(exp, f.ids) {\n\t\tt.Fatalf(\"exp=%v; got=%v\", exp, f.ids)\n\t}\n}", "func (p *Pool) Close() {\n\tclose(p.tasks)\n}", "func TestFinalize(t *testing.T) {\n\tnumGRBefore := runtime.NumGoroutine()\n\t// Create a set of 100 agreement components, and finalize them immediately\n\tfor i := 0; i < 100; i++ {\n\t\tc, _ := agreement.WireAgreement(50)\n\t\tc.FinalizeRound()\n\t}\n\n\t// Ensure we have freed up all of the resources associated with these components\n\tnumGRAfter := runtime.NumGoroutine()\n\t// We should have roughly the same amount of goroutines\n\tassert.InDelta(t, numGRBefore, numGRAfter, 10.0)\n}", "func ClosePool() error {\n\tif cfg.Disable {\n\t\treturn nil\n\t}\n\n\treturn pool.Close()\n}", "func GC(maxAge int) {\n\tmemoryMutex.Lock()\n\tdefer memoryMutex.Unlock()\n\n\tif allocatedSlabs > 0 {\n\t\tfor s := range slabs {\n\t\t\t//if slabs[s] != nil {\n\t\t\t//\tfmt.Printf(\"slab %d, free %d, total %d, touched %.2f sec\\n\", s, 
slabs[s].free, len(slabs[s].next), time.Since(slabs[s].touched).Seconds())\n\t\t\t//}\n\t\t\tif slabs[s] != nil && slabs[s].free == len(slabs[s].next) && time.Since(slabs[s].touched).Seconds() >= float64(maxAge) {\n\t\t\t\tdeallocateSlab(uint16(s))\n\t\t\t\truntime.GC()\n\t\t\t}\n\t\t}\n\t}\n}", "func (p *request) Release() {\n\tp.ctx = nil\n\tp.Entry = nil\n\tp.read = false\n\trequestPool.Put(p)\n}", "func TestAllocRunner_Destroy(t *testing.T) {\n\tci.Parallel(t)\n\n\t// Ensure task takes some time\n\talloc := mock.BatchAlloc()\n\ttask := alloc.Job.TaskGroups[0].Tasks[0]\n\ttask.Config[\"run_for\"] = \"10s\"\n\n\tconf, cleanup := testAllocRunnerConfig(t, alloc)\n\tdefer cleanup()\n\n\t// Use a MemDB to assert alloc state gets cleaned up\n\tconf.StateDB = state.NewMemDB(conf.Logger)\n\n\tar, err := NewAllocRunner(conf)\n\trequire.NoError(t, err)\n\tgo ar.Run()\n\n\t// Wait for alloc to be running\n\ttestutil.WaitForResult(func() (bool, error) {\n\t\tstate := ar.AllocState()\n\n\t\treturn state.ClientStatus == structs.AllocClientStatusRunning,\n\t\t\tfmt.Errorf(\"got client status %v; want running\", state.ClientStatus)\n\t}, func(err error) {\n\t\trequire.NoError(t, err)\n\t})\n\n\t// Assert state was stored\n\tls, ts, err := conf.StateDB.GetTaskRunnerState(alloc.ID, task.Name)\n\trequire.NoError(t, err)\n\trequire.NotNil(t, ls)\n\trequire.NotNil(t, ts)\n\n\t// Now destroy\n\tar.Destroy()\n\n\tselect {\n\tcase <-ar.DestroyCh():\n\t\t// Destroyed properly!\n\tcase <-time.After(10 * time.Second):\n\t\trequire.Fail(t, \"timed out waiting for alloc to be destroyed\")\n\t}\n\n\t// Assert alloc is dead\n\tstate := ar.AllocState()\n\trequire.Equal(t, structs.AllocClientStatusComplete, state.ClientStatus)\n\n\t// Assert the state was cleaned\n\tls, ts, err = conf.StateDB.GetTaskRunnerState(alloc.ID, task.Name)\n\trequire.NoError(t, err)\n\trequire.Nil(t, ls)\n\trequire.Nil(t, ts)\n\n\t// Assert the alloc directory was cleaned\n\tif _, err := os.Stat(ar.allocDir.AllocDir); err == nil {\n\t\trequire.Fail(t, \"alloc dir still exists: %v\", ar.allocDir.AllocDir)\n\t} else if !os.IsNotExist(err) {\n\t\trequire.Failf(t, \"expected NotExist error\", \"found %v\", err)\n\t}\n}", "func TestDoubleFree(t *testing.T) {\n\tcl := NewTestClient(t)\n\tFreeTest(t, cl)\n\tFreeTest(t, cl)\n}", "func PoolDestroy(name string) error {\n\tcmd := &Cmd{}\n\treturn NvlistIoctl(zfsHandle.Fd(), ZFS_IOC_POOL_DESTROY, name, cmd, nil, nil, nil)\n}", "func (p *pool) close() {\n\tif p.closed {\n\t\treturn\n\t}\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tp.closed = true\n\tclose(p.readyChannel)\n\n\tfor connIndex := range p.connList {\n\t\tp.connList[connIndex].close()\n\t}\n\tp.connList = nil\n}", "func PoolDestroy(ctx context.Context, rpcClient UnaryInvoker, req *PoolDestroyReq) error {\n\treq.setRPC(func(ctx context.Context, conn *grpc.ClientConn) (proto.Message, error) {\n\t\treturn mgmtpb.NewMgmtSvcClient(conn).PoolDestroy(ctx, &mgmtpb.PoolDestroyReq{\n\t\t\tSys: req.getSystem(rpcClient),\n\t\t\tId: req.ID,\n\t\t\tForce: req.Force,\n\t\t})\n\t})\n\n\trpcClient.Debugf(\"Destroy DAOS pool request: %v\\n\", req)\n\tur, err := rpcClient.InvokeUnaryRPC(ctx, req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmsResp, err := ur.getMSResponse()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"pool destroy failed\")\n\t}\n\trpcClient.Debugf(\"Destroy DAOS pool response: %s\\n\", msResp)\n\n\treturn nil\n}", "func (q *HTTP) Release() {\n\tq.HumanLabel = q.HumanLabel[:0]\n\tq.HumanDescription = q.HumanDescription[:0]\n\tq.id = 
0\n\tq.Method = q.Method[:0]\n\tq.Path = q.Path[:0]\n\tq.Body = q.Body[:0]\n\tq.StartTimestamp = 0\n\tq.EndTimestamp = 0\n\n\tHTTPPool.Put(q)\n}", "func (cluster *mongoCluster) Release() {\n\tcluster.Lock()\n\tif cluster.references == 0 {\n\t\tpanic(\"cluster.Release() with references == 0\")\n\t}\n\tcluster.references--\n\tif cluster.references == 0 {\n\t\tfor _, server := range cluster.servers.Slice() {\n\t\t\tserver.Close()\n\t\t}\n\t}\n\tcluster.Unlock()\n}", "func (x *FzPool) Free() {\n\tif x != nil {\n\t\tC.free(unsafe.Pointer(x))\n\t}\n}", "func (c *ChannelPool) Close() {\n\tc.mu.Lock()\n\tconns := c.conns\n\tc.conns = nil\n\tc.factory = nil\n\tc.mu.Unlock()\n\n\tif conns == nil {\n\t\treturn\n\t}\n\n\tclose(conns)\n\tfor conn := range conns {\n\t\tconn.Close()\n\t}\n}", "func (p *Pool) Close() {\n\tp.SetSize(0)\n\tclose(p.reqChan)\n}", "func (cp *Pool) Close() {\n\tlog.Infof(\"connpool - started execution of Close\")\n\tp := cp.pool()\n\tlog.Infof(\"connpool - found the pool\")\n\tif p == nil {\n\t\tlog.Infof(\"connpool - pool is empty\")\n\t\treturn\n\t}\n\t// We should not hold the lock while calling Close\n\t// because it waits for connections to be returned.\n\tlog.Infof(\"connpool - calling close on the pool\")\n\tp.Close()\n\tlog.Infof(\"connpool - acquiring lock\")\n\tcp.mu.Lock()\n\tlog.Infof(\"connpool - acquired lock\")\n\tcp.connections.Close()\n\tcp.connections = nil\n\tcp.mu.Unlock()\n\tlog.Infof(\"connpool - closing dbaPool\")\n\tcp.dbaPool.Close()\n\tlog.Infof(\"connpool - finished execution of Close\")\n}", "func TestAllocGarbageCollector_MakeRoomFor_MaxAllocs(t *testing.T) {\n\tci.Parallel(t)\n\n\tconst maxAllocs = 6\n\trequire := require.New(t)\n\n\tserver, serverAddr, cleanupS := testServer(t, nil)\n\tdefer cleanupS()\n\ttestutil.WaitForLeader(t, server.RPC)\n\n\tclient, cleanup := TestClient(t, func(c *config.Config) {\n\t\tc.GCMaxAllocs = maxAllocs\n\t\tc.GCDiskUsageThreshold = 100\n\t\tc.GCInodeUsageThreshold = 100\n\t\tc.GCParallelDestroys = 1\n\t\tc.GCInterval = time.Hour\n\t\tc.RPCHandler = server\n\t\tc.Servers = []string{serverAddr}\n\t\tc.ConsulConfig.ClientAutoJoin = new(bool)\n\t})\n\tdefer cleanup()\n\twaitTilNodeReady(client, t)\n\n\tjob := mock.Job()\n\tjob.TaskGroups[0].Count = 1\n\tjob.TaskGroups[0].Tasks[0].Driver = \"mock_driver\"\n\tjob.TaskGroups[0].Tasks[0].Config = map[string]interface{}{\n\t\t\"run_for\": \"30s\",\n\t}\n\n\tindex := uint64(98)\n\tnextIndex := func() uint64 {\n\t\tindex++\n\t\treturn index\n\t}\n\n\tupsertJobFn := func(server *nomad.Server, j *structs.Job) {\n\t\tstate := server.State()\n\t\trequire.NoError(state.UpsertJob(structs.MsgTypeTestSetup, nextIndex(), j))\n\t\trequire.NoError(state.UpsertJobSummary(nextIndex(), mock.JobSummary(j.ID)))\n\t}\n\n\t// Insert the Job\n\tupsertJobFn(server, job)\n\n\tupsertAllocFn := func(server *nomad.Server, a *structs.Allocation) {\n\t\tstate := server.State()\n\t\trequire.NoError(state.UpsertAllocs(structs.MsgTypeTestSetup, nextIndex(), []*structs.Allocation{a}))\n\t}\n\n\tupsertNewAllocFn := func(server *nomad.Server, j *structs.Job) *structs.Allocation {\n\t\talloc := mock.Alloc()\n\t\talloc.Job = j\n\t\talloc.JobID = j.ID\n\t\talloc.NodeID = client.NodeID()\n\n\t\tupsertAllocFn(server, alloc)\n\n\t\treturn alloc.Copy()\n\t}\n\n\tvar allocations []*structs.Allocation\n\n\t// Fill the node with allocations\n\tfor i := 0; i < maxAllocs; i++ {\n\t\tallocations = append(allocations, upsertNewAllocFn(server, job))\n\t}\n\n\t// Wait until the allocations are 
ready\n\ttestutil.WaitForResult(func() (bool, error) {\n\t\tar := len(client.getAllocRunners())\n\n\t\treturn ar == maxAllocs, fmt.Errorf(\"Expected %d allocs, got %d\", maxAllocs, ar)\n\t}, func(err error) {\n\t\tt.Fatalf(\"Allocs did not start: %v\", err)\n\t})\n\n\t// Mark the first three as terminal\n\tfor i := 0; i < 3; i++ {\n\t\tallocations[i].DesiredStatus = structs.AllocDesiredStatusStop\n\t\tupsertAllocFn(server, allocations[i].Copy())\n\t}\n\n\t// Wait until the allocations are stopped\n\ttestutil.WaitForResult(func() (bool, error) {\n\t\tar := client.getAllocRunners()\n\t\tstopped := 0\n\t\tfor _, r := range ar {\n\t\t\tif r.Alloc().TerminalStatus() {\n\t\t\t\tstopped++\n\t\t\t}\n\t\t}\n\n\t\treturn stopped == 3, fmt.Errorf(\"Expected %d terminal allocs, got %d\", 3, stopped)\n\t}, func(err error) {\n\t\tt.Fatalf(\"Allocs did not terminate: %v\", err)\n\t})\n\n\t// Upsert a new allocation\n\t// This does not get appended to `allocations` as we do not use them again.\n\tupsertNewAllocFn(server, job)\n\n\t// A single allocation should be GC'd\n\ttestutil.WaitForResult(func() (bool, error) {\n\t\tar := client.getAllocRunners()\n\t\tdestroyed := 0\n\t\tfor _, r := range ar {\n\t\t\tif r.IsDestroyed() {\n\t\t\t\tdestroyed++\n\t\t\t}\n\t\t}\n\n\t\treturn destroyed == 1, fmt.Errorf(\"Expected %d gc'd ars, got %d\", 1, destroyed)\n\t}, func(err error) {\n\t\tt.Fatalf(\"Allocs did not get GC'd: %v\", err)\n\t})\n\n\t// Upsert a new allocation\n\t// This does not get appended to `allocations` as we do not use them again.\n\tupsertNewAllocFn(server, job)\n\n\t// 2 allocations should be GC'd\n\ttestutil.WaitForResult(func() (bool, error) {\n\t\tar := client.getAllocRunners()\n\t\tdestroyed := 0\n\t\tfor _, r := range ar {\n\t\t\tif r.IsDestroyed() {\n\t\t\t\tdestroyed++\n\t\t\t}\n\t\t}\n\n\t\treturn destroyed == 2, fmt.Errorf(\"Expected %d gc'd ars, got %d\", 2, destroyed)\n\t}, func(err error) {\n\t\tt.Fatalf(\"Allocs did not get GC'd: %v\", err)\n\t})\n\n\t// check that all 8 get run eventually\n\ttestutil.WaitForResult(func() (bool, error) {\n\t\tar := client.getAllocRunners()\n\t\tif len(ar) != 8 {\n\t\t\treturn false, fmt.Errorf(\"expected 8 ARs, found %d: %v\", len(ar), ar)\n\t\t}\n\t\treturn true, nil\n\t}, func(err error) {\n\t\trequire.NoError(err)\n\t})\n}", "func (m *MockPool) ReleaseAndWait() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"ReleaseAndWait\")\n}", "func TestPoolDoDoesNotBlock(t *T) {\n\tsize := 10\n\trequestTimeout := 200 * time.Millisecond\n\tredialInterval := 100 * time.Millisecond\n\n\tconnFunc := PoolConnFunc(func(string, string) (Conn, error) {\n\t\treturn dial(DialTimeout(requestTimeout)), nil\n\t})\n\tpool := testPool(size,\n\t\tPoolOnEmptyCreateAfter(redialInterval),\n\t\tPoolPipelineWindow(0, 0),\n\t\tconnFunc,\n\t)\n\n\tassertPoolConns := func(exp int) {\n\t\tassert.Equal(t, exp, pool.NumAvailConns())\n\t}\n\tassertPoolConns(size)\n\n\tvar wg sync.WaitGroup\n\tvar timeExceeded uint32\n\n\t// here we try to imitate external requests which come one at a time\n\t// and exceed the number of connections in pool\n\tfor i := 0; i < 5*size; i++ {\n\t\twg.Add(1)\n\t\tgo func(i int) {\n\t\t\ttime.Sleep(time.Duration(i*10) * time.Millisecond)\n\n\t\t\ttimeStart := time.Now()\n\t\t\terr := pool.Do(WithConn(\"\", func(conn Conn) error {\n\t\t\t\ttime.Sleep(requestTimeout)\n\t\t\t\tconn.(*ioErrConn).lastIOErr = errors.New(\"i/o timeout\")\n\t\t\t\treturn nil\n\t\t\t}))\n\t\t\tassert.NoError(t, err)\n\n\t\t\tif time.Since(timeStart)-requestTimeout-redialInterval > 
20*time.Millisecond {\n\t\t\t\tatomic.AddUint32(&timeExceeded, 1)\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(i)\n\t}\n\n\twg.Wait()\n\tassert.True(t, timeExceeded == 0)\n}", "func TestPoolGet(t *T) {\n\tgetBlock := func(p *Pool) (time.Duration, error) {\n\t\tstart := time.Now()\n\t\t_, err := p.get()\n\t\treturn time.Since(start), err\n\t}\n\n\t// this one is a bit weird, cause it would block infinitely if we let it\n\tt.Run(\"onEmptyWait\", func(t *T) {\n\t\tpool := testPool(1, PoolOnEmptyWait())\n\t\tconn, err := pool.get()\n\t\tassert.NoError(t, err)\n\n\t\tgo func() {\n\t\t\ttime.Sleep(2 * time.Second)\n\t\t\tpool.put(conn)\n\t\t}()\n\t\ttook, err := getBlock(pool)\n\t\tassert.NoError(t, err)\n\t\tassert.True(t, took-2*time.Second < 20*time.Millisecond)\n\t})\n\n\t// the rest are pretty straightforward\n\tgen := func(mkOpt func(time.Duration) PoolOpt, d time.Duration, expErr error) func(*T) {\n\t\treturn func(t *T) {\n\t\t\tpool := testPool(0, PoolOnFullClose(), mkOpt(d))\n\t\t\ttook, err := getBlock(pool)\n\t\t\tassert.Equal(t, expErr, err)\n\t\t\tassert.True(t, took-d < 20*time.Millisecond)\n\t\t}\n\t}\n\n\tt.Run(\"onEmptyCreate\", gen(PoolOnEmptyCreateAfter, 0, nil))\n\tt.Run(\"onEmptyCreateAfter\", gen(PoolOnEmptyCreateAfter, 1*time.Second, nil))\n\tt.Run(\"onEmptyErr\", gen(PoolOnEmptyErrAfter, 0, ErrPoolEmpty))\n\tt.Run(\"onEmptyErrAfter\", gen(PoolOnEmptyErrAfter, 1*time.Second, ErrPoolEmpty))\n}", "func (p *Pools) Close() {\n\tfor _, pool := range p.pools {\n\t\tpool.close()\n\t}\n\tp.Flush(true)\n}", "func (p *unlimitedPool) Close() {\n\n\terr := &ErrPoolClosed{s: errClosed}\n\tp.closeWithError(err)\n}", "func TestSizedBufferPool(t *testing.T) {\n\tsize := 4\n\tcapacity := 1024\n\n\tbufPool := NewSizedBufferPool(size, capacity)\n\n\tb := bufPool.Get()\n\n\t// Check the cap before we use the buffer.\n\tif cap(b.Bytes()) != capacity {\n\t\tt.Fatalf(\"buffer capacity incorrect: got %v want %v\", cap(b.Bytes()),\n\t\t\tcapacity)\n\t}\n\n\t// Grow the buffer beyond our capacity and return it to the pool\n\tb.Grow(capacity * 3)\n\tbufPool.Put(b)\n\n\t// Add some additional buffers to fill up the pool.\n\tfor i := 0; i < size; i++ {\n\t\tbufPool.Put(bytes.NewBuffer(make([]byte, 0, bufPool.alloc*2)))\n\t}\n\n\t// Check that oversized buffers are being replaced.\n\tif len(bufPool.pool) < size {\n\t\tt.Fatalf(\"buffer pool too small: got %v want %v\", len(bufPool.pool), size)\n\t}\n\n\t// Close the channel so we can iterate over it.\n\tclose(bufPool.pool)\n\n\t// Check that there are buffers of the correct capacity in the pool.\n\tfor buffer := range bufPool.pool {\n\t\tif cap(buffer.Bytes()) != bufPool.alloc {\n\t\t\tt.Fatalf(\"returned buffers wrong capacity: got %v want %v\",\n\t\t\t\tcap(buffer.Bytes()), capacity)\n\t\t}\n\t}\n}", "func GC()", "func (cop *ConnPool) close() {\n\tif cop.closed {\n\t\treturn\n\t}\n\tcop.closed = true\n\n\t// wait for connection all closed\n\tclose(cop.connDelayCloseCh)\n\t<-cop.connDelayClosed\n\n\tfor i, cha := range cop.idleChas {\n\t\tcha.close()\n\t\tcop.idleChas[i] = nil\n\t}\n\tcop.idleChas = nil\n\n\tfor i, conn := range cop.conns {\n\t\tconn.close(true)\n\t\tcop.conns[i] = nil\n\t}\n\tcop.conns = nil\n}", "func (bp *BrokerPool) Close() {\n\tbp.lock.Lock()\n\tdefer bp.lock.Unlock()\n\n\t// Closing the shared chan will trigger a cascading teardown procedure for\n\t// brokers and their clients.\n\tclose(bp.ConnectionsChan)\n\tbp.ConnectionsChan = nil\n}", "func (p *ResourcePool) Close() {\n\n\tp.fMutex.Lock()\n\tdefer p.fMutex.Unlock()\n\n\tp.closed = 
true\n\n\tfor {\n\t\tselect {\n\t\tcase resource := <-p.resources:\n\t\t\tp.resClose(resource.Resource)\n\t\t\tatomic.AddUint32(&p.nAvailable, ^uint32(0))\n\t\t\tatomic.AddUint32(&p.open, ^uint32(0))\n\t\tdefault:\n\t\t\tclose(p.resources)\n\t\t\treturn\n\t\t}\n\t}\n}", "func (p *Pool) Close() {\n\tp.stopOnce.Do(func() {\n\t\tclose(p.stopCh)\n\n\t\tvar conn *redis.Client\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase conn = <-p.pool:\n\t\t\t\tconn.Close()\n\t\t\tdefault:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t})\n}", "func (c *Ci) Close() {\n\tc.pool.Close()\n}", "func TestGoroutineLeaks_OnSystemShutdown(t *testing.T) {\n\n\tbefore, _ := os.Create(\"/tmp/gorou-shutdown-before.out\")\n\tdefer before.Close()\n\tafter, _ := os.Create(\"/tmp/gorou-shutdown-after.out\")\n\tdefer after.Close()\n\n\tnumGoroutineBefore := runtime.NumGoroutine()\n\tpprof.Lookup(\"goroutine\").WriteTo(before, 1)\n\n\tt.Run(\"TestCreateGazillionTransactionsWhileTransportIsDuplicatingRandomMessages\", TestCreateGazillionTransactionsWhileTransportIsDuplicatingRandomMessages)\n\tt.Run(\"TestCreateGazillionTransactionsWhileTransportIsDroppingRandomMessages\", TestCreateGazillionTransactionsWhileTransportIsDroppingRandomMessages)\n\tt.Run(\"TestCreateGazillionTransactionsWhileTransportIsDelayingRandomMessages\", TestCreateGazillionTransactionsWhileTransportIsDelayingRandomMessages)\n\n\ttime.Sleep(100 * time.Millisecond) // give goroutines time to terminate\n\truntime.GC()\n\ttime.Sleep(100 * time.Millisecond) // give goroutines time to terminate\n\n\tnumGoroutineAfter := runtime.NumGoroutine()\n\tpprof.Lookup(\"goroutine\").WriteTo(after, 1)\n\n\trequire.Equal(t, numGoroutineBefore, numGoroutineAfter, \"number of goroutines should be equal, compare /tmp/gorou-shutdown-before.out and /tmp/gorou-shutdown-after.out to see stack traces of the leaks\")\n}", "func (m *TrxMgr) shrinkPoolMemories() {\n\tif atomic.LoadUint64(&m.shrinkCounter) > sShrinkCountWaterMark {\n\t\tatomic.StoreUint64(&m.shrinkCounter, 0)\n\n\t\twaiting, fetched := make(map[string]*TrxEntry), make(map[string]*TrxEntry)\n\t\tfor k, e := range m.waiting {\n\t\t\twaiting[k] = e\n\t\t}\n\t\tfor k, e := range m.fetched {\n\t\t\tfetched[k] = e\n\t\t}\n\t\tm.waiting, m.fetched = waiting, fetched\n\t}\n}", "func (p *pool) release(ch *channel) error {\n\tif p.closed {\n\t\treturn errors.New(\"pool closed\")\n\t}\n\n\tselect {\n\tcase p.readyChannel <- ch:\n\t\treturn nil\n\tdefault:\n\t\treturn errors.New(\"ConnPool is full\")\n\t}\n}", "func (p *Pool) Close() error {\n\treturn <-p.close()\n}", "func (m *MultiConnPool) Close() {\n\tfor _, p := range m.Pools {\n\t\tp.Close()\n\t}\n}", "func (f *CPool) Close() error {\n\tif f.pool == nil {\n\t\treturn nil\n\t}\n\tC.fd_pool_free(f.pool)\n\tC.free(unsafe.Pointer(f.dialDomain))\n\tf.pool = nil\n\treturn nil\n}", "func (c *client) release(conn Conn, err error) {\n\tif err != nil {\n\t\tif err := conn.Close(); err != nil {\n\t\t\tc.logger.Printf(\"Could not close connection (%s)\", err.Error())\n\t\t}\n\n\t\tconn = nil\n\t}\n\n\tc.pool.Release(conn)\n}", "func (this *PoolTestSuite) TestNoInstanceOverlap() {\n\tmaxTotal := 5\n\tnumGoroutines := 100\n\tdelay := 1\n\titerations := 1000\n\tthis.pool.Config.MaxTotal = maxTotal\n\tthis.pool.Config.MaxIdle = maxTotal\n\tthis.pool.Config.TestOnBorrow = true\n\tthis.pool.Config.BlockWhenExhausted = true\n\tthis.pool.Config.MaxWaitMillis = int64(-1)\n\trunTestGoroutines(this.T(), numGoroutines, iterations, delay, this.pool)\n\tthis.Equal(0, this.pool.GetDestroyedByBorrowValidationCount())\n}", 
"func TestPackage(t *testing.T) {\n\tif testing.Short() {\n\t\treturn\n\t}\n\n\tss := servers{\n\t\t&server{address: \"127.0.0.1:28081\"},\n\t\t&server{address: \"127.0.0.1:28082\"},\n\t\t&server{address: \"127.0.0.1:28083\"},\n\t\t&server{address: \"127.0.0.1:28084\"},\n\t}\n\n\tc := &client{\n\t\td: &dialer{},\n\t\ts: (&scheduler{}).init(ss),\n\t\texit: make(chan struct{}),\n\t}\n\tc.pool, _ = connpool.New(c.d.dial, poolCapacity, poolCleanupPeriod)\n\n\tgo ss.run()\n\ttime.Sleep(time.Second) // Wait for servers have already been running.\n\tgo c.run()\n\n\ttimer := time.NewTimer(samplingPeriod)\n\n\tvar old = &stats{}\n\texec := func(old *stats) *stats {\n\t\ts := &stats{}\n\t\tss.sampling(s)\n\t\tc.sampling(s)\n\t\ts.assert(t)\n\n\t\tvar diff = &stats{}\n\t\t*diff = *s\n\n\t\tdiff.clientTotalReq -= old.clientTotalReq\n\t\tdiff.clientSuccReq -= old.clientSuccReq\n\t\tdiff.serverTotalReq -= old.serverTotalReq\n\t\tdiff.serverSuccReq -= old.serverSuccReq\n\n\t\tlogf(\"%s\\n%s\", \"statistics\", diff)\n\t\treturn s\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t\told = exec(old)\n\t\t\ttimer.Reset(samplingPeriod)\n\t\tcase <-c.exit:\n\t\t\texec(old)\n\t\t\treturn\n\t\t}\n\t}\n}", "func (ft *FacadeUnitTest) Test_PoolCacheEditPool(c *C) {\n\tft.setupMockDFSLocking()\n\n\tpc := NewPoolCacheEnv()\n\n\tft.hostStore.On(\"FindHostsWithPoolID\", ft.ctx, pc.resourcePool.ID).\n\t\tReturn([]host.Host{pc.firstHost, pc.secondHost}, nil)\n\n\tft.poolStore.On(\"GetResourcePools\", ft.ctx).\n\t\tReturn([]pool.ResourcePool{pc.resourcePool}, nil).Once()\n\n\tft.serviceStore.On(\"GetServicesByPool\", ft.ctx, pc.resourcePool.ID).\n\t\tReturn([]service.Service{pc.firstService, pc.secondService}, nil)\n\n\tft.serviceStore.On(\"GetServiceDetails\", ft.ctx, pc.firstService.ID).\n\t\tReturn(&service.ServiceDetails{\n\t\t\tID: pc.firstService.ID,\n\t\t\tRAMCommitment: pc.firstService.RAMCommitment,\n\t\t}, nil)\n\n\tpools, err := ft.Facade.GetReadPools(ft.ctx)\n\tc.Assert(err, IsNil)\n\tc.Assert(pools, Not(IsNil))\n\tc.Assert(len(pools), Equals, 1)\n\n\tp := pools[0]\n\n\tc.Assert(p.ID, Equals, pc.resourcePool.ID)\n\tc.Assert(p.CreatedAt, TimeEqual, pc.resourcePool.CreatedAt)\n\tc.Assert(p.UpdatedAt, TimeEqual, pc.resourcePool.UpdatedAt)\n\tc.Assert(p.Permissions, Equals, pc.resourcePool.Permissions)\n\n\tpc.resourcePool.Permissions = pool.AdminAccess & pool.DFSAccess\n\n\tft.poolStore.On(\"GetResourcePools\", ft.ctx).\n\t\tReturn([]pool.ResourcePool{pc.resourcePool}, nil).Once()\n\n\tft.poolStore.On(\"Get\", ft.ctx, pool.Key(pc.resourcePool.ID), mock.AnythingOfType(\"*pool.ResourcePool\")).\n\t\tReturn(nil).\n\t\tRun(func(args mock.Arguments) {\n\t\t\t*args.Get(2).(*pool.ResourcePool) = pc.resourcePool\n\t\t})\n\n\tft.poolStore.On(\"Put\", ft.ctx, pool.Key(pc.resourcePool.ID), mock.AnythingOfType(\"*pool.ResourcePool\")).\n\t\tReturn(nil)\n\n\tft.zzk.On(\"UpdateResourcePool\", mock.AnythingOfType(\"*pool.ResourcePool\")).\n\t\tReturn(nil)\n\n\tft.Facade.UpdateResourcePool(ft.ctx, &pc.resourcePool)\n\n\t// GetReadPools should see that the cache is dirty, and update itself\n\tpools, err = ft.Facade.GetReadPools(ft.ctx)\n\tc.Assert(err, IsNil)\n\tc.Assert(pools, Not(IsNil))\n\tc.Assert(len(pools), Equals, 1)\n\n\tp = pools[0]\n\tc.Assert(p.ID, Equals, pc.resourcePool.ID)\n\tc.Assert(p.CreatedAt, TimeEqual, pc.resourcePool.CreatedAt)\n\tc.Assert(p.UpdatedAt, Not(TimeEqual), pc.resourcePool.UpdatedAt)\n\tc.Assert(p.Permissions, Equals, (pool.AdminAccess & pool.DFSAccess))\n}", "func (c *ClosablePool) 
Close(timeout time.Duration) error {\n\tstarted := time.Now()\n\n\ttiers := []int{}\n\tfor i := range c.closables {\n\t\ttiers = append(tiers, i)\n\t}\n\tsort.Ints(tiers)\n\n\tfor _, i := range tiers {\n\t\ttier := c.closables[i]\n\t\tfor j := range tier {\n\t\t\ttier[j].CloseAsync()\n\t\t}\n\t\tfor j := range tier {\n\t\t\tif err := tier[j].WaitForClose(timeout - time.Since(started)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tdelete(c.closables, i)\n\t}\n\treturn nil\n}", "func (tester *TransportTesting) cleanup() {\n\tblocks := *tester.blocks\n\tfor i := int32(0); i < blocks; i++ {\n\t\ttester.manager.transportLock.RUnlock()\n\t}\n}" ]
[ "0.79037666", "0.77005196", "0.7212689", "0.69314015", "0.68882775", "0.6821975", "0.677085", "0.67185074", "0.67125475", "0.6660586", "0.66495705", "0.6595156", "0.65770125", "0.6570085", "0.65700454", "0.65200526", "0.651361", "0.64882994", "0.6450801", "0.6413368", "0.6399594", "0.6370763", "0.63699496", "0.6368809", "0.6365865", "0.6345376", "0.63409895", "0.63407534", "0.63335663", "0.6303237", "0.6298715", "0.62864655", "0.62493604", "0.62127525", "0.62121934", "0.61903596", "0.61883664", "0.6165552", "0.6164692", "0.6148403", "0.61335", "0.6129027", "0.61222845", "0.61008686", "0.6073688", "0.6053317", "0.6051559", "0.60291004", "0.60266197", "0.60251564", "0.60235405", "0.59949696", "0.59937626", "0.5981015", "0.5969678", "0.59667295", "0.5965934", "0.59504366", "0.59439445", "0.5942454", "0.5932583", "0.59277284", "0.58933467", "0.5891967", "0.5891965", "0.5885961", "0.58820105", "0.5880099", "0.58766454", "0.58762866", "0.58704513", "0.5866486", "0.5865959", "0.58626235", "0.5861138", "0.58589387", "0.5853028", "0.5832293", "0.5811559", "0.5799879", "0.5790201", "0.5789719", "0.57896644", "0.5776578", "0.57764167", "0.5775896", "0.5772976", "0.5752351", "0.5751464", "0.574949", "0.5747975", "0.573729", "0.5734764", "0.57326347", "0.57324016", "0.5731291", "0.5723381", "0.57228905", "0.572031", "0.5709607" ]
0.79534227
0
Simulate object starvation in order to force Ps to steal objects from other Ps.
func BenchmarkPoolStarvation(b *testing.B) { var p Pool count := 100 // Reduce number of putted objects by 33 %. It creates objects starvation // that force P-local storage to steal objects from other Ps. countStarved := count - int(float32(count)*0.33) b.RunParallel(func(pb *testing.PB) { for pb.Next() { for b := 0; b < countStarved; b++ { p.Put(1) } for b := 0; b < count; b++ { p.Get() } } }) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func SyncRuntimeDoSpin()", "func (this *PoolTestSuite) TestInvalidateFreesCapacity() {\n\tthis.pool.Config.MaxTotal = 2\n\tthis.pool.Config.MaxWaitMillis = 500\n\tthis.pool.Config.BlockWhenExhausted = true\n\t// Borrow an instance and hold if for 5 seconds\n\tch1 := waitTestGoroutine(this.pool, 5000)\n\t// Borrow another instance\n\tobj := this.NoErrorWithResult(this.pool.BorrowObject())\n\t// Launch another goroutine - will block, but fail in 500 ms\n\tch2 := waitTestGoroutine(this.pool, 100)\n\t// Invalidate the object borrowed by this goroutine - should allow goroutine2 to create\n\tsleep(20)\n\tthis.NoError(this.pool.InvalidateObject(obj))\n\tsleep(600) // Wait for goroutine2 to timeout\n\tresult2 := <-ch2\n\tclose(ch2)\n\tif result2.error != nil {\n\t\tthis.Fail(result2.error.Error())\n\t}\n\t<-ch1\n\tclose(ch1)\n}", "func ObjMutator(thsObj *gms.ThsObj, objHist *gms.ThsObjs, interval time.Duration) {\n\to := thsObj.Get()\n\to.O = o.O.RandMutate()\n\to.T = time.Now()\n\tobjHist.Add(o.O)\n\tthsObj.Set(o)\n\n\tc := time.Tick(interval)\n\tfor range c {\n\t\to := thsObj.Get()\n\t\to.O = o.O.RandMutate()\n\t\to.T = time.Now()\n\t\tobjHist.Add(o.O)\n\t\tthsObj.Set(o)\n\t}\n}", "func suspend() {}", "func (p philosopher) eat() {\r\n\tdefer eatWgroup.Done()\r\n\tfor j := 0; j < 3; j++ {\r\n\t\tp.leftFork.Lock()\r\n\t\tp.rightFork.Lock()\r\n\r\n\t\tsay(\"eating\", p.id)\r\n\t\ttime.Sleep(time.Second)\r\n\r\n\t\tp.rightFork.Unlock()\r\n\t\tp.leftFork.Unlock()\r\n\r\n\t\tsay(\"finished eating\", p.id)\r\n\t\ttime.Sleep(time.Second)\r\n\t}\r\n\r\n}", "func pollObstructionSwitch(obstructedPub chan<- ObstructedEvent) {\n\tprev := false\n\tfor {\n\t\ttime.Sleep(_pollRate)\n\t\tv := getObstruction()\n\t\tif v != prev {\n\t\t\tevt := ObstructedEvent{utils.ELEVATOR_ID, v}\n\t\t\tobstructedPub <- evt\n\t\t}\n\t\tprev = v\n\t}\n}", "func (s *System) spawn() {\n\tfor i := range s.threads {\n\t\tt := &s.threads[i]\n\t\tfor _, sr := range t.requests {\n\t\t\tstr := len(s.particles)\n\t\t\ts.particles.Resize(str + sr.Amount)\n\t\t\tfor i := 0; i < sr.Amount; i++ {\n\t\t\t\tr := sr.Rotation.Gen()\n\t\t\t\tvel := mat.Rad(sr.Spread.Gen()+sr.Dir, sr.Velocity.Gen())\n\t\t\t\tif sr.RotationRelativeToVelocity {\n\t\t\t\t\tr += vel.Angle()\n\t\t\t\t}\n\n\t\t\t\tp := &s.particles[i+str]\n\n\t\t\t\tp.Type = sr.Type\n\n\t\t\t\tp.vel = vel\n\t\t\t\tp.orig = sr.Pos\n\t\t\t\tp.pos = sr.Pos.Add(sr.Gen(sr.Dir))\n\n\t\t\t\tp.mask = sr.Mask.Mul(sr.Type.Mask.Gen())\n\n\t\t\t\tp.scl.X = sr.ScaleX.Gen()\n\t\t\t\tp.scl.Y = sr.ScaleY.Gen()\n\t\t\t\tp.livetime = 1 / sr.Livetime.Gen()\n\t\t\t\tp.twerk = sr.Twerk.Gen()\n\t\t\t\tp.rot = r\n\t\t\t\tp.progress = 0\n\n\t\t\t\tp.vertex = s.vertex\n\t\t\t\tp.indice = s.indice\n\n\t\t\t\ts.vertex += p.vertexes\n\t\t\t\ts.indice += p.indices\n\t\t\t}\n\t\t}\n\t\tt.requests = t.requests[:0]\n\t}\n}", "func (e *Engine) GoPonder() {\n\tif atomic.CompareAndSwapInt32(&e.running, 0, 1) {\n\t\te.startNow(true)\n\t}\n}", "func (s *State) simulateTick() {\n\tresponses := []newStateInfo{}\n\tdeadNodes := []*growthRoot{}\n\n\t// Tell each root to run until the next move operation\n\tfor root := range s.state.roots {\n\t\tif root.cache != nil {\n\t\t\tcontinue\n\t\t}\n\t\tch, ok := root.node.Update(s.mkWorldState(root))\n\t\tif !ok {\n\t\t\t// We have to remove the node later\n\t\t\tdeadNodes = append(deadNodes, root)\n\t\t\tcontinue\n\t\t}\n\n\t\tresponses = append(responses, newStateInfo{ch, root})\n\t}\n\n\tfor _, dead := range deadNodes {\n\t\tdelete(s.state.roots, dead)\n\t}\n\n\tfor r := range 
s.state.roots {\n\t\tif r.cache != nil {\n\t\t\ts.applyChanges(r, *r.cache)\n\t\t}\n\t}\n\n\tfor _, response := range responses {\n\t\tnewState, ok := <-response.ch\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\ts.applyChanges(response.root, newState)\n\t}\n\n\tspores := []spore{}\n\tfor _, p := range s.diff.Spores {\n\t\t// UpdateSpore returns true if it has planted the spore\n\t\tspawned := s.UpdateSpore(&p)\n\t\t// Unplanted spores are kept for next tick\n\t\tif spawned {\n\t\t\ts.plantRelease(p.SpeciesId)\n\t\t} else {\n\t\t\tspores = append(spores, p)\n\t\t}\n\t}\n\ts.diff.Spores = spores\n\n\tsurviving := make([]*Plant, 0, len(s.state.plants))\n\tfor _, p := range s.state.plants {\n\t\tdeltaEnergy := 0\n\t\tfactor := 1000.0\n\t\tfor _ = range p.tiles {\n\t\t\tdeltaEnergy += int(float64(2+p.Luck) * (factor / 1000))\n\t\t\tfactor = (factor * (.5))\n\t\t\t// logrus.Infof(\"factor %f\")\n\t\t}\n\n\t\tdeltaEnergy -= (p.Age * p.Age) / 10000\n\t\tp.Energy += deltaEnergy\n\n\t\t// logrus.Infof(\"delta energy: %d\", deltaEnergy)\n\t\tif p.Energy < 0 {\n\t\t\tif !p.terminal {\n\t\t\t\tfirstRoot := true\n\t\t\t\tfor r := range p.roots {\n\t\t\t\t\ts.HaltGrowth(r)\n\n\t\t\t\t\tif firstRoot {\n\t\t\t\t\t\ts.AddSpore(r.Loc, r.SpeciesId)\n\t\t\t\t\t\tfirstRoot = false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tp.roots = map[*growthRoot]struct{}{}\n\t\t\t\tp.terminal = true\n\t\t\t}\n\n\t\t\tp.markedTiles = make(map[Location]Location)\n\n\t\t\tfor _, t := range p.tiles {\n\t\t\t\tp.markedTiles[t] = t\n\t\t\t}\n\t\t\tfor _, t := range p.tiles {\n\t\t\t\tdelete(p.markedTiles, s.GetTile(t).Extra.Parent)\n\t\t\t}\n\t\t\tif len(p.markedTiles) != 0 {\n\n\t\t\t\tfor _, t := range p.markedTiles {\n\t\t\t\t\tif shouldSpawnSpore() {\n\t\t\t\t\t\ts.AddSpore(p.tiles[t.str()], p.SpeciesId)\n\t\t\t\t\t}\n\t\t\t\t\ts.SetTile(t, Tile{AirTile, nil})\n\t\t\t\t\tdelete(p.tiles, t.str())\n\t\t\t\t}\n\t\t\t\tsurviving = append(surviving, p)\n\n\t\t\t} else {\n\t\t\t\ts.plantRelease(p.SpeciesId)\n\t\t\t\tfor _, t := range p.tiles {\n\t\t\t\t\tif shouldSpawnSpore() {\n\t\t\t\t\t\ts.AddSpore(p.tiles[t.str()], p.SpeciesId)\n\t\t\t\t\t}\n\t\t\t\t\ts.SetTile(t, Tile{AirTile, nil})\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t} else {\n\t\t\tsurviving = append(surviving, p)\n\t\t}\n\t\tp.Age++\n\t}\n\n\ts.state.plants = surviving\n}", "func MWAIT() { ctx.MWAIT() }", "func gosched() { time.Sleep(1 * time.Millisecond) }", "func BenchmarkCreateGoroutinesSingle(b *testing.B) {\n\t// Since we are interested in stealing behavior, warm the scheduler to\n\t// get all the Ps running first.\n\twarmupScheduler(runtime.GOMAXPROCS(0))\n\tb.ResetTimer()\n\n\tvar wg sync.WaitGroup\n\twg.Add(b.N)\n\tfor i := 0; i < b.N; i++ {\n\t\tgo func() {\n\t\t\twg.Done()\n\t\t}()\n\t}\n\twg.Wait()\n}", "func (s *MockManagedThread) SuspendUnsafe() {}", "func busySleep(dur time.Duration) {\n\tstart := time.Now()\n\tfor time.Now().Sub(start) < dur {\n\t}\n}", "func (this *RpcObject) Loop() {\n\tfor this.IsRun {\n\t\tstart := time.Now()\n\t\tthis.ExecuteEvent()\n\t\tdelta := MAX_SLEEP_TIME - time.Now().Sub(start)\n\t\tif delta > 0 {\n\t\t\ttime.Sleep(delta)\n\t\t} else {\n\t\t\truntime.Gosched()\n\t\t}\n\t}\n}", "func (s *SpySleeper) Sleep(){\n\ts.Calls++\n}", "func (s *SpySleeper) Sleep() {\n s.Calls++\n}", "func (s *System) Spawn() {\n\tif s.spawning {\n\t\tpanic(\"already spawning\")\n\t}\n\ts.clear()\n\ts.spawn()\n\ts.allocate()\n}", "func (self *PhysicsP2) SetSleepModeA(member int) {\n self.Object.Set(\"sleepMode\", member)\n}", "func TestVectorClockRace(t *testing.T) {\n\tvc := 
NewVectorClock(1, 2, 3, 4, 5)\n\tvar wg sync.WaitGroup\n\n\twg.Add(1)\n\tgo func(vc VectorClock) {\n\t\tdefer wg.Done()\n\t\tfor id := uint32(1); id < 10; id++ {\n\t\t\tfor i := 0; i < 10; i++ {\n\t\t\t\tvc.Follow(id, rand.Uint64())\n\t\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t\t}\n\t\t}\n\t}(vc)\n\n\twg.Add(1)\n\tgo func(vc VectorClock) {\n\t\tdefer wg.Done()\n\t\tfor i := 1; i < 100; i++ {\n\t\t\tclone := vc.Clone()\n\t\t\t_ = clone\n\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t}\n\t}(vc)\n\n\twg.Wait()\n}", "func (d *specialDevice) mock(gear v1alpha1.DummySpecialDeviceGear, stop <-chan struct{}) {\n\tdefer runtime.HandleCrash(handler.NewPanicsCleanupSocketHandler(metadata.Endpoint))\n\n\td.log.Info(\"Mocking\")\n\tdefer func() {\n\t\td.log.Info(\"Finished mocking\")\n\t}()\n\n\tvar duration time.Duration\n\tswitch gear {\n\tcase v1alpha1.DummySpecialDeviceGearSlow:\n\t\tduration = 3 * time.Second\n\tcase v1alpha1.DummySpecialDeviceGearMiddle:\n\t\tduration = 2 * time.Second\n\tcase v1alpha1.DummySpecialDeviceGearFast:\n\t\tduration = 1 * time.Second\n\t}\n\tvar ticker = time.NewTicker(duration)\n\tdefer ticker.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-stop:\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t}\n\n\t\td.Lock()\n\t\tfunc() {\n\t\t\tdefer d.Unlock()\n\n\t\t\tvar status = &d.instance.Status\n\t\t\tswitch status.Gear {\n\t\t\tcase v1alpha1.DummySpecialDeviceGearFast:\n\t\t\t\tif status.RotatingSpeed < 300 {\n\t\t\t\t\tstatus.RotatingSpeed++\n\t\t\t\t}\n\t\t\tcase v1alpha1.DummySpecialDeviceGearMiddle:\n\t\t\t\tif status.RotatingSpeed < 200 {\n\t\t\t\t\tstatus.RotatingSpeed++\n\t\t\t\t}\n\t\t\tcase v1alpha1.DummySpecialDeviceGearSlow:\n\t\t\t\tif status.RotatingSpeed < 100 {\n\t\t\t\t\tstatus.RotatingSpeed++\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err := d.sync(); err != nil {\n\t\t\t\td.log.Error(err, \"failed to sync\")\n\t\t\t}\n\t\t}()\n\n\t\tselect {\n\t\tcase <-stop:\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\t}\n}", "func (p Philosopher) dine(wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\tp.leftFork.Lock()\n\tp.rightFork.Lock()\n\n\tfmt.Println(p.id, \" is eating\")\n\t//used pause values to minimise.\n\ttime.Sleep(2 * time.Second)\n\tp.rightFork.Unlock()\n\tp.leftFork.Unlock()\n\n}", "func TestVolumeBindingStress(t *testing.T) {\n\tconfig := setup(t, \"volume-binding-stress\")\n\tdefer config.teardown()\n\n\t// Create enough PVs and PVCs for all the pods\n\tpvs := []*v1.PersistentVolume{}\n\tpvcs := []*v1.PersistentVolumeClaim{}\n\tfor i := 0; i < podLimit*volsPerPod; i++ {\n\t\tpv := makePV(t, fmt.Sprintf(\"pv-stress-%v\", i), classWait, \"\", \"\")\n\t\tpvc := makePVC(fmt.Sprintf(\"pvc-stress-%v\", i), config.ns, &classWait, \"\")\n\n\t\tif pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {\n\t\t\tt.Fatalf(\"Failed to create PersistentVolume %q: %v\", pv.Name, err)\n\t\t}\n\t\tif pvc, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(pvc); err != nil {\n\t\t\tt.Fatalf(\"Failed to create PersistentVolumeClaim %q: %v\", pvc.Name, err)\n\t\t}\n\n\t\tpvs = append(pvs, pv)\n\t\tpvcs = append(pvcs, pvc)\n\t}\n\n\tpods := []*v1.Pod{}\n\tfor i := 0; i < podLimit; i++ {\n\t\t// Generate string of all the PVCs for the pod\n\t\tpodPvcs := []string{}\n\t\tfor j := i * volsPerPod; j < (i+1)*volsPerPod; j++ {\n\t\t\tpodPvcs = append(podPvcs, pvcs[j].Name)\n\t\t}\n\n\t\tpod := makePod(fmt.Sprintf(\"pod%v\", i), config.ns, podPvcs)\n\t\tif pod, err := config.client.CoreV1().Pods(config.ns).Create(pod); err != nil {\n\t\t\tt.Fatalf(\"Failed to create Pod %q: 
%v\", pod.Name, err)\n\t\t}\n\t\tpods = append(pods, pod)\n\t}\n\n\t// Validate Pods scheduled\n\tfor _, pod := range pods {\n\t\tif err := waitForPodToSchedule(config.client, pod); err != nil {\n\t\t\tt.Errorf(\"Failed to schedule Pod %q: %v\", pod.Name, err)\n\t\t}\n\t}\n\n\t// Validate PVC/PV binding\n\tfor _, pvc := range pvcs {\n\t\tvalidatePVCPhase(t, config.client, pvc, v1.ClaimBound)\n\t}\n\tfor _, pv := range pvs {\n\t\tvalidatePVPhase(t, config.client, pv, v1.VolumeBound)\n\t}\n\n\t// TODO: validate events on Pods and PVCs\n}", "func freezetheworld() {\n\tatomic.Store(&freezing, 1)\n\t// stopwait and preemption requests can be lost\n\t// due to races with concurrently executing threads,\n\t// so try several times\n\tfor i := 0; i < 5; i++ {\n\t\t// this should tell the scheduler to not start any new goroutines\n\t\tsched.stopwait = freezeStopWait\n\t\tatomic.Store(&sched.gcwaiting, 1)\n\t\t// this should stop running goroutines\n\t\tif !preemptall() {\n\t\t\tbreak // no running goroutines\n\t\t}\n\t\tusleep(1000)\n\t}\n\t// to be sure\n\tusleep(1000)\n\tpreemptall()\n\tusleep(1000)\n}", "func stress(apibaseurl, apiport, apipath string, peakhammerpause time.Duration) {\n\ttime.Sleep(1 * time.Second)\n\tep := fmt.Sprintf(\"%v:%v%v\", apibaseurl, apiport, apipath)\n\tlog.Printf(\"Starting to hammer %v every %v\", ep, peakhammerpause)\n\tfor {\n\t\t_, err := http.Get(ep)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t\ttime.Sleep(peakhammerpause)\n\t}\n}", "func KeepAlive(x interface{})", "func spawnInALoop() {\n\tfor i := 0; i < 10; i++ {\n\t\t// Our goroutine is a closure, and closures can\n\t\t// access variables in the outer scope, so we can\n\t\t// grab i here to give each goroutine an ID, right?\n\t\t// (Hint: wrong.)\n\t\tgo func() {\n\t\t\tfmt.Println(\"Goroutine\", i)\n\t\t}() // <- Don't forget to call () the closure\n\t}\n\ttime.Sleep(100 * time.Millisecond)\n}", "func TestRPCRatelimit(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\tg1 := newNamedTestingGateway(t, \"1\")\n\tdefer g1.Close()\n\tg2 := newNamedTestingGateway(t, \"2\")\n\tdefer g2.Close()\n\n\tvar atomicCalls, atomicErrs uint64\n\tg2.RegisterRPC(\"recv\", func(conn modules.PeerConn) error {\n\t\t_, err := conn.Write([]byte(\"hi\"))\n\t\tif err != nil {\n\t\t\tatomic.AddUint64(&atomicErrs, 1)\n\t\t\treturn err\n\t\t}\n\t\tatomic.AddUint64(&atomicCalls, 1)\n\t\treturn nil\n\t})\n\n\terr := g1.Connect(g2.Address())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Block until the connection is confirmed.\n\tfor i := 0; i < 50; i++ {\n\t\ttime.Sleep(10 * time.Millisecond)\n\t\tg1.mu.Lock()\n\t\tg1Peers := len(g1.peers)\n\t\tg1.mu.Unlock()\n\t\tg2.mu.Lock()\n\t\tg2Peers := len(g2.peers)\n\t\tg2.mu.Unlock()\n\t\tif g1Peers > 0 || g2Peers > 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\tg1.mu.Lock()\n\tg1Peers := len(g1.peers)\n\tg1.mu.Unlock()\n\tg2.mu.Lock()\n\tg2Peers := len(g2.peers)\n\tg2.mu.Unlock()\n\tif g1Peers == 0 || g2Peers == 0 {\n\t\tt.Fatal(\"Peers did not connect to eachother\")\n\t}\n\n\t// Call \"recv\" in a tight loop. Check that the number of successful calls\n\t// does not exceed the ratelimit.\n\tstart := time.Now()\n\tvar wg sync.WaitGroup\n\ttargetDuration := rpcStdDeadline * 4 / 3\n\tmaxCallsForDuration := targetDuration / peerRPCDelay\n\tcallVolume := int(maxCallsForDuration * 3 / 5)\n\tfor i := 0; i < callVolume; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\t// Call an RPC on our peer. 
Error is ignored, as many are expected\n\t\t\t// and indicate that the test is working.\n\t\t\t_ = g1.RPC(g2.Address(), \"recv\", func(conn modules.PeerConn) error {\n\t\t\t\tbuf := make([]byte, 2)\n\t\t\t\t_, err := conn.Read(buf)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif string(buf) != \"hi\" {\n\t\t\t\t\treturn errors.New(\"caller rpc failed\")\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t})\n\t\t}()\n\t\t// Sleep for a little bit so that the connections are coming all in a\n\t\t// row instead of all at once. But sleep for little enough time that the\n\t\t// number of connectings is still far surpassing the allowed ratelimit.\n\t\ttime.Sleep(peerRPCDelay / 10)\n\t}\n\twg.Wait()\n\n\tstop := time.Now()\n\telapsed := stop.Sub(start)\n\texpected := peerRPCDelay * (time.Duration(atomic.LoadUint64(&atomicCalls)) + 1)\n\tif elapsed*10/9 < expected {\n\t\tt.Error(\"ratelimit does not seem to be effective\", expected, elapsed)\n\t}\n}", "func GenerateObjects(c chan *engine.Object) {\n var o *engine.Object\n amt := 0\n for amt < insert {\n amt += 1\n x := rand.Intn(magic)\n y := rand.Intn(magic)\n if rand.Intn(100) > 98 {\n x = 1\n }\n if rand.Intn(100) > 98 {\n y = 1\n }\n o = &engine.Object{\n Pos: engine.Vertex{rand.Intn(w - magic), rand.Intn(h - magic)},\n Size: engine.Vertex{x, y}}\n\n c <- o\n }\n\n close(c)\n}", "func (gen *Gen) sleep() {\n\tnext := gen.epochns + gen.bucket*BucketLen\n\tsleep := time.Duration(next - gen.now().UTC().UnixNano())\n\ttime.Sleep(sleep)\n}", "func (Stop) SP() *Object { panic(\"iolang: a Stop is not an Object!\") }", "func refused(t *testing.T, primary *proxy, syncer *metasyncer) ([]transportData, []transportData) {\n\tvar (\n\t\tch = make(chan transportData, 2) // NOTE: Use 2 to avoid unbuffered channel, http handler can return.\n\t\tid = \"p\"\n\t\taddrInfo = *meta.NewNetInfo(\n\t\t\thttpProto,\n\t\t\t\"127.0.0.1\",\n\t\t\t\"53538\", // the lucky port\n\t\t)\n\t)\n\n\t// handler for /v1/metasync\n\thttp.HandleFunc(apc.URLPathMetasync.S, func(w http.ResponseWriter, r *http.Request) {\n\t\tch <- transportData{true, id, 1}\n\t})\n\n\tclone := primary.owner.smap.get().clone()\n\tclone.Pmap[id] = meta.NewSnode(id, apc.Proxy, addrInfo, addrInfo, addrInfo)\n\tclone.Version++\n\tprimary.owner.smap.put(clone)\n\n\t// function shared between the two cases: start proxy, wait for a sync call\n\tf := func() {\n\t\ttimer := time.NewTimer(time.Minute)\n\t\tdefer timer.Stop()\n\n\t\twg := &sync.WaitGroup{}\n\t\ts := &http.Server{Addr: addrInfo.String()}\n\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\ts.ListenAndServe()\n\t\t}()\n\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t\tt.Log(\"timed out\")\n\t\tcase <-ch:\n\t\t}\n\n\t\ts.Close()\n\t\twg.Wait()\n\t}\n\n\t// testcase #1: short delay\n\tsmap := primary.owner.smap.get()\n\tmsg := primary.newAmsgStr(\"\", nil)\n\tsyncer.sync(revsPair{smap, msg})\n\ttime.Sleep(time.Millisecond)\n\t// sync will return even though the sync actually failed, and there is no error return\n\tf()\n\n\t// testcase #2: long delay\n\tctx := &smapModifier{\n\t\tpre: func(_ *smapModifier, clone *smapX) error {\n\t\t\tclone.Version++\n\t\t\treturn nil\n\t\t},\n\t\tfinal: func(_ *smapModifier, clone *smapX) {\n\t\t\tmsg := primary.newAmsgStr(\"\", nil)\n\t\t\tsyncer.sync(revsPair{clone, msg})\n\t\t},\n\t}\n\tprimary.owner.smap.modify(ctx)\n\n\ttime.Sleep(2 * time.Second)\n\tf()\n\n\t// only cares if the sync call comes, no need to verify the id and cnt as we are the one\n\t// filling those in above\n\texp := 
[]transportData{{true, id, 1}}\n\treturn exp, exp\n}", "func TransportWithProbe() {\n probes=0 \n if maxprobes == true {\n celestial = GetCachedCelestial(from)\n ships, _ = GetShips(celestial.GetID())\n Print(\"Found \" + ships.EspionageProbe + \" Spy probes \")\n probes = ships.EspionageProbe\n } else {\n probes = fixedprobes \n }\n\n for i = 1; i <= repeat; i++ {\n LogDebug(\"Sending \" + probes + \" spy Probes \" + i + \" of \" + repeat + \" times \" )\n \n fleet = NewFleet()\n fleet.SetOrigin(from)\n fleet.SetDestination(to)\n fleet.SetMission(TRANSPORT)\n \n if wantedresource == 1 { fleet.SetAllMetal() }\n if wantedresource == 2 { fleet.SetAllCrystal() }\n if wantedresource == 3 { fleet.SetAllDeuterium() }\n if wantedresource == 4 { fleet.SetAllResources() } \n \n fleet.AddShips(ESPIONAGEPROBE, probes)\n fleet, err = fleet.SendNow()\n LogDebug(\"Probes Fleet.ID/ERROR: \"+ fleet.ID + \"/\" + err + \"\\n waiting \" + waittime/1000 + \" seconds \")\n Sleep(waittime)\n \n \n RND = Random(5000, 10000)\n LogDebug (\"Returned, wait random delay of \" + RND/1000 + \" Seconds \")\n Sleep(RND) // Sleep between 5 and 10 seconds\n \n}\nLogDebug (\"Sent probes \"+ repeat + \" times\" )\nLogDebug (\"End of Script\" )\nExit()\n}", "func (s *spoolNode) spooled() {}", "func (px *Paxos) Start(seq int, v interface{}) {\n\t// run Paxos algorithm in a new thread(run the Paxos protocol concurrently)\n\t// play the role of proposer\n\n\t// Your code here.\n\n\tpx.clog(DBG_PROPOSER, \"Start\", \"Start seq=%d v=%v\", seq, v)\n\n\t// I'm Proposer\n\tgo func() {\n\t\tn := 0\n\t\tmax_reject_pnum := -1\n\t\tfor {\n\t\t\tif px.dead {\n\t\t\t\t// I'm dead\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif px.Lslots[seq].Decided {\n\t\t\t\t// locally decided, wouldn't send prepare and accept anymore\n\t\t\t\t// just propagate the decision\n\t\t\t\tpx.send_decided(seq, px.Lslots[seq].V)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif px.APp[seq]+1 > n {\n\t\t\t\tn = px.APp[seq] + 1\n\t\t\t} else {\n\t\t\t\tn++\n\t\t\t}\n\n\t\t\tif n < max_reject_pnum {\n\t\t\t\tn = max_reject_pnum + 1\n\t\t\t}\n\n\t\t\tpx.clog(DBG_PROPOSER, \"Start\", \"send prepare, seq=%d n=%d\", seq, n)\n\n\t\t\tprepare_ok, p := px.send_prepare(seq, n)\n\t\t\tif !prepare_ok {\n\t\t\t\tmax_reject_pnum = p.PNum\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tnew_p := Proposal{}\n\n\t\t\t// no proposal yet, use v\n\t\t\tif p.PNum == 0 {\n\t\t\t\tnew_p.Value = v\n\t\t\t} else {\n\t\t\t\tnew_p.Value = p.Value\n\t\t\t}\n\n\t\t\tnew_p.PNum = n\n\n\t\t\tpx.clog(DBG_PROPOSER, \"Start\", \"prepare OK, proposal=%v\", new_p)\n\n\t\t\taccept_ok := px.send_accept(seq, new_p)\n\t\t\tif !accept_ok {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tpx.clog(DBG_PROPOSER, \"Start\", \"accept OK\")\n\n\t\t\tpx.send_decided(seq, new_p.Value)\n\n\t\t\tpx.clog(DBG_PROPOSER, \"Start\", \"decided\")\n\t\t\tbreak\n\t\t}\n\t}()\n}", "func (s *slotted) Reserve() { atomic.AddInt32((*int32)(s), 1) }", "func philosopherPonderanceGoroutine(id int) {\n\tfor {\n\t\tif rand.Float64() < PHILSWITCHCHANCE {\n\t\t\tphilIn[id] <- 2\n\t\t\tisEating := <- philOut[id] == 1\n\t\t\t// Switch: Thinking <-> Eating.\n\t\t\tif isEating {\n\t\t\t\t// Drop forks and return to positing on the nature of the universe.\n\t\t\t\tphilIn[id] <- 1\n\t\t\t} else {\n\t\t\t\t// Attempt to begin eating. 
Return to postulating, if missing fork.\n\t\t\t\tphilIn[id] <- 0\n\t\t\t}\n\t\t\t<- philOut[id]\n\t\t}\n\t}\n}", "func aliveSpammer(outgoingMsg chan def.Message) {\n\talive := def.Message{Category: def.Alive, Floor: -1, Button: -1, Cost: -1}\n\tfor {\n\t\toutgoingMsg <- alive\n\t\ttime.Sleep(def.SpamInterval)\n\t}\n}", "func (m *TimeServiceManager) Spawn() TimeService {\n\tts := TimeService{\n\t\tDead: make(chan struct{}, 0),\n\t\tReqChan: make(chan Request, 10),\n\t\tAvgResponseTime: rand.Float64() * 3,\n\t}\n\tm.Instances = append(m.Instances, ts)\n\treturn ts\n}", "func (p philosopher) eat() {\n\tfor j := 0; j < numberOfCourses; j++ {\n\t\t//Pick up sticks means lock both mutex\n\t\tp.leftChopstick.Lock()\n\t\tp.rightChopstick.Lock()\n\t\t// Acknowledge the start\n\t\t//starting to eat <number>\n\t\tfmt.Printf(\"Starting to eat %d(%s) [%s] \\n\", p.id+1, nameOfPhilosophers[p.id], courses[j])\n\t\ttime.Sleep(time.Second)\n\t\t//Release mutex\n\t\tp.rightChopstick.Unlock()\n\t\tp.leftChopstick.Unlock()\n\t\t// Acknowledge the finish\n\t\t//finishing eating <number>\n\t\tfmt.Printf(\"finishing eating %d(%s) [%s] \\n\", p.id+1, nameOfPhilosophers[p.id], courses[j])\n\t\ttime.Sleep(time.Second)\n\t}\n\teatWaitingGroup.Done()\n}", "func (s *System) Wait() {\n\tif !s.spawning {\n\t\tpanic(\"spawner is asleep, call RunSpawner to wake him up\")\n\t}\n\t<-s.spawner.out\n\ts.allocate() // so we can draw meanwhile spawning particles\n\ts.spawning = false\n}", "func TestGracePeriod(t *testing.T) {\n\tt.Parallel()\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\teventBus := eventbus.NewBus()\n\tpstore := NewMockPeerstore(ctrl)\n\tconst gracePeriod = 250 * time.Millisecond\n\tman, err := pstoremanager.NewPeerstoreManager(pstore, eventBus, pstoremanager.WithGracePeriod(gracePeriod))\n\trequire.NoError(t, err)\n\tdefer man.Close()\n\tman.Start()\n\n\temitter, err := eventBus.Emitter(new(event.EvtPeerConnectednessChanged))\n\trequire.NoError(t, err)\n\tstart := time.Now()\n\tremoved := make(chan struct{})\n\tpstore.EXPECT().RemovePeer(peer.ID(\"foobar\")).DoAndReturn(func(p peer.ID) {\n\t\tdefer close(removed)\n\t\t// make sure the call happened after the grace period\n\t\trequire.GreaterOrEqual(t, time.Since(start), gracePeriod)\n\t\trequire.LessOrEqual(t, time.Since(start), 3*gracePeriod)\n\t})\n\trequire.NoError(t, emitter.Emit(event.EvtPeerConnectednessChanged{\n\t\tPeer: \"foobar\",\n\t\tConnectedness: network.NotConnected,\n\t}))\n\t<-removed\n}", "func (px *Paxos) Start(seq int, v interface{}) {\n // Your code here.\n if seq < px.Min() {\n return\n }\n go func() {\n instance := px.getInstance(seq)\n instance.mu.Lock()\n defer instance.mu.Unlock()\n for !px.dead {\n if instance.decidedValue != nil {\n break\n }\n instance.proposer.highestSeenProposedNumber++\n instance.proposer.proposedNumber = instance.proposer.highestSeenProposedNumber\n ok, value := px.propose(instance, seq)\n if !ok {\n continue\n }\n if value != nil {\n v = value\n }\n if !px.requestAccept(instance, seq, v) {\n continue\n }\n px.decide(seq, v)\n break\n }\n }()\n}", "func TestPool(t *testing.T) {\n\tdefer leaktest.Check(t)()\n\n\tgo func() {\n\t\tfor {\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t}()\n}", "func busyloop() {\n\tfor {\n\t\tif *localWork {\n\t\t\tfor i := 0; i < 100*(1<<16); i++ {\n\t\t\t}\n\t\t}\n\t\tfoo1(100)\n\t\tfoo2(*skew)\n\t\t// Yield so that some preemption happens.\n\t\truntime.Gosched()\n\t}\n}", "func Test_Static_Pool_Destroy_And_Close_While_Wait(t *testing.T) {\n\tctx := 
context.Background()\n\tp, err := Initialize(\n\t\tctx,\n\t\tfunc() *exec.Cmd { return exec.Command(\"php\", \"../tests/client.php\", \"delay\", \"pipes\") },\n\t\tpipe.NewPipeFactory(),\n\t\t&Config{\n\t\t\tNumWorkers: 1,\n\t\t\tAllocateTimeout: time.Second,\n\t\t\tDestroyTimeout: time.Second,\n\t\t},\n\t)\n\n\tassert.NotNil(t, p)\n\tassert.NoError(t, err)\n\n\tgo func() {\n\t\t_, errP := p.Exec(&payload.Payload{Body: []byte(\"100\")})\n\t\tif errP != nil {\n\t\t\tt.Errorf(\"error executing payload: error %v\", err)\n\t\t}\n\t}()\n\ttime.Sleep(time.Millisecond * 100)\n\n\tp.Destroy(ctx)\n\t_, err = p.Exec(&payload.Payload{Body: []byte(\"100\")})\n\tassert.Error(t, err)\n}", "func TestMovement(t *testing.T) {\n\tspace := state.EmptySpace()\n\te1 := space.CreateEmpire()\n\te2 := space.CreateEmpire()\n\n\tp0 := space.CreatePlanet(e1)\n\tp1 := space.CreatePlanet(e2)\n\tp2 := space.CreatePlanet(e1)\n\tp3 := space.CreatePlanet(e2)\n\n\tp0.Connected = append(p0.Connected, p1)\n\tp0.Connected = append(p0.Connected, p2)\n\tp0.Control = 1\n\tp1.Connected = append(p1.Connected, p0)\n\tp1.Connected = append(p1.Connected, p2)\n\tp1.Connected = append(p1.Connected, p3)\n\tp1.Control = 1\n\tp2.Connected = append(p2.Connected, p0)\n\tp2.Connected = append(p2.Connected, p1)\n\tp2.Control = 1\n\tp2.Connected = append(p3.Connected, p1)\n\tp3.Control = 1\n\n\tfleet := space.CreateFleet(p0, e1)\n\tfleet.RangedSquads = 100\n\tfleet = space.CreateFleet(p2, e1)\n\tfleet.RangedSquads = 10\n\tfleet = space.CreateFleet(p2, e1)\n\tfleet.RangedSquads = 1\n\n\tstrat := Distributed{}\n\tstrat.Init(e1)\n\n\tlog := bytes.Buffer{}\n\n\tfor i:= 0; i < 10 ; i++ {\n\t\tfor i, planet := range space.Planets{\n\t\t\tfleets := 0\n\t\t\tships := 0\n\t\t\tfor _, fleet := range planet.Fleets{\n\t\t\t\tif fleet.Empire == e1 {\n\t\t\t\t\tfleets++\n\t\t\t\t\tships += fleet.Size()\n\t\t\t\t}\n\t\t\t}\n\t\t\t_, _ = fmt.Fprintln(&log, i, \"\\t\", len(planet.Fleets), \"\\t\", fleets, \"\\t\", ships, \"\\t\", planet.Control)\n\t\t}\n\t\t_, _ = fmt.Fprintln(&log,\"id \\t fleets \\t e fleets \\t e ships \\t control\")\n\n\t\tfor _, cmd := range strat.Commands(&space) {\n\t\t\tcmd.Execute(&space)\n\t\t}\n\t}\n\n\tif len(space.Fleets) != 2{\n\t\tt.Errorf(\"Fleets not merged\\n%s\", log.String())\n\t}\n\n\tif len(p0.Fleets) != 0{\n\t\tt.Errorf(\"Fleets still on own planet\\n%s\", log.String())\n\t}\n\n\tif len(p2.Fleets) != 0{\n\t\tt.Errorf(\"Fleets still on own planet\\n%s\", log.String())\n\t}\n}", "func SyncRuntimeCanSpin(i int) bool", "func (s Semaphore) P() {\n\n\ttimer := time.NewTimer(semTimeout)\n\tdefer timer.Stop()\n\n\tselect {\n\tcase s <- 1:\n\tcase <-timer.C:\n\t\tpanic(fmt.Sprintf(\"Timeout in Semaphore.P() after %v of waiting\",\n\t\t\tsemTimeout))\n\t}\n}", "func (pexR *PEXReactor) ensurePeersRoutine() {\n\t// Randomize when routine starts\n\ttime.Sleep(time.Duration(rand.Int63n(500*ensurePeersPeriodSeconds)) * time.Millisecond)\n\n\t// fire once immediately.\n\tpexR.ensurePeers()\n\t// fire periodically\n\ttimer := NewRepeatTimer(\"pex\", ensurePeersPeriodSeconds*time.Second)\nFOR_LOOP:\n\tfor {\n\t\tselect {\n\t\tcase <-timer.Ch:\n\t\t\tpexR.ensurePeers()\n\t\tcase <-pexR.Quit:\n\t\t\tbreak FOR_LOOP\n\t\t}\n\t}\n\n\t// Cleanup\n\ttimer.Stop()\n}", "func (a *Agent) PauseSync() {\n\t// Do this outside of lock as it has it's own locking\n\ta.sync.Pause()\n\n\t// Coordinate local state watchers\n\ta.syncMu.Lock()\n\tdefer a.syncMu.Unlock()\n\tif a.syncCh == nil {\n\t\ta.syncCh = make(chan struct{})\n\t}\n}", "func (s 
DefaultSleeper) Sleep() {\n\ttime.Sleep(1 * time.Second)\n}", "func (s *Server) timing() {\n\tt := time.Now().Unix()\n\tif t > s.lastPulse {\n\t\ts.lastPulse = t\n\t\tfor _, m := range mobs {\n\t\t\tm.Pulse()\n\t\t}\n\t\tfor _, cl := range s.clients {\n\t\t\tcl.Pulse()\n\t\t}\n\t}\n\tif t > s.nextTick {\n\t\ts.nextTick = t + rand.Int63n(tickLength) + tickLength\n\t\tfor _, m := range mobs {\n\t\t\tm.Tick()\n\t\t}\n\t\tfor _, cl := range s.clients {\n\t\t\tcl.Tick()\n\t\t}\n\t}\n}", "func runSimulation() {\n\tcmd := startServerProcess()\n\n\tclient.SetServerAddr(serverAddr)\n\tfor i := 0; i < simClients; i++ {\n\t\tclient.RunSimClient(simClientPingRateMs)\n\t}\n\n\tfor {\n\t\tval := rand.Intn(100)\n\t\tif val > 90 {\n\t\t\tfmt.Println(\"forcibly killing server process\")\n\t\t\tcmd.Process.Kill()\n\t\t\tcmd = startServerProcess()\n\t\t}\n\n\t\ttime.Sleep(500 * time.Millisecond)\n\t}\n}", "func (self *Graphics) SetPendingDestroyA(member bool) {\n self.Object.Set(\"pendingDestroy\", member)\n}", "func TestProposalBufferConcurrentWithDestroy(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\tdefer log.Scope(t).Close(t)\n\tctx := context.Background()\n\n\tvar p testProposer\n\tvar b propBuf\n\tvar pc proposalCreator\n\tclock := hlc.NewClock(hlc.UnixNano, time.Nanosecond)\n\tb.Init(&p, tracker.NewLockfreeTracker(), clock, cluster.MakeTestingClusterSettings())\n\n\tmlais := make(map[uint64]struct{})\n\tdsErr := errors.New(\"destroyed\")\n\n\t// Run 20 concurrent producers.\n\tvar g errgroup.Group\n\tconst concurrency = 20\n\tfor i := 0; i < concurrency; i++ {\n\t\tg.Go(func() error {\n\t\t\tfor {\n\t\t\t\tpd, data := pc.newPutProposal()\n\t\t\t\t_, tok := b.TrackEvaluatingRequest(ctx, hlc.MinTimestamp)\n\t\t\t\tmlai, err := b.Insert(ctx, pd, data, tok)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif errors.Is(err, dsErr) {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t\treturn errors.Wrap(err, \"Insert\")\n\t\t\t\t}\n\t\t\t\tp.Lock()\n\t\t\t\tif _, ok := mlais[mlai]; ok {\n\t\t\t\t\tp.Unlock()\n\t\t\t\t\treturn errors.New(\"max lease index collision\")\n\t\t\t\t}\n\t\t\t\tmlais[mlai] = struct{}{}\n\t\t\t\tp.Unlock()\n\t\t\t}\n\t\t})\n\t}\n\n\t// Run a concurrent consumer.\n\tg.Go(func() error {\n\t\tfor {\n\t\t\tif stop, err := func() (bool, error) {\n\t\t\t\tp.Lock()\n\t\t\t\tdefer p.Unlock()\n\t\t\t\tif !p.ds.IsAlive() {\n\t\t\t\t\treturn true, nil\n\t\t\t\t}\n\t\t\t\tif err := b.flushLocked(ctx); err != nil {\n\t\t\t\t\treturn true, errors.Wrap(err, \"flushLocked\")\n\t\t\t\t}\n\t\t\t\treturn false, nil\n\t\t\t}(); stop {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t})\n\n\t// Wait for a random duration before destroying.\n\ttime.Sleep(time.Duration(rand.Intn(1000)) * time.Microsecond)\n\n\t// Destroy the proposer. 
All producers and consumers should notice.\n\tp.Lock()\n\tp.ds.Set(dsErr, destroyReasonRemoved)\n\tp.Unlock()\n\n\trequire.Nil(t, g.Wait())\n\tt.Logf(\"%d successful proposals before destroy\", len(mlais))\n}", "func Test_Static_Pool_Handle_Dead(t *testing.T) {\n\tctx := context.Background()\n\tp, err := Initialize(\n\t\tcontext.Background(),\n\t\tfunc() *exec.Cmd { return exec.Command(\"php\", \"../tests/slow-destroy.php\", \"echo\", \"pipes\") },\n\t\tpipe.NewPipeFactory(),\n\t\t&Config{\n\t\t\tNumWorkers: 5,\n\t\t\tAllocateTimeout: time.Second * 100,\n\t\t\tDestroyTimeout: time.Second,\n\t\t},\n\t)\n\tassert.NoError(t, err)\n\tassert.NotNil(t, p)\n\n\ttime.Sleep(time.Second)\n\tfor i := range p.Workers() {\n\t\tp.Workers()[i].State().Set(worker.StateErrored)\n\t}\n\n\t_, err = p.Exec(&payload.Payload{Body: []byte(\"hello\")})\n\tassert.NoError(t, err)\n\tp.Destroy(ctx)\n}", "func spawner(t *testing.T, numProc int, startCh chan bool, spawnedCh chan []int) {\n\tsrc := rand.NewSource(time.Now().UnixNano())\n\trandom := rand.New(src)\n\n\t<-startCh\n\n\tlog.Debugf(\"spawner: started ...\\n\")\n\n\tfor i := 0; i < numProc; i++ {\n\t\tpidList, err := spawnDummyProcesses(1)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"spawnDummyProcesses() failed: %s\\n\", err)\n\t\t}\n\n\t\tspawnedCh <- pidList\n\n\t\tlog.Debugf(\"spawner: spawned %v\\n\", pidList)\n\n\t\tdelay := random.Intn(10)\n\t\ttime.Sleep(time.Duration(delay) * time.Millisecond)\n\t}\n}", "func (self *PhysicsP2) Pause() {\n self.Object.Call(\"pause\")\n}", "func TestMetasyncMembership(t *testing.T) {\n\t{\n\t\t// pending server dropped without sync\n\t\tprimary := newPrimary()\n\t\tsyncer := testSyncer(primary)\n\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\t\tgo func(wg *sync.WaitGroup) {\n\t\t\tdefer wg.Done()\n\t\t\tsyncer.Run()\n\t\t}(&wg)\n\n\t\tvar cnt atomic.Int32\n\t\ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tcnt.Add(1)\n\t\t\thttp.Error(w, \"i don't know how to deal with you\", http.StatusNotAcceptable)\n\t\t}))\n\n\t\tdefer s.Close()\n\n\t\tid := \"t\"\n\t\taddrInfo := serverTCPAddr(s.URL)\n\t\tclone := primary.owner.smap.get().clone()\n\t\tclone.addTarget(meta.NewSnode(id, apc.Target, addrInfo, addrInfo, addrInfo))\n\t\tprimary.owner.smap.put(clone)\n\t\tmsg := primary.newAmsgStr(\"\", nil)\n\t\twg1 := syncer.sync(revsPair{clone, msg})\n\t\twg1.Wait()\n\t\ttime.Sleep(time.Millisecond * 300)\n\n\t\tclone = primary.owner.smap.get().clone()\n\t\tclone.delTarget(id)\n\t\tprimary.owner.smap.put(clone)\n\n\t\ttime.Sleep(time.Millisecond * 300)\n\t\tsavedCnt := cnt.Load()\n\t\ttime.Sleep(time.Millisecond * 300)\n\t\tif cnt.Load() != savedCnt {\n\t\t\tt.Fatal(\"Sync call didn't stop after traget is deleted\")\n\t\t}\n\n\t\tsyncer.Stop(nil)\n\t\twg.Wait()\n\t}\n\n\tprimary := newPrimary()\n\tsyncer := testSyncer(primary)\n\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tgo func(wg *sync.WaitGroup) {\n\t\tdefer wg.Done()\n\t\tsyncer.Run()\n\t}(&wg)\n\n\tch := make(chan struct{}, 10)\n\tf := func(w http.ResponseWriter, r *http.Request) {\n\t\tch <- struct{}{}\n\t}\n\n\t{\n\t\t// sync before smap sync (no previous sync saved in metasyncer)\n\t\ts1 := httptest.NewServer(http.HandlerFunc(f))\n\t\tdefer s1.Close()\n\n\t\tid := \"t1111\"\n\t\taddrInfo := serverTCPAddr(s1.URL)\n\t\tdi := meta.NewSnode(id, apc.Target, addrInfo, addrInfo, addrInfo)\n\t\tclone := primary.owner.smap.get().clone()\n\t\tclone.addTarget(di)\n\t\tprimary.owner.smap.put(clone)\n\t\tbmd := primary.owner.bmd.get()\n\t\tmsg := 
primary.newAmsgStr(\"\", bmd)\n\t\twg := syncer.sync(revsPair{bmd, msg})\n\t\twg.Wait()\n\t\t<-ch\n\n\t\t// sync smap so metasyncer has a smap\n\t\twg = syncer.sync(revsPair{clone, msg})\n\t\twg.Wait()\n\t\t<-ch\n\t}\n\n\t{\n\t\t// add a new target but new smap is not synced\n\t\t// metasyncer picks up the new target directly from primary's smap\n\t\t// and metasyncer will also add the new target to pending to sync all previously synced data\n\t\t// that's why the extra channel read\n\t\ts2 := httptest.NewServer(http.HandlerFunc(f))\n\t\tdefer s2.Close()\n\n\t\tid := \"t22222\"\n\t\taddrInfo := serverTCPAddr(s2.URL)\n\t\tdi := meta.NewSnode(id, apc.Target, addrInfo, addrInfo, addrInfo)\n\t\tclone := primary.owner.smap.get().clone()\n\t\tclone.addTarget(di)\n\t\tprimary.owner.smap.put(clone)\n\n\t\tbmd := primary.owner.bmd.get()\n\t\tmsg := primary.newAmsgStr(\"\", bmd)\n\t\twg := syncer.sync(revsPair{bmd, msg})\n\t\twg.Wait()\n\t\t<-ch // target 1\n\t\t<-ch // target 2\n\t\tif len(ch) != 0 {\n\t\t\tt.Fatal(\"Too many sync calls received\")\n\t\t}\n\n\t\tsyncer.Stop(nil)\n\t\twg.Wait()\n\t}\n}", "func (g *Gossiper) Suspect(endpoint network.EndPoint) {\n\tepState := g.endPointStateMap[endpoint]\n\tif epState.isAlive {\n\t\tlog.Printf(\"EndPoint %v is not dead\\n\", endpoint)\n\t\tg.isAlive(endpoint, epState, false)\n\t\t// notify an endpoint is dead to interested parties\n\t\tdeltaState := NewEndPointState(epState.GetHeartBeatState())\n\t\tg.doNotifications(endpoint, deltaState)\n\t}\n}", "func main() {\n\tgamer := NewGamer(100)\n\tmemento := gamer.CreateMemento()\n\n\tfor i := 0; i < 100; i++ {\n\t\tfmt.Println(\"==== \" + strconv.Itoa(i))\n\t\tfmt.Println(\"Current state: \" + gamer.ToString())\n\n\t\tgamer.Play()\n\n\t\tfmt.Println(\"Gamer's money is \" + strconv.Itoa(gamer.money) + \".\")\n\n\t\tif gamer.money > memento.money {\n\t\t\tfmt.Println(\"(Save the current state because money has increased.)\")\n\t\t\tmemento = gamer.CreateMemento()\n\t\t} else if gamer.money < memento.money/2 {\n\t\t\tfmt.Println(\"(Go back to the previous state because money has decreased.)\")\n\t\t\tgamer.RestoreMemento(memento)\n\t\t}\n\n\t\ttime.Sleep(1 * time.Second)\n\n\t\tfmt.Println(\"\")\n\t}\n}", "func (s *Stealer) Go(ctx context.Context, e *reflow.Eval) {\n\tticker := time.NewTicker(pollInterval)\n\tdefer ticker.Stop()\n\tvar n int\npoll:\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tneed := e.Need()\n\t\t\tif need.IsZero() {\n\t\t\t\tcontinue poll\n\t\t\t}\n\t\t\tn++\n\t\t\ts.Log.Debugf(\"need %v; starting new task stealing worker\", need)\n\t\t\tactx, acancel := context.WithTimeout(ctx, allocTimeout)\n\t\t\talloc, err := s.Cluster.Allocate(actx, need, s.Labels)\n\t\t\tacancel()\n\t\t\tif err != nil {\n\t\t\t\tcontinue poll\n\t\t\t}\n\t\t\tw := &worker{\n\t\t\t\tExecutor: alloc,\n\t\t\t\tEval: e,\n\t\t\t\tLog: s.Log.Tee(nil, alloc.ID()+\": \"),\n\t\t\t}\n\t\t\twctx, wcancel := context.WithCancel(ctx)\n\t\t\tgo func() {\n\t\t\t\terr := pool.Keepalive(wctx, s.Log, alloc)\n\t\t\t\tif err != wctx.Err() {\n\t\t\t\t\ts.Log.Errorf(\"worker %s died: %v\", alloc.ID(), err)\n\t\t\t\t}\n\t\t\t\twcancel()\n\t\t\t}()\n\t\t\tgo func() {\n\t\t\t\tw.Go(wctx)\n\t\t\t\twcancel()\n\t\t\t\talloc.Free(context.Background())\n\t\t\t}()\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}", "func (rcs *Service) pingLoop(done <-chan struct{}) {\n\tpingChan := make(chan *model.RemoteCluster, MaxConcurrentSends*2)\n\n\t// create a thread pool to send pings concurrently to remotes.\n\tfor i := 0; i < MaxConcurrentSends; 
i++ {\n\t\tgo rcs.pingEmitter(pingChan, done)\n\t}\n\n\tgo rcs.pingGenerator(pingChan, done)\n}", "func TestRPCScheduling(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip()\n\t}\n\t// create RPCServers\n\trpcs1, err := NewRPCServer(10002)\n\tif err != nil {\n\t\tt.Fatal(\"Failed to initialize RPCServer:\", err)\n\t}\n\tdefer rpcs1.Close()\n\trpcs2, err := NewRPCServer(10003)\n\tif err != nil {\n\t\tt.Fatal(\"Failed to initialize RPCServer:\", err)\n\t}\n\tdefer rpcs2.Close()\n\n\t// add a mesage handler to the servers\n\ttsh1 := new(TestStoreHandler)\n\taddr1 := rpcs1.RegisterHandler(tsh1)\n\ttsh2 := new(TestStoreHandler)\n\taddr2 := rpcs2.RegisterHandler(tsh2)\n\n\t// begin transferring large payload\n\tlargeChan := rpcs2.SendAsyncMessage(Message{\n\t\tDest: addr1,\n\t\tProc: \"TestStoreHandler.StoreMessage\",\n\t\tArgs: string(bytes.Repeat([]byte{0x10}, 1<<20)),\n\t})\n\n\t// begin transferring small payload\n\tsmallChan := rpcs1.SendAsyncMessage(Message{\n\t\tDest: addr2,\n\t\tProc: \"TestStoreHandler.StoreMessage\",\n\t\tArgs: string(bytes.Repeat([]byte{0x10}, 1<<16)),\n\t})\n\n\t// poll until both transfers complete\n\tvar t1, t2 time.Time\n\tfor i := 0; i < 2; i++ {\n\t\tselect {\n\t\tcase <-largeChan:\n\t\t\tt1 = time.Now()\n\t\tcase <-smallChan:\n\t\t\tt2 = time.Now()\n\t\t}\n\t}\n\n\tif t2.After(t1) {\n\t\tt.Fatal(\"small transfer was blocked by large transfer\")\n\t}\n}", "func Sync(d Driver, spec *v1alpha1.IPPoolSpec, logger *log.Logger) error {\n\n\tlogger.Println(\"Sync start\")\n\tspecAddressListSize := len(spec.Addresses)\n\tspecAllocationListSize := len(spec.Allocations)\n\tsizeDiff := specAddressListSize - specAllocationListSize - reserveAddressCount\n\tlogger.Printf(\"address count=%d, allocation count=%d, reserve count=%d\",\n\t\tspecAddressListSize, specAllocationListSize, reserveAddressCount)\n\n\tif sizeDiff < 0 {\n\t\t// need more address\n\t\tlogger.Printf(\"need %d more address. creating...\", -sizeDiff)\n\t\tif err := d.CreateAddress(-sizeDiff); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tipamAddrLst, err := d.GetAddresses()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif sizeDiff > 0 {\n\t\tlogger.Println(\"too many address. 
deleting unallocated address...\")\n\t\ttmpList := []IpamAddress{}\n\t\tfor _, ipamAddr := range ipamAddrLst {\n\t\t\tif !ipamAddr.MarkedWith(Allocated) && ipamAddr.MarkedWith(Automated) {\n\t\t\t\tif err = d.DeleteAddress(ipamAddr); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tsizeDiff--\n\t\t\t}\n\t\t\ttmpList = append(tmpList, ipamAddr)\n\t\t\tif sizeDiff <= 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tipamAddrLst = tmpList\n\t}\n\n\t// Sync addresses\n\t// Every addresses in driver is force sync to ippool now.\n\tlogger.Println(\"Copying IpamAddr to AddressList\")\n\tspec.Addresses = []string{}\n\tfor _, ipamAddr := range ipamAddrLst {\n\t\tspec.Addresses = append(spec.Addresses, ipamAddr.String())\n\t}\n\n\t// Sync allocations\n\t// Every allocations in ippool is force sync to driver now.\n\tlogger.Println(\"Mark allocation addresses alocated.\")\n\tfor _, ipamAddr := range ipamAddrLst {\n\t\tvar toRelease bool = true\n\t\tvar alct *v1alpha1.IPAllocation\n\t\tfor _, alction := range spec.Allocations {\n\t\t\tip := net.ParseIP(alction.Address)\n\t\t\tif ip == nil {\n\t\t\t\terr = fmt.Errorf(\"sync failed: cannot parse address %v\",\n\t\t\t\t\tspec.Addresses)\n\t\t\t\tlogger.Println(err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\talct = &alction\n\t\t\tif ipamAddr.Equal(ip) {\n\t\t\t\ttoRelease = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tvar err error\n\t\tif toRelease {\n\t\t\terr = d.MarkAddressReleased(ipamAddr)\n\t\t} else {\n\t\t\terr = d.MarkAddressAllocated(ipamAddr, alct.PodNamespace+\"/\"+alct.PodName)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (i *Instance) Freeze() {\n}", "func SyncRuntimeSemacquire(s *uint32)", "func StartProducer(m SafeMaper) {\n\tgo func() {\n\t\tfor {\n\t\t\tstart := time.Now().UnixNano()\n\t\t\tm.Produce()\n\t\t\ttime.Sleep(time.Millisecond - time.Duration(time.Now().UnixNano()-start))\n\t\t}\n\t}()\n}", "func (d *DefaultSleeper) Sleep() {\n\ttime.Sleep(1 * time.Second)\n}", "func (t *Time) Sleep(s time.Duration) {\n\tt.current = t.current.Add(s)\n}", "func TestReactorNoBroadcastToSender(t *testing.T) {\n\tconfig := cfg.TestConfig()\n\tconst N = 2\n\treactors := makeAndConnectReactors(config, N)\n\tdefer func() {\n\t\tfor _, r := range reactors {\n\t\t\tif err := r.Stop(); err != nil {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\t\t}\n\t}()\n\tfor _, r := range reactors {\n\t\tfor _, peer := range r.Switch.Peers().List() {\n\t\t\tpeer.Set(types.PeerStateKey, peerState{1})\n\t\t}\n\t}\n\n\tconst peerID = 1\n\tcheckTxs(t, reactors[0].mempool, numTxs, peerID)\n\tensureNoTxs(t, reactors[peerID], 100*time.Millisecond)\n}", "func lockMaintenance(ctx context.Context) {\n\t// Wait until the object API is ready\n\t// no need to start the lock maintenance\n\t// if ObjectAPI is not initialized.\n\n\tvar objAPI ObjectLayer\n\n\tfor {\n\t\tobjAPI = newObjectLayerFn()\n\t\tif objAPI == nil {\n\t\t\ttime.Sleep(time.Second)\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\n\tif _, ok := objAPI.(*erasureServerPools); !ok {\n\t\treturn\n\t}\n\n\t// Initialize a new ticker with 1 minute between each ticks.\n\tlkTimer := time.NewTimer(lockMaintenanceInterval)\n\t// Stop the timer upon returning.\n\tdefer lkTimer.Stop()\n\n\tfor {\n\t\t// Verifies every minute for locks held more than 2 minutes.\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase <-lkTimer.C:\n\t\t\t// Reset the timer for next cycle.\n\t\t\tlkTimer.Reset(lockMaintenanceInterval)\n\n\t\t\tglobalLockServer.expireOldLocks(lockValidityDuration)\n\t\t}\n\t}\n}", "func 
ensureNoTxs(t *testing.T, reactor *Reactor, timeout time.Duration) {\n\ttime.Sleep(timeout) // wait for the txs in all mempools\n\tassert.Zero(t, reactor.mempool.Size())\n}", "func (m *TimeServiceManager) Kill() {\n\tif len(m.Instances) > 0 {\n\t\tfmt.Printf(\"There are %d instances. \", len(m.Instances))\n\t\tn := rand.Intn(len(m.Instances))\n\t\tfmt.Printf(\"Killing Instance %d\\n\", n)\n\t\tclose(m.Instances[n].Dead)\n\t\tm.Instances = append(m.Instances[:n], m.Instances[n+1:]...)\n\t} else {\n\t\tfmt.Println(\"No instance to kill\")\n\t}\n}", "func (me *Mgr) doPing() {\n\tfor !me.stopped {\n\t\tme.workers.Scan(func(id string, w interface{}) {\n\t\t\terr := w.(*Worker).Ping()\n\t\t\tif err != DEADERR {\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// dead\n\t\t\tme.deadChan <- id\n\t\t\tme.deadWorkers.Set(id, []byte(\"OK\"))\n\t\t\tme.workers.Delete(id)\n\t\t})\n\t\ttime.Sleep(15 * time.Second)\n\t}\n}", "func (p *Producer) StartProducing(sim *Simulator) {\n\ttime.Sleep(time.Duration(3 * time.Second))\n\n\tfmt.Println(\"Producer Starting...\")\n\tp.simulator = sim\n\tfor {\n\t\tp.simulator.full.Acquire(p.simulator.context, 1) //Block if buffer is full\n\t\tp.simulator.mutex.Acquire(p.simulator.context, 1) //Block if buffer is being modified\n\n\t\tvalue := p.simulator.nextToProduce\n\t\tfmt.Println(\"<<<<<Inserting value \", value)\n\t\tp.simulator.buffer.Insert(value)\n\t\tp.simulator.nextToProduce++\n\n\t\tp.simulator.mutex.Release(1) //Signal buffer is free to use\n\t\tp.simulator.empty.Release(1) //Signal buffer has data to be consumed\n\n\t\ttime.Sleep(time.Duration(rand.Intn(4)+1) * time.Second)\n\n\t}\n}", "func TestSyncer(t *testing.T) {\n\t// we have process with the same ID\n\tid := \"sameid\"\n\taddress := fmt.Sprint(\"localhost:9999\")\n\tcfg := client.Config{\n\t\tEndPoint: address,\n\t\tLockTimeout: time.Duration(10) * time.Second,\n\t}\n\tcli, err := client.NewClient(cfg)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tprocessCount := 10\n\n\tres := make(chan string, processCount*2)\n\tvar i int\n\tfor i < processCount {\n\t\tgo func(a int) {\n\t\t\tcli.Lock(id)\n\t\t\td := getRandomDuration()\n\t\t\tres <- \"start process\"\n\t\t\t// simulate random duration process\n\t\t\ttime.Sleep(time.Duration(d))\n\t\t\tres <- \"finish process\"\n\t\t\tif i > 7 {\n\t\t\t\t// deliberately not unlocking the last 2 process\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcli.Unlock(id)\n\t\t}(i)\n\t\ti += 1\n\t}\n\n\ti = 0\n\tvar msg string\n\tfor i < processCount {\n\t\t// this expect the result will be synchronous\n\t\tmsg = <-res\n\t\tassert.Equal(t, \"start process\", msg)\n\t\tfmt.Println(msg)\n\t\tmsg = <-res\n\t\tassert.Equal(t, \"finish process\", msg)\n\t\tfmt.Println(msg)\n\t\ti += 1\n\t}\n}", "func (px *Paxos) Start(seq int, v interface{}) {\n \n // if consensus on this seq number is no longer\n // desired from the client, ignore this request\n if seq < px.Min() {\n return\n }\n\n // access state up front\n px.mu.Lock()\n \n doneMap := make(map[int]int) // local copy of the done map\n for k, v := range px.done { \n doneMap[k] = v\n }\n\n px.freeMemory()\n \n px.mu.Unlock()\n\n // Question could you optimize the initial proposal number?\n\n go func(seq int, v interface{}) { // launch thread\n np := ProposalId { 0, px.me } // initial proposal \n instance := px.getInstance(seq) // retrieve instance from log\n\n // while loop driving consensus:\n for !px.isdead() && instance.Fate == Pending {\n na := NullProposal() // highest na seen\n va := v // va associated with (na, va)\n count := 0 // 
count prepare_oks\n\n // follows the pseudocode for the Paxos algorithm\n for i := 0; i < len(px.peers); i++ {\n args := PrepareArgs{seq, np}\n var reply PrepareReply\n if px.sendPrepare(i, args, &reply) && reply.Ok {\n doneMap[i] = reply.Done\n count += 1\n if reply.Na.Greater(na) {\n na = reply.Na\n va = reply.Va\n }\n }\n }\n\n // did not gain a majority response during prepare\n // check instance for consensus and try again with\n // the next highest proposal id\n if count < 1 + len(px.peers) / 2 {\n instance = px.getInstance(seq)\n if na.Greater(np) {\n np = np.Next(na)\n } else {\n np = np.Next(np)\n }\n continue // return to top of for loop\n }\n\n count = 0 // count accept_oks\n \n // follows the pseudocode for the Paxos algorithm\n for i := 0; i < len(px.peers); i++ {\n args := AcceptArgs{seq, np, va}\n var reply AcceptReply\n if px.sendAccept(i, args, &reply) && reply.Ok {\n doneMap[i] = reply.Done\n count += 1\n }\n }\n\n // did not gain a majority response during accept\n // check instance for consensus and try again with\n // the next highest proposal id\n if count < 1 + len(px.peers) / 2 {\n instance = px.getInstance(seq)\n if na.Greater(np) {\n np = np.Next(na)\n } else {\n np = np.Next(np)\n }\n continue // return to top of for loop\n }\n\n // consensus achieved, sendDecide()\n for i := 0; i < len(px.peers); i++ {\n args := DecideArgs{seq, np, va}\n var reply DecideReply\n if px.sendDecide(i, args, &reply) && reply.Ok {\n doneMap[i] = reply.Done\n }\n }\n\n // Question: Should we retry if sendDecide() fails?\n\n break // consesus achieved break out of the while loop\n }\n\n // update done pointer to address the local updated copy\n px.mu.Lock()\n \n px.done = doneMap\n px.freeMemory()\n \n px.mu.Unlock()\n\n // Question: Should I check for higher value in px.done?\n // Local copy's done[i] may be less than the global max. \n\n }(seq, v)\n}", "func DontUsePool() {\n\thabbo.Lock()\n\tusePool = false\n\thabbo.Unlock()\n}", "func Release(o *Object) {\n\to.hardReset()\n\topool.Put(o)\n}", "func TestOrphanageTSC(t *testing.T) {\n\tt.Skip(\"TSC is currently disabled in the codebase. This test will be re-enabled once TSC is re-enabled.\")\n\tconst tscThreshold = 30 * time.Second\n\n\tsnapshotOptions := tests.OrphanageSnapshotOptions\n\tsnapshotInfo := snapshotcreator.NewOptions(snapshotOptions...)\n\tctx, cancel := tests.Context(context.Background(), t)\n\tdefer cancel()\n\tn, err := f.CreateNetwork(ctx, t.Name(), 4,\n\t\tframework.CreateNetworkConfig{\n\t\t\tStartSynced: false,\n\t\t\tFaucet: false,\n\t\t\tActivity: true,\n\t\t\tAutopeering: false,\n\t\t\tSnapshot: snapshotOptions,\n\t\t}, tests.CommonSnapshotConfigFunc(t, snapshotInfo, func(peerIndex int, isPeerMaster bool, conf config.GoShimmer) config.GoShimmer {\n\t\t\tconf.UseNodeSeedAsWalletSeed = true\n\t\t\tconf.TimeSinceConfirmationThreshold = tscThreshold\n\t\t\tconf.ValidatorActivityWindow = 10 * time.Minute\n\t\t\treturn conf\n\t\t}))\n\trequire.NoError(t, err)\n\tdefer tests.ShutdownNetwork(ctx, t, n)\n\n\tlog.Println(\"Bootstrapping network...\")\n\ttests.BootstrapNetwork(t, n)\n\tlog.Println(\"Bootstrapping network... 
done\")\n\n\tconst delayBetweenDataMessages = 500 * time.Millisecond\n\n\tvar (\n\t\tnode1 = n.Peers()[0]\n\t\tnode2 = n.Peers()[1]\n\t\tnode3 = n.Peers()[2]\n\t\tnode4 = n.Peers()[3]\n\t)\n\n\tlog.Printf(\"Sending %d data blocks to the whole network\", 10)\n\ttests.SendDataBlocksWithDelay(t, n.Peers(), 10, delayBetweenDataMessages)\n\n\tpartition1 := []*framework.Node{node4}\n\tpartition2 := []*framework.Node{node2, node3, node1}\n\n\t// split partitions\n\terr = n.CreatePartitionsManualPeering(ctx, partition1, partition2)\n\trequire.NoError(t, err)\n\n\t// check consensus mana\n\trequire.EqualValues(t, snapshotInfo.PeersAmountsPledged[0], tests.Mana(t, node1).Consensus)\n\tlog.Printf(\"node1 (%s): %d\", node1.ID().String(), tests.Mana(t, node1).Consensus)\n\trequire.EqualValues(t, snapshotInfo.PeersAmountsPledged[1], tests.Mana(t, node2).Consensus)\n\tlog.Printf(\"node2 (%s): %d\", node2.ID().String(), tests.Mana(t, node2).Consensus)\n\trequire.EqualValues(t, snapshotInfo.PeersAmountsPledged[2], tests.Mana(t, node3).Consensus)\n\tlog.Printf(\"node3 (%s): %d\", node3.ID().String(), tests.Mana(t, node3).Consensus)\n\trequire.EqualValues(t, snapshotInfo.PeersAmountsPledged[3], tests.Mana(t, node4).Consensus)\n\tlog.Printf(\"node4 (%s): %d\", node4.ID().String(), tests.Mana(t, node4).Consensus)\n\n\tlog.Printf(\"Sending %d data blocks on minority partition\", 30)\n\tblocksToOrphan := tests.SendDataBlocksWithDelay(t, partition1, 30, delayBetweenDataMessages)\n\tlog.Printf(\"Sending %d data blocks on majority partition\", 10)\n\tblocksToConfirm := tests.SendDataBlocksWithDelay(t, partition2, 10, delayBetweenDataMessages)\n\n\t// merge partitions\n\terr = n.DoManualPeering(ctx)\n\trequire.NoError(t, err)\n\n\t// sleep 10 seconds to make sure that TSC threshold is exceeded\n\ttime.Sleep(tscThreshold + time.Second)\n\n\tlog.Printf(\"Sending %d data messages to make sure that all nodes share the same view\", 30)\n\ttests.SendDataBlocksWithDelay(t, n.Peers(), 30, delayBetweenDataMessages)\n\n\ttests.RequireBlocksAvailable(t, n.Peers(), blocksToConfirm, time.Minute, tests.Tick, true)\n\ttests.RequireBlocksOrphaned(t, partition1, blocksToOrphan, time.Minute, tests.Tick)\n}", "func TestSync(t *testing.T) {\n\n\tfakeTime := newClock()\n\tserverResult := Result{}\n\tts, resultLock := testServer(&serverResult, fakeTime.now, nil)\n\tdefer ts.Close()\n\n\tcontext := authtest.NewContext(ts.URL)\n\n\tquotaID := \"id\"\n\trequest := &Request{\n\t\tIdentifier: quotaID,\n\t\tInterval: 1,\n\t\tTimeUnit: quotaSecond,\n\t\tAllow: 1,\n\t\tWeight: 3,\n\t}\n\tresult := &Result{\n\t\tUsed: 1,\n\t}\n\n\tm := &manager{\n\t\tclose: make(chan bool),\n\t\tclient: http.DefaultClient,\n\t\tnow: fakeTime.now,\n\t\tsyncRate: 2 * time.Millisecond,\n\t\tbucketToSyncQueue: make(chan *bucket, 10),\n\t\tbaseURL: context.InternalAPI(),\n\t\tnumSyncWorkers: 1,\n\t\tbucketsSyncing: map[*bucket]struct{}{},\n\t}\n\n\tb := newBucket(*request, m, prometheus.Labels{\"org\": \"org\", \"env\": \"env\", \"quota\": quotaID})\n\tb.checked = fakeTime.now()\n\tb.result = result\n\tm.buckets = map[string]*bucket{quotaID: b}\n\tb.refreshAfter = time.Millisecond\n\n\tm.Start()\n\tdefer m.Close()\n\n\tfakeTime.add(10)\n\ttime.Sleep(10 * time.Millisecond) // allow idle sync\n\tb.lock.Lock()\n\tb.refreshAfter = time.Hour\n\tb.lock.Unlock()\n\n\tresultLock.Lock()\n\tserverResult.ExpiryTime /= 1000 // convert back to seconds for comparison\n\tresultLock.Unlock()\n\n\tb.lock.RLock()\n\tif b.request.Weight != 0 {\n\t\tt.Errorf(\"pending request weight got: 
%d, want: %d\", b.request.Weight, 0)\n\t}\n\tresultLock.Lock()\n\tif !reflect.DeepEqual(*b.result, serverResult) {\n\t\tt.Errorf(\"result got: %#v, want: %#v\", *b.result, serverResult)\n\t}\n\tresultLock.Unlock()\n\tif b.synced != m.now() {\n\t\tt.Errorf(\"synced got: %#v, want: %#v\", b.synced, m.now())\n\t}\n\tif m.buckets[quotaID] == nil {\n\t\tt.Errorf(\"old bucket should not have been deleted\")\n\t}\n\tb.lock.RUnlock()\n\n\t// do interactive sync\n\treq := &Request{\n\t\tIdentifier: quotaID,\n\t\tInterval: 1,\n\t\tTimeUnit: quotaSecond,\n\t\tAllow: 1,\n\t\tWeight: 2,\n\t}\n\t_, err := b.apply(req)\n\tif err != nil {\n\t\tt.Errorf(\"should not have received error on apply: %v\", err)\n\t}\n\tfakeTime.add(10)\n\terr = b.sync()\n\tif err != nil {\n\t\tt.Errorf(\"should not have received error on sync: %v\", err)\n\t}\n\n\tresultLock.Lock()\n\tserverResult.ExpiryTime /= 1000 // convert back to seconds for comparison\n\tresultLock.Unlock()\n\n\tb.lock.Lock()\n\tif b.request.Weight != 0 {\n\t\tt.Errorf(\"pending request weight got: %d, want: %d\", b.request.Weight, 0)\n\t}\n\tresultLock.Lock()\n\tif !reflect.DeepEqual(*b.result, serverResult) {\n\t\tt.Errorf(\"result got: %#v, want: %#v\", *b.result, serverResult)\n\t}\n\tresultLock.Unlock()\n\tif b.synced != m.now() {\n\t\tt.Errorf(\"synced got: %#v, want: %#v\", b.synced, m.now())\n\t}\n\n\tfakeTime.add(10 * 60)\n\tb.lock.Unlock()\n\ttime.Sleep(10 * time.Millisecond) // allow background delete\n\tm.bucketsLock.RLock()\n\tdefer m.bucketsLock.RUnlock()\n\tif m.buckets[quotaID] != nil {\n\t\tt.Errorf(\"old bucket should have been deleted\")\n\t}\n}", "func (a *Agent) spawnInstance(ctx context.Context, c instance.Config) {\n\tinst, err := a.instanceFactory(a.cfg.Global, c, a.cfg.WALDir, a.logger)\n\tif err != nil {\n\t\tlevel.Error(a.logger).Log(\"msg\", \"failed to create instance\", \"err\", err)\n\t\treturn\n\t}\n\n\tfor {\n\t\terr = inst.Run(ctx)\n\t\tif err != nil && err != context.Canceled {\n\t\t\tinstanceAbnormalExits.WithLabelValues(c.Name).Inc()\n\t\t\tlevel.Error(a.logger).Log(\"msg\", \"instance stopped abnormally, restarting after backoff period\", \"err\", err, \"backoff\", a.cfg.InstanceRestartBackoff, \"instance\", c.Name)\n\t\t\ttime.Sleep(a.cfg.InstanceRestartBackoff)\n\t\t} else {\n\t\t\tlevel.Info(a.logger).Log(\"msg\", \"stopped instance\", \"instance\", c.Name)\n\t\t\tbreak\n\t\t}\n\t}\n}", "func TestMultiInstanceRace(t *testing.T) {\n\tt.Skipf(\"Skipping %v until file lock is implemented\", t.Name())\n\tcli := ce.NewContainerClient()\n\tqmsharedlogs := createVolume(t, cli, \"qmsharedlogs\")\n\tdefer removeVolume(t, cli, qmsharedlogs)\n\tqmshareddata := createVolume(t, cli, \"qmshareddata\")\n\tdefer removeVolume(t, cli, qmshareddata)\n\n\tqmsChannel := make(chan QMChan)\n\n\tgo singleMultiInstanceQueueManager(t, cli, qmsharedlogs, qmshareddata, qmsChannel)\n\tgo singleMultiInstanceQueueManager(t, cli, qmsharedlogs, qmshareddata, qmsChannel)\n\n\tqm1a := <-qmsChannel\n\tif qm1a.Error != nil {\n\t\tt.Fatal(qm1a.Error)\n\t}\n\n\tqm1b := <-qmsChannel\n\tif qm1b.Error != nil {\n\t\tt.Fatal(qm1b.Error)\n\t}\n\n\tqm1aId, qm1aData := qm1a.QMId, qm1a.QMData\n\tqm1bId, qm1bData := qm1b.QMId, qm1b.QMData\n\n\tdefer removeVolume(t, cli, qm1aData)\n\tdefer removeVolume(t, cli, qm1bData)\n\tdefer cleanContainer(t, cli, qm1aId)\n\tdefer cleanContainer(t, cli, qm1bId)\n\n\twaitForReady(t, cli, qm1aId)\n\twaitForReady(t, cli, qm1bId)\n\n\terr, _, _ := getActiveStandbyQueueManager(t, cli, qm1aId, qm1bId)\n\tif err != nil 
{\n\t\tt.Fatal(err)\n\t}\n}", "func (g *Game) steal() {\n\tg.Lock()\n\tdefer func() {\n\t\tg.Unlock()\n\t\tg.updatePlayers()\n\t}()\n\n\tif g.Stage != stagePlaying {\n\t\treturn\n\t}\n\tg.Stage = stageStealing\n\n\tg.Unlock()\n\t// update to steal stage immediately\n\tg.updatePlayers()\n\tg.Lock()\n\tteam := &g.Team1\n\tif g.clueGiverTrack.team1 {\n\t\tteam = &g.Team2\n\t}\n\n\tg.startTimer(secondsToSteal, func() {\n\t\tg.RLock()\n\t\tplayTimerSound(g, team)\n\t\tg.RUnlock()\n\t\tg.updatePlayers()\n\t}, nil, func() {\n\t\tg.Lock()\n\t\tdefer func() {\n\t\t\tg.Unlock()\n\t\t\tg.updatePlayers()\n\t\t}()\n\t\tteam.playSound(soundTimerAlarm)\n\t\tnextPlayerTurn(g)\n\t})\n\n\tg.ClueGiver.SendMsg(Msg{Type: \"stealcheck\"})\n}", "func fakeSyscall(duration time.Duration) {\n\truntime.Entersyscall()\n\tfor start := runtime.Nanotime(); runtime.Nanotime()-start < int64(duration); {\n\t}\n\truntime.Exitsyscall()\n}", "func Spawn(actDesc *Descriptor, wg *sync.WaitGroup, f func()) {\n\tif wg != nil {\n\t\twg.Add(1)\n\t}\n\tgo func() {\n\t\tif wg != nil {\n\t\t\tdefer wg.Done()\n\t\t}\n\t\tactDesc.Log().Info(\"Started\")\n\t\tdefer func() {\n\t\t\tif p := recover(); p != nil {\n\t\t\t\tactDesc.Log().Errorf(\"Paniced: %v, stack=%s\", p, debug.Stack())\n\t\t\t\tpanic(p)\n\t\t\t}\n\t\t\tactDesc.Log().Info(\"Stopped\")\n\t\t}()\n\t\tf()\n\t}()\n}", "func (this *PoolTestSuite) TestNoInstanceOverlap() {\n\tmaxTotal := 5\n\tnumGoroutines := 100\n\tdelay := 1\n\titerations := 1000\n\tthis.pool.Config.MaxTotal = maxTotal\n\tthis.pool.Config.MaxIdle = maxTotal\n\tthis.pool.Config.TestOnBorrow = true\n\tthis.pool.Config.BlockWhenExhausted = true\n\tthis.pool.Config.MaxWaitMillis = int64(-1)\n\trunTestGoroutines(this.T(), numGoroutines, iterations, delay, this.pool)\n\tthis.Equal(0, this.pool.GetDestroyedByBorrowValidationCount())\n}", "func oneNewExtraM() {\n\t// Create extra goroutine locked to extra m.\n\t// The goroutine is the context in which the cgo callback will run.\n\t// The sched.pc will never be returned to, but setting it to\n\t// goexit makes clear to the traceback routines where\n\t// the goroutine stack ends.\n\tmp := allocm(nil, nil)\n\tgp := malg(4096)\n\tgp.sched.pc = funcPC(goexit) + sys.PCQuantum\n\tgp.sched.sp = gp.stack.hi\n\tgp.sched.sp -= 4 * sys.RegSize // extra space in case of reads slightly beyond frame\n\tgp.sched.lr = 0\n\tgp.sched.g = guintptr(unsafe.Pointer(gp))\n\tgp.syscallpc = gp.sched.pc\n\tgp.syscallsp = gp.sched.sp\n\tgp.stktopsp = gp.sched.sp\n\tgp.gcscanvalid = true\n\tgp.gcscandone = true\n\t// malg returns status as _Gidle. Change to _Gdead before\n\t// adding to allg where GC can see it. We use _Gdead to hide\n\t// this from tracebacks and stack scans since it isn't a\n\t// \"real\" goroutine until needm grabs it.\n\tcasgstatus(gp, _Gidle, _Gdead)\n\tgp.m = mp\n\tmp.curg = gp\n\tmp.lockedInt++\n\tmp.lockedg.set(gp)\n\tgp.lockedm.set(mp)\n\tgp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))\n\tif raceenabled {\n\t\tgp.racectx = racegostart(funcPC(newextram) + sys.PCQuantum)\n\t}\n\t// put on allg for garbage collector\n\tallgadd(gp)\n\n\t// gp is now on the allg list, but we don't want it to be\n\t// counted by gcount. It would be more \"proper\" to increment\n\t// sched.ngfree, but that requires locking. 
Incrementing ngsys\n\t// has the same effect.\n\tatomic.Xadd(&sched.ngsys, +1)\n\n\t// Add m to the extra list.\n\tmnext := lockextra(true)\n\tmp.schedlink.set(mnext)\n\textraMCount++\n\tunlockextra(mp)\n}", "func (n *Node) periodicRun() {\n\tfor {\n\t\ttime.Sleep(time.Second * 1)\n\t\tn.stabilize()\n\t}\n}", "func TestPointerToStructs(t *testing.T) {\n\tt.Parallel()\n\tc := make(chan struct{})\n\ts := SomeStruct{first: \"v\", last: \"z\", address: \"d\", age: 10}\n\tfmt.Printf(\"struct pointer: %p\\n\", &s)\n\tfmt.Printf(\"first pointer: %p\\n\", &(s.first))\n\tgo DangerousStructPointers(c, &s)\n\ts.first += \"w\"\n\t<-c\n\tt.FailNow() // Because of DATA RACE\n}", "func (*NoCopy) Lock() {}", "func Simulate(scheduler Scheduler, tasks []*SimTask) {\n\tfor _, t := range tasks {\n\t\tscheduler.Put(t)\n\t}\n\tcurrentTimeMs := 0\n\tendtimesPerUser := make(map[int][]int)\n\ttaskLatencyPerUser := make(map[int][]int)\n\trunningTasks := map[ScheduledTask]int{}\n\tfor scheduler.Size() > 0 || len(runningTasks) > 0 {\n\t\tif scheduler.Size() > 0 {\n\t\t\tfor nextTask := scheduler.Next(); nextTask != nil; nextTask = scheduler.Next() {\n\t\t\t\tst := nextTask.Task().(*SimTask)\n\t\t\t\trunningTasks[nextTask] = currentTimeMs + st.RuntimeMs\n\t\t\t}\n\t\t}\n\t\tif len(runningTasks) > 0 {\n\t\t\t// simulate completion of shortest task\n\t\t\tearliestCompTimeMs := -1\n\t\t\tearliestCompTimeTasks := []ScheduledTask{}\n\t\t\tfor ta, tm := range runningTasks {\n\t\t\t\tif earliestCompTimeMs == -1 || tm < earliestCompTimeMs {\n\t\t\t\t\tearliestCompTimeMs = tm\n\t\t\t\t\tearliestCompTimeTasks = nil\n\t\t\t\t}\n\t\t\t\tif tm == earliestCompTimeMs {\n\t\t\t\t\tearliestCompTimeTasks = append(earliestCompTimeTasks, ta)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(earliestCompTimeTasks) > 0 {\n\t\t\t\tcurrentTimeMs += earliestCompTimeTasks[0].Task().(*SimTask).RuntimeMs\n\t\t\t\tfor i := range earliestCompTimeTasks {\n\t\t\t\t\tst := earliestCompTimeTasks[i].Task().(*SimTask)\n\t\t\t\t\tendtimesPerUser[st.UserId] = append(endtimesPerUser[st.UserId], earliestCompTimeMs)\n\t\t\t\t\ttaskLatencyPerUser[st.UserId] = append(taskLatencyPerUser[st.UserId], currentTimeMs)\n\t\t\t\t\tearliestCompTimeTasks[i].Close()\n\t\t\t\t\tdelete(runningTasks, earliestCompTimeTasks[i])\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tuserIds := []int{}\n\tfor k := range endtimesPerUser {\n\t\tuserIds = append(userIds, k)\n\t\tfor i := len(userIds) - 1; i > 0 && userIds[i] < userIds[i-1]; i-- {\n\t\t\ttemp := userIds[i]\n\t\t\tuserIds[i] = userIds[i-1]\n\t\t\tuserIds[i-1] = temp\n\t\t}\n\t}\n\n\tfor _, id := range userIds {\n\t\tet := endtimesPerUser[id]\n\t\tfmt.Printf(\"\\t\\tuser %d:\\n\", id)\n\t\tfmt.Printf(\"\\t\\t\\tclock time:\\t\\t\\t %d ms\\n\", et[len(et)-1])\n\t\tfmt.Printf(\"\\t\\t\\tthroughput (tasks / sec):\\t %f\\n\", float32(len(et))/float32(et[len(et)-1])*1000)\n\t}\n}", "func TestConcurrentSimulation(t *testing.T) {\n\tif !(*sim) {\n\t\tt.Log(\"Skipping sim since -sim not passed\")\n\t\tt.Skip()\n\t}\n\n\tconst (\n\t\tperSecond = 1000\n\t\ttestSeconds = 1\n\t\titerations = perSecond * testSeconds\n\t)\n\n\twg := sync.WaitGroup{}\n\n\tc := Capped{cap: 10}\n\n\tfor i := 0; i < 2; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tinner := sync.WaitGroup{}\n\n\t\t\tlimiter := New(Config{\n\t\t\t\tCapacity: 100,\n\t\t\t\tMaxLimit: 20,\n\t\t\t})\n\n\t\t\tsuccess := int64(0)\n\n\t\t\tfor i := 0; i < iterations; i++ {\n\t\t\t\ttime.Sleep(msToWait(perSecond))\n\t\t\t\tinner.Add(1)\n\n\t\t\t\tgo func() {\n\t\t\t\t\tdefer 
inner.Done()\n\n\t\t\t\t\terr := limiter.Acquire(context.Background(), 0)\n\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tdefer limiter.Release()\n\n\t\t\t\t\terr = c.Lock()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlimiter.Backoff()\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tdefer c.Unlock()\n\n\t\t\t\t\ttime.Sleep(msToWait(100))\n\t\t\t\t\tatomic.AddInt64(&success, 1)\n\n\t\t\t\t}()\n\t\t\t}\n\n\t\t\t// Wait for the inner loop to finish\n\t\t\tinner.Wait()\n\n\t\t\tt.Logf(\"limit=%d, success=%f\", limiter.limit, (float64(success) / iterations))\n\n\t\t}()\n\t}\n\n\twg.Wait()\n\n}", "func (f *OrganismFactory) PassivateObject(ctx context.Context, object *pool.PooledObject) error {\n\t// log.Printf(\"%v\", string(debug.Stack()))\n\torganism := object.Object.(*Organism)\n\tif organism.Instructions != nil {\n\t\tfor i := 0; i < len(organism.Instructions); i++ {\n\t\t\torganism.Instructions[i] = nil\n\t\t}\n\t\torganism.Instructions = organism.Instructions[:0]\n\t}\n\torganism.AffectedAreas = organism.AffectedAreas[:0]\n\torganism.Diff = -1\n\torganism.hash = \"\"\n\torganism.Parent = nil\n\torganism.Patch = nil\n\torganism.diffMap = nil\n\treturn nil\n}", "func (vms *vmSupervisor) sleep() stateFunc {\n\ttime.Sleep(time.Second)\n\tif time.Since(vms.collectedAt) > 2*time.Minute {\n\t\treturn vms.collectVMs\n\t}\n\treturn vms.scaleUp\n}", "func TestManagerRestart(t *testing.T) {\n\tts := memorytopo.NewServer(\"cell1\")\n\tm := NewManager(ts)\n\n\t// Run the manager in the background.\n\twg, _, cancel := StartManager(m)\n\n\t// Create a Sleep job.\n\tuuid, err := m.Create(context.Background(), sleepFactoryName, []string{\"--duration\", \"60\"})\n\tif err != nil {\n\t\tt.Fatalf(\"cannot create sleep workflow: %v\", err)\n\t}\n\n\t// Start the job.\n\tif err := m.Start(context.Background(), uuid); err != nil {\n\t\tt.Fatalf(\"cannot start sleep workflow: %v\", err)\n\t}\n\n\t// Stop the manager.\n\tcancel()\n\twg.Wait()\n\t// Recreate the manager imitating restart.\n\tm = NewManager(ts)\n\n\t// Make sure the workflow is still in the topo server. This\n\t// validates that interrupting the Manager leaves the jobs in\n\t// the right state in the topo server.\n\twi, err := ts.GetWorkflow(context.Background(), uuid)\n\tif err != nil {\n\t\tt.Fatalf(\"cannot read workflow %v: %v\", uuid, err)\n\t}\n\tif wi.State != workflowpb.WorkflowState_Running {\n\t\tt.Fatalf(\"unexpected workflow state %v was expecting %v\", wi.State, workflowpb.WorkflowState_Running)\n\t}\n\n\t// Restart the manager.\n\twg, _, cancel = StartManager(m)\n\n\t// Make sure the job is in there shortly.\n\ttimeout := 0\n\tfor {\n\t\ttree, err := m.NodeManager().GetFullTree()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"cannot get full node tree: %v\", err)\n\t\t}\n\t\tif strings.Contains(string(tree), uuid) {\n\t\t\tbreak\n\t\t}\n\t\ttimeout++\n\t\tif timeout == 1000 {\n\t\t\tt.Fatalf(\"failed to wait for full node tree to appear: %v\", string(tree))\n\t\t}\n\t\ttime.Sleep(time.Millisecond)\n\t}\n\n\t// Stop the job. 
Note Stop() waits until the background go\n\t// routine that saves the job is done, so when we return from\n\t// this call, the job is saved with the right updated State\n\t// inside the topo server.\n\tif err := m.Stop(context.Background(), uuid); err != nil {\n\t\tt.Fatalf(\"cannot stop sleep workflow: %v\", err)\n\t}\n\n\t// And stop the manager.\n\tcancel()\n\twg.Wait()\n\n\t// Make sure the workflow is stopped in the topo server.\n\twi, err = ts.GetWorkflow(context.Background(), uuid)\n\tif err != nil {\n\t\tt.Fatalf(\"cannot read workflow %v: %v\", uuid, err)\n\t}\n\tif wi.State != workflowpb.WorkflowState_Done {\n\t\tt.Fatalf(\"unexpected workflow state %v was expecting %v\", wi.State, workflowpb.WorkflowState_Running)\n\t}\n\tif !strings.Contains(wi.Error, \"canceled\") {\n\t\tt.Errorf(\"invalid workflow error: %v\", wi.Error)\n\t}\n}", "func (mixerService *MixerService) CyclePool() {\n\tfmt.Println(\"\\nCycling Pool\")\n\t// check pathway deposit addresses for new deposits\n\t// move empty deposit addresses to pool && update pathway debt amount (with 1% cut)\n\tfmt.Println(\"\\n\\nChecking deposit addresses for new deposits\")\n\tmixerService.checkAndEmptyDepositAddresses()\n\n\tfmt.Println(\"\\n\\nPruning Pool (moving funds to members)\")\n\t// go through all pathways with outstanding debt, and \"prune\" each pathway\n\tmixerService.prunePathwayDebt()\n}", "func (s *State) StartSimulate(actions <-chan Action) <-chan MarshalledState {\n\tch := make(chan MarshalledState)\n\n\tgo func() {\n\t\t// We want to run the thing every 500 milliseconds\n\t\ttick := time.NewTicker(time.Millisecond * 200)\n\t\tdefer tick.Stop()\n\n\t\tfor {\n\t\t\ts.simulateTick()\n\n\t\t\tif !s.diff.isEmpty() {\n\t\t\t\tmd, err := json.Marshal(s.diff)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogrus.Fatal(err)\n\t\t\t\t}\n\t\t\t\tms, err := json.Marshal(s.state)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogrus.Fatal(err)\n\t\t\t\t}\n\n\t\t\t\tspores := []spore{}\n\t\t\t\tif s.diff.Spores != nil {\n\t\t\t\t\tspores = s.diff.Spores\n\t\t\t\t}\n\n\t\t\t\ts.diff = diff{[]tileDiff{}, map[string]*Species{}, []string{}, spores}\n\t\t\t\tch <- MarshalledState{ms, md}\n\t\t\t}\n\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-tick.C:\n\t\t\t\tcase action := <-actions:\n\t\t\t\t\ts.handleAction(&action)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn ch\n}" ]
[ "0.57773536", "0.5751083", "0.55268425", "0.5509779", "0.5475696", "0.54533315", "0.5447968", "0.5350582", "0.5280517", "0.52645826", "0.5243615", "0.52426934", "0.5230957", "0.5226076", "0.5213898", "0.51857793", "0.51736426", "0.51404357", "0.51184314", "0.5090954", "0.50567055", "0.5032514", "0.5031977", "0.49988925", "0.4998405", "0.49888527", "0.49864188", "0.49675056", "0.49618846", "0.49607167", "0.49587235", "0.4954876", "0.4949031", "0.4942905", "0.49250564", "0.49179924", "0.4915724", "0.49061608", "0.48988003", "0.48947293", "0.4893795", "0.48852864", "0.4884091", "0.48766452", "0.48721495", "0.48496753", "0.4848659", "0.48449057", "0.48412246", "0.48386306", "0.483495", "0.48298907", "0.48250258", "0.48248497", "0.48201945", "0.48184562", "0.48138624", "0.48093283", "0.47930488", "0.47927076", "0.4790907", "0.4790331", "0.4771603", "0.47567093", "0.4755249", "0.47531044", "0.47530866", "0.47505587", "0.47479773", "0.4747767", "0.47454697", "0.47412363", "0.47395095", "0.47322482", "0.47275227", "0.47227937", "0.47196144", "0.4717086", "0.47104865", "0.47091946", "0.46999907", "0.46993658", "0.46953654", "0.469473", "0.4693963", "0.46910816", "0.46880803", "0.46771294", "0.46746072", "0.46658847", "0.46653807", "0.46598715", "0.46578276", "0.46560484", "0.46523526", "0.46501845", "0.4647511", "0.4646587", "0.46448326", "0.4637889" ]
0.6317484
0
NewMockFetcher creates a new mock instance
func NewMockFetcher(ctrl *gomock.Controller) *MockFetcher { mock := &MockFetcher{ctrl: ctrl} mock.recorder = &MockFetcherMockRecorder{mock} return mock }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func newTestFetcher() *FetcherTest {\n\treturn &FetcherTest{}\n}", "func New() *Mock {\n\treturn &Mock{\n\t\tm: mockMap{},\n\t\toldTransport: http.DefaultTransport,\n\t}\n}", "func (c *Crawler) newFetcher(height uint64) {\n\t\n\t// Stop previous fetcher\n\tif c.fetcherStop != nil {\n\t\tc.fetcherStop <- true\n\t}\n\t\n\t// Both channels to be closed by fetcher task\n\tc.fetcherStop = make(chan bool)\n\tc.fetcherBlocks = make(chan blockRecord, FetcherBlockBufferSize)\n\n\t//\n\tgo fetcher(c.rpcConfig, height, c.fetcherBlocks, c.fetcherStop)\n}", "func (e *Exporter) newFetcher(hostname string) *Fetcher {\n\treturn NewFetcher(hostname, e.chAccessInfo.Username, e.chAccessInfo.Password, e.chAccessInfo.Port)\n}", "func NewMock(t *testing.T) *MockT { return &MockT{t: t} }", "func NewForge(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *Forge {\n\tmock := &Forge{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func newFetcher(conn *libnet.Conn) *fetcher {\n\tf := &fetcher{\n\t\tconn: conn,\n\t\tbr: bufio.NewReader(conn, bufio.Get(4096)),\n\t\tbw: bufio.NewWriter(conn),\n\t}\n\treturn f\n}", "func NewMockObject(uid, name, ns string, res api.Resource) api.Object {\n\treturn NewObject(uuid.NewFromString(uid), name, ns, res)\n}", "func New() (*mock, error) {\n\treturn &mock{\n\t\tConfigService: ConfigService{},\n\t\tContainerService: ContainerService{},\n\t\tDistributionService: DistributionService{},\n\t\tImageService: ImageService{},\n\t\tNetworkService: NetworkService{},\n\t\tNodeService: NodeService{},\n\t\tPluginService: PluginService{},\n\t\tSecretService: SecretService{},\n\t\tServiceService: ServiceService{},\n\t\tSystemService: SystemService{},\n\t\tSwarmService: SwarmService{},\n\t\tVolumeService: VolumeService{},\n\t\tVersion: Version,\n\t}, nil\n}", "func NewMock() Cache {\n\treturn &mock{}\n}", "func NewMock(now time.Time) *Mock {\n\treturn &Mock{\n\t\tnow: now,\n\t\tmockTimers: &timerHeap{},\n\t}\n}", "func NewRequester(t mockConstructorTestingTNewRequester) *Requester {\n\tmock := &Requester{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func CreateMock(method interface{}, url interface{}, headers interface{}, body interface{}) *go_mock_yourself_http.Mock {\n\tmockRequest := new(go_mock_yourself_http.Request)\n\n\tif method != nil {\n\t\tmockRequest.SetMethod(method)\n\t}\n\n\tif url != nil {\n\t\tmockRequest.SetUrl(url)\n\t}\n\n\tif body != nil {\n\t\tmockRequest.SetBody(body)\n\t}\n\n\tif headers != nil {\n\t\tmockRequest.SetHeaders(headers)\n\t}\n\n\tmockResponse := new(go_mock_yourself_http.Response)\n\tmockResponse.SetStatusCode(222)\n\tmockResponse.SetBody(\"i'm a cute loving mock, almost as cute as mumi, bichi and rasti\")\n\n\tmock, _ := go_mock_yourself_http.NewMock(\"my lovely testing mock\", mockRequest, mockResponse)\n\treturn mock\n}", "func New(client client.Client, namespace string) *fakeManager {\n\treturn &fakeManager{\n\t\tclient: client,\n\t\tnamespace: namespace,\n\t}\n}", "func New(t *testing.T, requests []ExpectedRequest) *httptest.Server {\n\th := mockHandler(t, requests)\n\treturn httptest.NewServer(h)\n}", "func New(t *testing.T, requests []ExpectedRequest) *httptest.Server {\n\th := mockHandler(t, requests)\n\treturn httptest.NewServer(h)\n}", "func NewFetcher(ASlist []string) *Fetcher {\n\treturn &Fetcher{\n\t\tASNs: ASlist,\n\t}\n\n}", "func NewMock() *Mock {\n\tc := &Mock{\n\t\tFakeIncoming: func() chan []byte {\n\t\t\treturn make(chan []byte, 
2)\n\t\t},\n\t\tFakeName: func() string {\n\t\t\treturn \"TestClient\"\n\t\t},\n\t\tFakeGame: func() string {\n\t\t\treturn \"test\"\n\t\t},\n\t\tFakeClose: func() {\n\t\t\t// Do nothing\n\t\t},\n\t\tFakeStopTimer: func() {\n\t\t\t// Do nothing\n\t\t},\n\t\tFakeRoom: func() interfaces.Room {\n\t\t\treturn nil\n\t\t},\n\t\tFakeSetRoom: func(interfaces.Room) {\n\n\t\t},\n\t}\n\n\tc.FakeWritePump = func() {\n\t\tfor range c.Incoming() {\n\t\t\t// Do nothing\n\t\t}\n\t}\n\n\tc.FakeSetName = func(string) interfaces.Client {\n\t\treturn c\n\t}\n\treturn c\n}", "func NewFetcher() *Fetcher {\n\treturn new(Fetcher)\n}", "func NewMock(response string) *Operator {\n\treturn &Operator{cli: client.NewMock(response)}\n}", "func NewMock() *Mock {\n\treturn &Mock{\n\t\tData: MockData{\n\t\t\tUptime: true,\n\t\t\tFile: true,\n\t\t\tTCPResponse: true,\n\t\t\tHTTPStatus: true,\n\t\t},\n\t}\n}", "func NewFetcher(cluster ClusterSource, rpc RPCClient) *Fetcher {\n\treturn &Fetcher{\n\t\tcluster: cluster,\n\t\trpc: rpc,\n\t}\n}", "func newLightFetcher(h *clientHandler) *lightFetcher {\n\tf := &lightFetcher{\n\t\thandler: h,\n\t\tchain: h.backend.blockchain,\n\t\tpeers: make(map[*peer]*fetcherPeerInfo),\n\t\tdeliverChn: make(chan fetchResponse, 100),\n\t\trequested: make(map[uint64]fetchRequest),\n\t\ttimeoutChn: make(chan uint64),\n\t\trequestTrigger: make(chan struct{}, 1),\n\t\tsyncDone: make(chan *peer),\n\t\tcloseCh: make(chan struct{}),\n\t\tmaxConfirmedTd: big.NewInt(0),\n\t}\n\th.backend.peers.notify(f)\n\n\tf.wg.Add(1)\n\tgo f.syncLoop()\n\treturn f\n}", "func NewMock(r ...MockResponse) *http.Client {\n\treturn &http.Client{\n\t\tTransport: newRoundTripper(r...),\n\t}\n}", "func newMockClient(doer func(*http.Request) (*http.Response, error)) *http.Client {\n\treturn &http.Client{\n\t\tTransport: transportFunc(doer),\n\t}\n}", "func NewFetcher(concurrency int, waitTime int) (f *Fetcher) {\n\tf = &Fetcher{concurrency, waitTime}\n\treturn\n}", "func NewMock() *Mock {\n\treturn &Mock{now: time.Unix(0, 0)}\n}", "func NewFetcher(client LogClient, opts *FetcherOptions) *Fetcher {\n\tcancel := func() {} // Protect against calling Stop before Run.\n\treturn &Fetcher{\n\t\turi: client.BaseURI(),\n\t\tclient: client,\n\t\topts: opts,\n\t\tcancel: cancel,\n\t}\n}", "func (f *FactoryFake) New(address string) (client.Interface, error) {\n\tc, _ := f.Clients[address]\n\treturn c, nil\n}", "func (o *FakeObject) New(args ...interface{}) Object { return o.Invoke(args) }", "func newFakeReconciler(initObjects ...runtime.Object) *ReconcileMachineRemediation {\n\tfakeClient := fake.NewFakeClient(initObjects...)\n\tremediator := &FakeRemedatior{}\n\treturn &ReconcileMachineRemediation{\n\t\tclient: fakeClient,\n\t\tremediator: remediator,\n\t\tnamespace: consts.NamespaceOpenshiftMachineAPI,\n\t}\n}", "func New(\n\tmid module.MID,\n\tclient *http.Client,\n\tscoreCalculator module.CalculateScore,\n\tmaxThread int) (downloader module.Downloader, yierr *constant.YiError) {\n\tmoduleBase, yierr := stub.NewModuleInternal(mid, scoreCalculator)\n\t//check whether the args are vaild\n\tif yierr != nil {\n\t\treturn\n\t}\n\tif client == nil {\n\t\tyierr = constant.NewYiErrorf(constant.ERR_NEW_DOWNLOADER_FAIL, \"Client is nil.\")\n\t\treturn\n\t}\n\n\treturn &myDownloader{\n\t\tModuleInternal: moduleBase,\n\t\thttpClient: client,\n\t\tPool: *pool.NewPool(maxThread),\n\t}, nil\n}", "func NewMock(path string, nodes uint, replicas uint, vbuckets uint, specs ...BucketSpec) (m *Mock, err error) {\n\tvar lsn *net.TCPListener\n\tchAccept := 
make(chan bool)\n\tm = &Mock{}\n\n\tdefer func() {\n\t\tclose(chAccept)\n\t\tif lsn != nil {\n\t\t\tif err := lsn.Close(); err != nil {\n\t\t\t\tlog.Printf(\"Failed to close listener: %v\", err)\n\t\t\t}\n\t\t}\n\t\texc := recover()\n\n\t\tif exc == nil {\n\t\t\t// No errors, everything is OK\n\t\t\treturn\n\t\t}\n\n\t\t// Close mock on error, destroying resources\n\t\tm.Close()\n\t\tif mExc, ok := exc.(mockError); !ok {\n\t\t\tpanic(mExc)\n\t\t} else {\n\t\t\tm = nil\n\t\t\terr = mExc\n\t\t}\n\t}()\n\n\tif lsn, err = net.ListenTCP(\"tcp\", &net.TCPAddr{Port: 0}); err != nil {\n\t\tthrowMockError(\"Couldn't set up listening socket\", err)\n\t}\n\t_, ctlPort, err := net.SplitHostPort(lsn.Addr().String())\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to split host and port: %v\", err)\n\t}\n\tlog.Printf(\"Listening for control connection at %s\\n\", ctlPort)\n\n\tgo func() {\n\t\tvar err error\n\n\t\tdefer func() {\n\t\t\tchAccept <- false\n\t\t}()\n\t\tif m.conn, err = lsn.Accept(); err != nil {\n\t\t\tthrowMockError(\"Couldn't accept incoming control connection from mock\", err)\n\t\t\treturn\n\t\t}\n\t}()\n\n\tif len(specs) == 0 {\n\t\tspecs = []BucketSpec{{Name: \"default\", Type: BCouchbase}}\n\t}\n\n\toptions := []string{\n\t\t\"-jar\", path, \"--harakiri-monitor\", \"localhost:\" + ctlPort, \"--port\", \"0\",\n\t\t\"--replicas\", strconv.Itoa(int(replicas)),\n\t\t\"--vbuckets\", strconv.Itoa(int(vbuckets)),\n\t\t\"--nodes\", strconv.Itoa(int(nodes)),\n\t\t\"--buckets\", m.buildSpecStrings(specs),\n\t}\n\n\tlog.Printf(\"Invoking java %s\", strings.Join(options, \" \"))\n\tm.cmd = exec.Command(\"java\", options...)\n\n\tm.cmd.Stdout = os.Stdout\n\tm.cmd.Stderr = os.Stderr\n\n\tif err = m.cmd.Start(); err != nil {\n\t\tm.cmd = nil\n\t\tthrowMockError(\"Couldn't start command\", err)\n\t}\n\n\tselect {\n\tcase <-chAccept:\n\t\tbreak\n\n\tcase <-time.After(mockInitTimeout):\n\t\tthrowMockError(\"Timed out waiting for initialization\", errors.New(\"timeout\"))\n\t}\n\n\tm.rw = bufio.NewReadWriter(bufio.NewReader(m.conn), bufio.NewWriter(m.conn))\n\n\t// Read the port buffer, which is delimited by a NUL byte\n\tif portBytes, err := m.rw.ReadBytes(0); err != nil {\n\t\tthrowMockError(\"Couldn't get port information\", err)\n\t} else {\n\t\tportBytes = portBytes[:len(portBytes)-1]\n\t\tif entryPort, err := strconv.Atoi(string(portBytes)); err != nil {\n\t\t\tthrowMockError(\"Incorrectly formatted port from mock\", err)\n\t\t} else {\n\t\t\tm.EntryPort = uint16(entryPort)\n\t\t}\n\t}\n\n\tlog.Printf(\"Mock HTTP port at %d\\n\", m.EntryPort)\n\treturn\n}", "func newMockClient(doer func(*http.Request) (*http.Response, error)) *http.Client {\n\tv := &http.Transport{\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tDialContext: (&net.Dialer{\n\t\t\tTimeout: 30 * time.Second,\n\t\t\tKeepAlive: 30 * time.Second,\n\t\t\tDualStack: true,\n\t\t}).DialContext,\n\t\tMaxIdleConns: 100,\n\t\tIdleConnTimeout: 90 * time.Second,\n\t\tTLSHandshakeTimeout: 10 * time.Second,\n\t\tExpectContinueTimeout: 1 * time.Second,\n\t}\n\tv.RegisterProtocol(\"http\", transportFunc(doer))\n\treturn &http.Client{\n\t\tTransport: http.RoundTripper(v),\n\t}\n}", "func New(cfg *Config,\n\tapiManager apimanager.Provider,\n\tlogger logger.Logger, registerer prometheus.Registerer) (Provider, error) {\n\tservice := &MockServer{\n\t\tcfg: cfg,\n\t\tregisterer: registerer,\n\t\tapiManager: apiManager,\n\t\tLogger: logger.NewLogger(\"httpMockServer\"),\n\t}\n\treturn service, nil\n}", "func newFactory() func(config *client.Config) 
(client.Client, *probe.Error) {\n\tclientCache := make(map[uint32]minio.CloudStorageAPI)\n\tmutex := &sync.Mutex{}\n\n\t// Return New function.\n\treturn func(config *client.Config) (client.Client, *probe.Error) {\n\t\tu := client.NewURL(config.HostURL)\n\t\ttransport := http.DefaultTransport\n\t\tif config.Debug == true {\n\t\t\tif config.Signature == \"S3v4\" {\n\t\t\t\ttransport = httptracer.GetNewTraceTransport(NewTraceV4(), http.DefaultTransport)\n\t\t\t}\n\t\t\tif config.Signature == \"S3v2\" {\n\t\t\t\ttransport = httptracer.GetNewTraceTransport(NewTraceV2(), http.DefaultTransport)\n\t\t\t}\n\t\t}\n\n\t\t// New S3 configuration.\n\t\ts3Conf := minio.Config{\n\t\t\tAccessKeyID: config.AccessKey,\n\t\t\tSecretAccessKey: config.SecretKey,\n\t\t\tTransport: transport,\n\t\t\tEndpoint: u.Scheme + u.SchemeSeparator + u.Host,\n\t\t\tSignature: func() minio.SignatureType {\n\t\t\t\tif config.Signature == \"S3v2\" {\n\t\t\t\t\treturn minio.SignatureV2\n\t\t\t\t}\n\t\t\t\treturn minio.SignatureV4\n\t\t\t}(),\n\t\t}\n\n\t\ts3Conf.SetUserAgent(config.AppName, config.AppVersion, config.AppComments...)\n\n\t\t// Generate a hash out of s3Conf.\n\t\tconfHash := fnv.New32a()\n\t\tconfHash.Write([]byte(s3Conf.Endpoint + s3Conf.AccessKeyID + s3Conf.SecretAccessKey))\n\t\tconfSum := confHash.Sum32()\n\n\t\t// Lookup previous cache by hash.\n\t\tmutex.Lock()\n\t\tdefer mutex.Unlock()\n\t\tvar api minio.CloudStorageAPI\n\t\tfound := false\n\t\tif api, found = clientCache[confSum]; !found {\n\t\t\t// Not found. Instantiate a new minio client.\n\t\t\tvar e error\n\t\t\tapi, e = minio.New(s3Conf)\n\t\t\tif e != nil {\n\t\t\t\treturn nil, probe.NewError(e)\n\t\t\t}\n\t\t\t// Cache the new minio client with hash of config as key.\n\t\t\tclientCache[confSum] = api\n\t\t}\n\n\t\ts3Clnt := &s3Client{\n\t\t\tmu: new(sync.Mutex),\n\t\t\tapi: api,\n\t\t\thostURL: u,\n\t\t\tvirtualStyle: isVirtualHostStyle(u.Host),\n\t\t}\n\t\treturn s3Clnt, nil\n\t}\n}", "func NewMock(middleware []Middleware) OrganizationService {\n\tvar svc OrganizationService = NewBasicOrganizationServiceServiceMock()\n\tfor _, m := range middleware {\n\t\tsvc = m(svc)\n\t}\n\treturn svc\n}", "func NewFetcher(ctx context.Context, bsrv bserv.BlockService, bv consensus.BlockSyntaxValidator) *Fetcher {\n\treturn &Fetcher{\n\t\tsession: bserv.NewSession(ctx, bsrv),\n\t\tvalidator: bv,\n\t}\n}", "func NewMock(serverHost string) (*MockClient, error) {\n\treturn &MockClient{}, nil\n}", "func (cfg FetcherConfig) New() *Fetcher {\n\treturn &Fetcher{\n\t\tValidator: cfg.Validator,\n\t\tSplitter: cfg.Splitter,\n\t\tResolver: NewResolver(cfg.PathDB, cfg.RevCache, cfg.LocalInfo),\n\t\tRequester: &DefaultRequester{API: cfg.RequestAPI, DstProvider: cfg.DstProvider},\n\t\tReplyHandler: &seghandler.Handler{\n\t\t\tVerifier: &seghandler.DefaultVerifier{Verifier: cfg.VerificationFactory.NewVerifier()},\n\t\t\tStorage: &seghandler.DefaultStorage{PathDB: cfg.PathDB, RevCache: cfg.RevCache},\n\t\t},\n\t\tPathDB: cfg.PathDB,\n\t\tQueryInterval: cfg.QueryInterval,\n\t\tNextQueryCleaner: NextQueryCleaner{PathDB: cfg.PathDB},\n\t\tCryptoLookupAtLocalCS: cfg.SciondMode,\n\t\tmetrics: metrics.NewFetcher(cfg.MetricsNamespace),\n\t}\n}", "func newMockTransport() *mockTransport {\n\treturn &mockTransport{\n\t\turlToResponseAndError: make(map[string]mockTransportResponse),\n\t\trequestURLsReceived: make([]string, 0),\n\t}\n}", "func StubNew(cfg CacheConfig) *Cache {\n\tshared := stubnewShared(cfg)\n\treturn &Cache{\n\t\tshared: shared,\n\t\taccounts: &accountService{shared: 
shared},\n\t\tconfig: &configService{shared: shared},\n\t}\n}", "func MakeFetcher(procName, portFlag string) cluster.Fetcher {\n\treturn &localFetcher{\n\t\tprocName: procName,\n\t\taddrFlag: portFlag,\n\t\tre: regexp.MustCompile(fmt.Sprintf(\"%s.*%s(?: +|=)([^ ]*)\", procName, portFlag)),\n\t}\n}", "func newAWSFetcher(cfg awsFetcherConfig, plugin awsFetcherPlugin) (*awsFetcher, error) {\n\tif err := cfg.CheckAndSetDefaults(plugin.ComponentShortName()); err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\treturn &awsFetcher{cfg: cfg, plugin: plugin}, nil\n}", "func newMockNetworks() (*MockNetwork, *MockNetwork) {\n\tc := mockCon.NewConn()\n\treturn &MockNetwork{c.Client}, &MockNetwork{c.Server}\n}", "func NewDigestHolderMock(t minimock.Tester) *DigestHolderMock {\n\tm := &DigestHolderMock{t: t}\n\tif controller, ok := t.(minimock.MockController); ok {\n\t\tcontroller.RegisterMocker(m)\n\t}\n\n\tm.AsByteStringMock = mDigestHolderMockAsByteString{mock: m}\n\n\tm.AsBytesMock = mDigestHolderMockAsBytes{mock: m}\n\n\tm.CopyOfDigestMock = mDigestHolderMockCopyOfDigest{mock: m}\n\n\tm.EqualsMock = mDigestHolderMockEquals{mock: m}\n\tm.EqualsMock.callArgs = []*DigestHolderMockEqualsParams{}\n\n\tm.FixedByteSizeMock = mDigestHolderMockFixedByteSize{mock: m}\n\n\tm.FoldToUint64Mock = mDigestHolderMockFoldToUint64{mock: m}\n\n\tm.GetDigestMethodMock = mDigestHolderMockGetDigestMethod{mock: m}\n\n\tm.ReadMock = mDigestHolderMockRead{mock: m}\n\tm.ReadMock.callArgs = []*DigestHolderMockReadParams{}\n\n\tm.SignWithMock = mDigestHolderMockSignWith{mock: m}\n\tm.SignWithMock.callArgs = []*DigestHolderMockSignWithParams{}\n\n\tm.WriteToMock = mDigestHolderMockWriteTo{mock: m}\n\tm.WriteToMock.callArgs = []*DigestHolderMockWriteToParams{}\n\n\treturn m\n}", "func newFetch(g *Goproxy, name, tempDir string) (*fetch, error) {\n\tf := &fetch{\n\t\tg: g,\n\t\tname: name,\n\t\ttempDir: tempDir,\n\t}\n\n\tvar escapedModulePath string\n\tif strings.HasSuffix(name, \"/@latest\") {\n\t\tescapedModulePath = strings.TrimSuffix(name, \"/@latest\")\n\t\tf.ops = fetchOpsResolve\n\t\tf.moduleVersion = \"latest\"\n\t\tf.contentType = \"application/json; charset=utf-8\"\n\t} else if strings.HasSuffix(name, \"/@v/list\") {\n\t\tescapedModulePath = strings.TrimSuffix(name, \"/@v/list\")\n\t\tf.ops = fetchOpsList\n\t\tf.moduleVersion = \"latest\"\n\t\tf.contentType = \"text/plain; charset=utf-8\"\n\t} else {\n\t\tnameParts := strings.SplitN(name, \"/@v/\", 2)\n\t\tif len(nameParts) != 2 {\n\t\t\treturn nil, errors.New(\"missing /@v/\")\n\t\t}\n\n\t\tescapedModulePath = nameParts[0]\n\n\t\tnameExt := path.Ext(nameParts[1])\n\t\tescapedModuleVersion := strings.TrimSuffix(\n\t\t\tnameParts[1],\n\t\t\tnameExt,\n\t\t)\n\t\tswitch nameExt {\n\t\tcase \".info\":\n\t\t\tf.ops = fetchOpsDownloadInfo\n\t\t\tf.contentType = \"application/json; charset=utf-8\"\n\t\tcase \".mod\":\n\t\t\tf.ops = fetchOpsDownloadMod\n\t\t\tf.contentType = \"text/plain; charset=utf-8\"\n\t\tcase \".zip\":\n\t\t\tf.ops = fetchOpsDownloadZip\n\t\t\tf.contentType = \"application/zip\"\n\t\tcase \"\":\n\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\"no file extension in filename %q\",\n\t\t\t\tescapedModuleVersion,\n\t\t\t)\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\"unexpected extension %q\",\n\t\t\t\tnameExt,\n\t\t\t)\n\t\t}\n\n\t\tvar err error\n\t\tf.moduleVersion, err = module.UnescapeVersion(\n\t\t\tescapedModuleVersion,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif f.moduleVersion == \"latest\" {\n\t\t\treturn nil, 
errors.New(\"invalid version\")\n\t\t} else if !semver.IsValid(f.moduleVersion) {\n\t\t\tif f.ops == fetchOpsDownloadInfo {\n\t\t\t\tf.ops = fetchOpsResolve\n\t\t\t} else {\n\t\t\t\treturn nil, errors.New(\"unrecognized version\")\n\t\t\t}\n\t\t}\n\t}\n\n\tvar err error\n\tf.modulePath, err = module.UnescapePath(escapedModulePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tf.modAtVer = fmt.Sprint(f.modulePath, \"@\", f.moduleVersion)\n\tf.requiredToVerify = g.goBinEnvGOSUMDB != \"off\" &&\n\t\t!globsMatchPath(g.goBinEnvGONOSUMDB, f.modulePath)\n\n\treturn f, nil\n}", "func NewFake() *FakeTracker {\n\tfake := FakeTracker{}\n\tfake.Tracker = &Tracker{\n\t\thadError: false,\n\t\toutput: &fake.Buffer,\n\t}\n\treturn &fake\n}", "func New() *Prober {\n\treturn newForTest(time.Now, newRealTicker)\n}", "func NewFetcher(sugar *zap.SugaredLogger,\n\tstorage rrstorage.Interface,\n\tcrawler *crawler.ReserveRatesCrawler,\n\tlastBlockResolver lastblockdaily.Interface,\n\tethUSDRate tokenrate.ETHUSDRateProvider,\n\tretryDelay, sleepTime time.Duration,\n\tretryAttempts int,\n\taddressClient client.Interface,\n\toptions ...Option) (*Fetcher, error) {\n\n\tfetcher := &Fetcher{\n\t\tsugar: sugar,\n\t\tstorage: storage,\n\t\tcrawler: crawler,\n\t\tlastBlockResolver: lastBlockResolver,\n\t\tretryDelayTime: retryDelay,\n\t\tsleepTime: sleepTime,\n\t\tretryAttempts: retryAttempts,\n\t\tethUSDRateFetcher: ethUSDRate,\n\t\tmutex: &sync.Mutex{},\n\t\tfailed: false,\n\t\taddressClient: addressClient,\n\t}\n\tfor _, opt := range options {\n\t\topt(fetcher)\n\t}\n\n\treturn fetcher, nil\n}", "func NewRequester4(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *Requester4 {\n\tmock := &Requester4{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func newHandler(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *handler {\n\tmock := &handler{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func newFileFetcher() *fileFetcherExtender {\n\treturn &fileFetcherExtender{new(DefaultExtender)}\n}", "func Mock(fake string) func() {\n\tmutex.Lock()\n\tdefer mutex.Unlock()\n\torigin := backend\n\tbackend = fake\n\treturn func() { Mock(origin) }\n}", "func newKeyServerWithMocking(user upspin.UserName, ref string, data []byte) (*server, *storagetest.ExpectDownloadCapturePut) {\n\tmockGCP := &storagetest.ExpectDownloadCapturePut{\n\t\tRef: []string{ref},\n\t\tData: [][]byte{data},\n\t\tPutContents: make([][]byte, 0, 1),\n\t\tPutRef: make([]string, 0, 1),\n\t}\n\ts := &server{\n\t\tstorage: mockGCP,\n\t\tuser: user,\n\t\tlookupTXT: mockLookupTXT,\n\t\tlogger: &noopLogger{},\n\t\tcache: cache.NewLRU(10),\n\t\tnegCache: cache.NewLRU(10),\n\t}\n\treturn s, mockGCP\n}", "func New() *MockLibvirt {\n\tserv, conn := net.Pipe()\n\n\tm := &MockLibvirt{\n\t\tConn: conn,\n\t\tTest: serv,\n\t}\n\n\tgo m.handle(serv)\n\n\treturn m\n}", "func newMockSubscriber() mockSubscriber {\n\treturn mockSubscriber{}\n}", "func New(t *testing.T) *Mocker {\n\tm := &Mocker{\n\t\thandlers: make(map[string]map[string][]Handler, 1),\n\t\tmut: new(sync.Mutex),\n\t\tt: t,\n\t}\n\n\tm.Server = httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tm.mut.Lock()\n\t\tdefer m.mut.Unlock()\n\n\t\tpath := strings.TrimRight(r.URL.Path, \"/\")\n\n\t\tmethHandlers, ok := m.handlers[path]\n\t\tif !assert.True(t, ok, \"unhandled path '\"+path+\"'\") 
{\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\n\t\th, ok := methHandlers[r.Method]\n\t\tif !assert.True(t, ok, \"unhandled method '\"+r.Method+\"' on path '\"+path+\"'\") {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\n\t\th[0].ServeHTTP(w, r)\n\n\t\tif len(h) == 1 { // this is the only handler for this method\n\t\t\tif len(methHandlers) == 1 { // the current method is the only method for this path\n\t\t\t\tdelete(m.handlers, path)\n\t\t\t} else { // there are other methods for this path\n\t\t\t\tdelete(m.handlers[path], r.Method)\n\t\t\t}\n\t\t} else { // there are multiple handlers for this method\n\t\t\tm.handlers[path][r.Method] = m.handlers[path][r.Method][1:]\n\t\t}\n\t}))\n\n\tm.Server.StartTLS()\n\n\tm.Client = &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tDialContext: func(ctx context.Context, network, _ string) (conn net.Conn, err error) {\n\t\t\t\treturn net.Dial(network, m.Server.Listener.Addr().String())\n\t\t\t},\n\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\tInsecureSkipVerify: true,\n\t\t\t},\n\t\t},\n\t}\n\n\treturn m\n}", "func New(fetcherConfig *config.MetainfoFetcherConfig) (fetcher *MetainfoFetcher, err error) {\n\tclientConfig := torrent.Config{}\n\t// Well, it seems this is the right way to convert speed -> rate.Limiter\n\t// https://github.com/anacrolix/torrent/blob/master/cmd/torrent/main.go\n\tif fetcherConfig.UploadRateLimiter != -1 {\n\t\tclientConfig.UploadRateLimiter = rate.NewLimiter(rate.Limit(fetcherConfig.UploadRateLimiter*1024), 256<<10)\n\t}\n\tif fetcherConfig.DownloadRateLimiter != -1 {\n\t\tclientConfig.DownloadRateLimiter = rate.NewLimiter(rate.Limit(fetcherConfig.DownloadRateLimiter*1024), 1<<20)\n\t}\n\n\tclient, err := torrent.NewClient(&clientConfig)\n\n\tfetcher = &MetainfoFetcher{\n\t\ttorrentClient: client,\n\t\tresults: make(chan Result, fetcherConfig.QueueSize),\n\t\tqueueSize: fetcherConfig.QueueSize,\n\t\ttimeout: fetcherConfig.Timeout,\n\t\tmaxDays: fetcherConfig.MaxDays,\n\t\tnewTorrentsOnly: fetcherConfig.FetchNewTorrentsOnly,\n\t\tbaseFailCooldown: fetcherConfig.BaseFailCooldown,\n\t\tmaxFailCooldown: fetcherConfig.MaxFailCooldown,\n\t\tdone: make(chan int, 1),\n\t\tfailedOperations: make(map[uint]time.Time),\n\t\tnumFails: make(map[uint]int),\n\t\twakeUp: time.NewTicker(time.Second * time.Duration(fetcherConfig.WakeUpInterval)),\n\t}\n\n\treturn\n}", "func New(f Func) *Memo {\n\tmemo := &Memo{requests: make(chan request)}\n\tgo memo.server(f)\n\treturn memo\n}", "func New(config *config.Config, fetcher *fetcher.Fetcher) Handler {\n\treturn Handler{\n\t\tfetcher: fetcher,\n\t\tconfig: config,\n\t}\n}", "func newMockTransport(fn string) (*mockTransport, error) {\n\n\tf, err := os.Open(fn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar cfg config\n\n\terr = json.NewDecoder(f).Decode(&cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &mockTransport{responses: cfg}, nil\n}", "func NewMock() Client {\n\treturn &mockClient{}\n}", "func newRunner(output string, err error) *MockRunner {\n\tm := &MockRunner{}\n\tm.On(\"Run\", mock.Anything).Return([]byte(output), err)\n\treturn m\n}", "func newDownloader(endpoint string) *Downloader {\n\treturn &Downloader{\n\t\tClient: bzzclient.NewClient(endpoint),\n\t}\n}", "func newMock(deps mockDependencies, t testing.TB) (Component, error) {\n\tbackupConfig := config.NewConfig(\"\", \"\", strings.NewReplacer())\n\tbackupConfig.CopyConfig(config.Datadog)\n\n\tconfig.Datadog.CopyConfig(config.NewConfig(\"mock\", \"XXXX\", 
strings.NewReplacer()))\n\n\tconfig.SetFeatures(t, deps.Params.Features...)\n\n\t// call InitConfig to set defaults.\n\tconfig.InitConfig(config.Datadog)\n\tc := &cfg{\n\t\tConfig: config.Datadog,\n\t}\n\n\tif !deps.Params.SetupConfig {\n\n\t\tif deps.Params.ConfFilePath != \"\" {\n\t\t\tconfig.Datadog.SetConfigType(\"yaml\")\n\t\t\terr := config.Datadog.ReadConfig(strings.NewReader(deps.Params.ConfFilePath))\n\t\t\tif err != nil {\n\t\t\t\t// The YAML was invalid, fail initialization of the mock config.\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t} else {\n\t\twarnings, _ := setupConfig(deps)\n\t\tc.warnings = warnings\n\t}\n\n\t// Overrides are explicit and will take precedence over any other\n\t// setting\n\tfor k, v := range deps.Params.Overrides {\n\t\tconfig.Datadog.Set(k, v)\n\t}\n\n\t// swap the existing config back at the end of the test.\n\tt.Cleanup(func() { config.Datadog.CopyConfig(backupConfig) })\n\n\treturn c, nil\n}", "func NewMock() *Mock {\n\treturn &Mock{VolumesMock: &VolumesServiceMock{}}\n}", "func NewMockRequestHandler(conf *config.Config) *MockRequestHandler {\n\trh := &MockRequestHandler{\n\t\tconf: conf,\n\t\tin: make(chan *protocol.Request, 1000),\n\t\tstopC: make(chan struct{}),\n\t\tpending: make(chan handlerFunc, 1000),\n\t\tdone: make(chan error),\n\t\tfailC: make(chan error, 1),\n\t}\n\n\tgo rh.loop()\n\n\treturn rh\n}", "func NewFakeProvider(t *testing.T) *FakeProvider {\n\tbuilder := chain.NewBuilder(t, address.Address{})\n\treturn &FakeProvider{\n\t\tBuilder: builder,\n\t\tt: t,\n\t\tactors: make(map[address.Address]*types.Actor)}\n}", "func NewMockmakeRequester(ctrl *gomock.Controller) *MockmakeRequester {\n\tmock := &MockmakeRequester{ctrl: ctrl}\n\tmock.recorder = &MockmakeRequesterMockRecorder{mock}\n\treturn mock\n}", "func NewDownloaderMock(t minimock.Tester) *DownloaderMock {\n\tm := &DownloaderMock{t: t}\n\tif controller, ok := t.(minimock.MockController); ok {\n\t\tcontroller.RegisterMocker(m)\n\t}\n\n\tm.DownloadMock = mDownloaderMockDownload{mock: m}\n\tm.DownloadMock.callArgs = []*DownloaderMockDownloadParams{}\n\n\treturn m\n}", "func New() *cachestub {\n\treturn &cachestub{}\n}", "func New(executor GetExecutor, lc logger.LoggingClient) *get {\n\treturn &get{\n\t\texecutor: executor,\n\t\tloggingClient: lc,\n\t}\n}", "func New(apiKey string) (*Fetcher, error) {\n\tif apiKey == \"\" {\n\t\treturn nil, errors.New(\"The apiKey must have a value\")\n\t}\n\n\tc := newsapi.Client{APIKey: apiKey}\n\n\tf := new(Fetcher)\n\tf.Client = c\n\n\treturn f, nil\n}", "func newFakeClient() client.Client {\n\treturn fakeclient.NewFakeClient()\n}", "func NewFetcherDefault(l *logrusx.Logger, cancelAfter time.Duration, ttl time.Duration, opts ...FetcherOption) *FetcherDefault {\n\tf := &FetcherDefault{\n\t\tcancelAfter: cancelAfter,\n\t\tl: l,\n\t\tttl: ttl,\n\t\tkeys: make(map[string]jose.JSONWebKeySet),\n\t\tfetchedAt: make(map[string]time.Time),\n\t\tclient: httpx.NewResilientClient(httpx.ResilientClientWithConnectionTimeout(15 * time.Second)).StandardClient(),\n\t\tmux: cloudstorage.NewURLMux(),\n\t}\n\tfor _, o := range opts {\n\t\to(f)\n\t}\n\treturn f\n}", "func NewMockFinder(result [][]byte) *MockFinder {\n\treturn &MockFinder{\n\t\tfnd: NewCachedIndex(bytes.Join(result, []byte{'\\n'})),\n\t}\n}", "func (m *HostNetworkMock) NewRequestBuilder() (r network.RequestBuilder) {\n\tatomic.AddUint64(&m.NewRequestBuilderPreCounter, 1)\n\tdefer atomic.AddUint64(&m.NewRequestBuilderCounter, 1)\n\n\tif m.NewRequestBuilderFunc == nil {\n\t\tm.t.Fatal(\"Unexpected call 
to HostNetworkMock.NewRequestBuilder\")\n\t\treturn\n\t}\n\n\treturn m.NewRequestBuilderFunc()\n}", "func NewCacheMock() *redigomock.Conn {\n\treturn redigomock.NewConn()\n}", "func NewFetch(urlfetch string, method string, headers *map[string]interface{}, data *url.Values, handlerResponse func(response.Response, error)) (Fetch, error) {\n\n\tvar fetch Fetch\n\tvar err error\n\tvar p promise.Promise\n\tif fetchi := GetInterface(); !fetchi.IsNull() {\n\t\tvar goarg map[string]interface{} = make(map[string]interface{})\n\n\t\tgoarg[\"method\"] = method\n\t\tif headers != nil {\n\t\t\tgoarg[\"headers\"] = *headers\n\t\t}\n\t\tif data != nil {\n\t\t\tgoarg[\"body\"] = data.Encode()\n\t\t}\n\n\t\tif headers == nil {\n\t\t\theaders = &map[string]interface{}{}\n\n\t\t}\n\t\tif data == nil {\n\t\t\tdata = &url.Values{}\n\t\t}\n\n\t\targ := js.ValueOf(goarg)\n\n\t\tpromisefetchobj := fetchi.Invoke(urlfetch, arg)\n\t\tif p, err = promise.NewFromJSObject(promisefetchobj); err == nil {\n\n\t\t\tif handlerResponse != nil {\n\t\t\t\tp.Async(func(obj baseobject.BaseObject) *promise.Promise {\n\n\t\t\t\t\tvar r response.Response\n\t\t\t\t\tr, err = response.NewFromJSObject(obj.JSObject())\n\t\t\t\t\thandlerResponse(r, err)\n\n\t\t\t\t\treturn nil\n\t\t\t\t}, func(e error) {\n\t\t\t\t\thandlerResponse(response.Response{}, err)\n\t\t\t\t})\n\t\t\t}\n\n\t\t\tfetch.BaseObject = fetch.SetObject(p.JSObject())\n\t\t}\n\n\t} else {\n\t\terr = ErrNotImplemented\n\t}\n\n\tfetch.Debug(\"❗❗Use of fetch.NewFetch is deprecated❗❗\")\n\treturn fetch, err\n}", "func createMockServer() (*httptest.Server, error) {\n\tdata, err := os.ReadFile(\"testdata/metadata_service.json\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Note(ederst): source of inspiration https://clavinjune.dev/en/blogs/mocking-http-call-in-golang-a-better-way/\n\tmockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif strings.HasSuffix(r.URL.Path, \"/openstack/latest/meta_data.json\") {\n\t\t\tmockMetadataEndpoint(w, r, string(data))\n\t\t} else {\n\t\t\thttp.NotFoundHandler().ServeHTTP(w, r)\n\t\t}\n\t}))\n\n\treturn mockServer, nil\n}", "func newCache(auth azure.Authorizer) *Cache {\n\treturn &Cache{\n\t\tclient: NewClient(auth),\n\t}\n}", "func NewMock() MockClient {\n\treturn NewMockWithLogger(&noopLogger{})\n}", "func New(url, token string, mock bool, l *logrus.Logger) Nest {\n\n\tinitLog(l)\n\n\tlogDebug(funcName(), \"New nest structure\", url)\n\n\t// Read mock file\n\tif mock {\n\t\tlogWarn(funcName(), \"Mock activated !!!\")\n\t\tmockFileByte = readFile(mockFile)\n\t}\n\n\trest = http.New(log)\n\n\treturn &nest{url: url, token: token, mock: mock}\n\n}", "func newMockStream() *mockStream {\n\t// just need a new context specific to this stream,\n\t// easiest way to get it.\n\tctx, _ := context.WithCancel(context.TODO())\n\n\treturn &mockStream{\n\t\tsendChan: make(chan *Response, 100),\n\t\trecvChan: make(chan *Request, 100),\n\t\tctx: ctx,\n\t}\n}", "func NewLocalFetcher() *LocalFetcher {\n\treturn &LocalFetcher{\n\t\tdata: make(map[string]*asset.Asset),\n\t}\n}", "func New(ctx context.Context, next http.Handler, config *Config, name string) (http.Handler, error) {\n\n\tif len(config.Name) == 0 {\n\t\treturn nil, fmt.Errorf(\"name cannot be null\")\n\t}\n\n\trequest, err := buildRequest(config.ServiceUrl, config.Name, config.Timeout, \"\", \"\")\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error while building request\")\n\t}\n\n\treturn &Manager{\n\t\tnext: next,\n\t\tname: 
config.Name,\n\t\trequest: request,\n\t\tserviceUrl: config.ServiceUrl,\n\t\ttimeout: config.Timeout,\n\t}, nil\n}", "func newCaller(client *http.Client, marshaller messageMarshaller, url string, compress bool) *caller {\n\treturn &caller{\n\t\tclient: client,\n\t\tmarshaller: marshaller,\n\t\turl: url,\n\t\tcompress: compress,\n\t\tbufferPool: &sync.Pool{\n\t\t\tNew: func() interface{} {\n\t\t\t\treturn bytes.NewBuffer(nil)\n\t\t\t},\n\t\t},\n\t}\n}", "func newTestCache() *testCache {\n\tcache := new(testCache)\n\tcache.data = maps.New()\n\treturn cache\n}", "func (f *MockingFetcher) Fetch(ctx context.Context, name string) (Function, error) {\n\tr, err := f.Fetcher.Fetch(ctx, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn mockFunction(r), nil\n}", "func NewFetcher(opts FetcherOptions) Fetcher {\n\tcfg := &tls.Config{InsecureSkipVerify: true}\n\ttransport := &http.Transport{TLSClientConfig: cfg}\n\tclient := &http.Client{Transport: transport}\n\treturn &URLFetcher{\n\t\tclient: client,\n\t\toptions: opts,\n\t}\n}", "func New(\n\tfetcher Fetcher,\n\tss ...Setting,\n) FetchCloser {\n\tcfetcher := AsCFetcher{fetcher}\n\tccfetcher := CNew(cfetcher, ss...)\n\treturn struct {\n\t\tFetcher\n\t\tio.Closer\n\t}{AsFetcher{ccfetcher}, ccfetcher}\n}", "func (g *FakeClientFactory) New(context.Context, client.Reader, string, string) (capb.ConfigAgentClient, controllers.ConnCloseFunc, error) {\n\tif g.Caclient == nil {\n\t\tg.Reset()\n\t}\n\treturn g.Caclient, emptyConnCloseFunc, nil\n}", "func newTestGitHubClient() *Client {\n\tgclient := github.NewClient(nil)\n\tclient := Client{\n\t\tclient: gclient,\n\t}\n\treturn &client\n}", "func New() *Client {\n return &Client{&API{}}\n}", "func (bil *baseInstanceList) newMockCloud() cloud.Cloud {\n\tc := cloud.NewMockGCE(nil)\n\n\t// insert hooks to lazy create a instance when needed\n\tc.MockInstances.GetHook = bil.newGAGetHook()\n\tc.MockBetaInstances.GetHook = bil.newBetaGetHook()\n\n\treturn c\n}", "func newRetrievingNetstore() (ret *retrievalMock, mockStore storage.Storer, ns storage.Storer) {\n\tretrieve := &retrievalMock{}\n\tstore := mock.NewStorer()\n\tnstore := netstore.New(store, retrieve, mockValidator{})\n\n\treturn retrieve, store, nstore\n}", "func _getMock(url string) (content []byte, err error) {\n\tvar idnum = crc32.ChecksumIEEE([]byte(url))%uint32(5) + 1\n\tvar response = fmt.Sprintf(mockResponseTemplate, idnum, idnum, \"no message\", 200)\n\treturn []byte(response), nil\n}", "func NewMockrequester(ctrl *gomock.Controller) *Mockrequester {\n\tmock := &Mockrequester{ctrl: ctrl}\n\tmock.recorder = &MockrequesterMockRecorder{mock}\n\treturn mock\n}", "func FakeNew() (*Client, *FakeClientset) {\n\treturn FakeNewWithIngressSupports(false, true)\n}" ]
[ "0.7179141", "0.62835836", "0.6230573", "0.6191782", "0.618321", "0.61163497", "0.6053471", "0.6033386", "0.6015921", "0.5985766", "0.59277", "0.58966744", "0.5880312", "0.58772784", "0.5871133", "0.5871133", "0.58683485", "0.5854461", "0.5852765", "0.5817213", "0.58112985", "0.57880485", "0.57874835", "0.5774056", "0.5768023", "0.57656413", "0.57412285", "0.5728065", "0.57271355", "0.5698483", "0.5673399", "0.563453", "0.56241804", "0.56093484", "0.5591731", "0.55840665", "0.556029", "0.55523884", "0.55467814", "0.5546058", "0.55047756", "0.54989", "0.54775196", "0.5475121", "0.5454481", "0.5445564", "0.5435211", "0.54205513", "0.5402483", "0.539429", "0.53869927", "0.538603", "0.5384951", "0.53830826", "0.53723174", "0.5370546", "0.5347142", "0.53423655", "0.53402287", "0.53365004", "0.53360444", "0.53217834", "0.53158635", "0.5312302", "0.5297244", "0.52917117", "0.5283868", "0.5260206", "0.52576745", "0.5257298", "0.52568597", "0.5232141", "0.5220399", "0.5215878", "0.5213801", "0.5205426", "0.52038765", "0.5196156", "0.5195212", "0.51867616", "0.51765645", "0.5172809", "0.5171269", "0.51706135", "0.5170375", "0.5167917", "0.51622516", "0.5161629", "0.5161459", "0.51425564", "0.51345205", "0.51255774", "0.51200354", "0.51100874", "0.5108984", "0.5100043", "0.50889266", "0.5087869", "0.50878227", "0.50778174" ]
0.6813213
1
EXPECT returns an object that allows the caller to indicate expected use
func (m *MockFetcher) EXPECT() *MockFetcherMockRecorder { return m.recorder }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (mmGetObject *mClientMockGetObject) Expect(ctx context.Context, head insolar.Reference) *mClientMockGetObject {\n\tif mmGetObject.mock.funcGetObject != nil {\n\t\tmmGetObject.mock.t.Fatalf(\"ClientMock.GetObject mock is already set by Set\")\n\t}\n\n\tif mmGetObject.defaultExpectation == nil {\n\t\tmmGetObject.defaultExpectation = &ClientMockGetObjectExpectation{}\n\t}\n\n\tmmGetObject.defaultExpectation.params = &ClientMockGetObjectParams{ctx, head}\n\tfor _, e := range mmGetObject.expectations {\n\t\tif minimock.Equal(e.params, mmGetObject.defaultExpectation.params) {\n\t\t\tmmGetObject.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmGetObject.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmGetObject\n}", "func (r Requester) Assert(actual, expected interface{}) Requester {\n\t//r.actualResponse = actual\n\t//r.expectedResponse = expected\n\treturn r\n}", "func (r *Request) Expect(t *testing.T) *Response {\n\tr.apiTest.t = t\n\treturn r.apiTest.response\n}", "func (m *MockNotary) Notarize(arg0 string) (map[string]interface{}, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Notarize\", arg0)\n\tret0, _ := ret[0].(map[string]interface{})\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (tc TestCases) expect() {\n\tfmt.Println(cnt)\n\tcnt++\n\tif !reflect.DeepEqual(tc.resp, tc.respExp) {\n\t\ttc.t.Error(fmt.Sprintf(\"\\nRequested: \", tc.req, \"\\nExpected: \", tc.respExp, \"\\nFound: \", tc.resp))\n\t}\n}", "func (r *Request) Expect(t TestingT) *Response {\n\tr.apiTest.t = t\n\treturn r.apiTest.response\n}", "func Expect(t cbtest.T, actual interface{}, matcher matcher.Matcher, labelAndArgs ...interface{}) {\n\tt.Helper()\n\tres := ExpectE(t, actual, matcher, labelAndArgs...)\n\tif !res {\n\t\tt.FailNow()\n\t}\n}", "func (m *MockisObject_Obj) EXPECT() *MockisObject_ObjMockRecorder {\n\treturn m.recorder\n}", "func Expect(t *testing.T, v, m interface{}) {\n\tvt, vok := v.(Equaler)\n\tmt, mok := m.(Equaler)\n\n\tvar state bool\n\tif vok && mok {\n\t\tstate = vt.Equal(mt)\n\t} else {\n\t\tstate = reflect.DeepEqual(v, m)\n\t}\n\n\tif state {\n\t\tflux.FatalFailed(t, \"Value %+v and %+v are not a match\", v, m)\n\t\treturn\n\t}\n\tflux.LogPassed(t, \"Value %+v and %+v are a match\", v, m)\n}", "func (mmState *mClientMockState) Expect() *mClientMockState {\n\tif mmState.mock.funcState != nil {\n\t\tmmState.mock.t.Fatalf(\"ClientMock.State mock is already set by Set\")\n\t}\n\n\tif mmState.defaultExpectation == nil {\n\t\tmmState.defaultExpectation = &ClientMockStateExpectation{}\n\t}\n\n\treturn mmState\n}", "func (mmProvide *mContainerMockProvide) Expect(constructor interface{}) *mContainerMockProvide {\n\tif mmProvide.mock.funcProvide != nil {\n\t\tmmProvide.mock.t.Fatalf(\"ContainerMock.Provide mock is already set by Set\")\n\t}\n\n\tif mmProvide.defaultExpectation == nil {\n\t\tmmProvide.defaultExpectation = &ContainerMockProvideExpectation{}\n\t}\n\n\tmmProvide.defaultExpectation.params = &ContainerMockProvideParams{constructor}\n\tfor _, e := range mmProvide.expectations {\n\t\tif minimock.Equal(e.params, mmProvide.defaultExpectation.params) {\n\t\t\tmmProvide.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmProvide.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmProvide\n}", "func Mock() Env {\n\treturn mock.New()\n}", "func (mmGetCode *mClientMockGetCode) Expect(ctx context.Context, ref insolar.Reference) *mClientMockGetCode {\n\tif mmGetCode.mock.funcGetCode != nil 
{\n\t\tmmGetCode.mock.t.Fatalf(\"ClientMock.GetCode mock is already set by Set\")\n\t}\n\n\tif mmGetCode.defaultExpectation == nil {\n\t\tmmGetCode.defaultExpectation = &ClientMockGetCodeExpectation{}\n\t}\n\n\tmmGetCode.defaultExpectation.params = &ClientMockGetCodeParams{ctx, ref}\n\tfor _, e := range mmGetCode.expectations {\n\t\tif minimock.Equal(e.params, mmGetCode.defaultExpectation.params) {\n\t\t\tmmGetCode.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmGetCode.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmGetCode\n}", "func expect(t *testing.T, method, url string, testieOptions ...func(*http.Request)) *testie {\n\treq, err := http.NewRequest(method, url, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor _, opt := range testieOptions {\n\t\topt(req)\n\t}\n\n\treturn testReq(t, req)\n}", "func (_m *MockOStream) EXPECT() *MockOStreamMockRecorder {\n\treturn _m.recorder\n}", "func (mmGetUser *mStorageMockGetUser) Expect(ctx context.Context, userID int64) *mStorageMockGetUser {\n\tif mmGetUser.mock.funcGetUser != nil {\n\t\tmmGetUser.mock.t.Fatalf(\"StorageMock.GetUser mock is already set by Set\")\n\t}\n\n\tif mmGetUser.defaultExpectation == nil {\n\t\tmmGetUser.defaultExpectation = &StorageMockGetUserExpectation{}\n\t}\n\n\tmmGetUser.defaultExpectation.params = &StorageMockGetUserParams{ctx, userID}\n\tfor _, e := range mmGetUser.expectations {\n\t\tif minimock.Equal(e.params, mmGetUser.defaultExpectation.params) {\n\t\t\tmmGetUser.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmGetUser.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmGetUser\n}", "func (mmGetObject *mClientMockGetObject) Return(o1 ObjectDescriptor, err error) *ClientMock {\n\tif mmGetObject.mock.funcGetObject != nil {\n\t\tmmGetObject.mock.t.Fatalf(\"ClientMock.GetObject mock is already set by Set\")\n\t}\n\n\tif mmGetObject.defaultExpectation == nil {\n\t\tmmGetObject.defaultExpectation = &ClientMockGetObjectExpectation{mock: mmGetObject.mock}\n\t}\n\tmmGetObject.defaultExpectation.results = &ClientMockGetObjectResults{o1, err}\n\treturn mmGetObject.mock\n}", "func (mmGather *mGathererMockGather) Expect() *mGathererMockGather {\n\tif mmGather.mock.funcGather != nil {\n\t\tmmGather.mock.t.Fatalf(\"GathererMock.Gather mock is already set by Set\")\n\t}\n\n\tif mmGather.defaultExpectation == nil {\n\t\tmmGather.defaultExpectation = &GathererMockGatherExpectation{}\n\t}\n\n\treturn mmGather\n}", "func (m *MockParser) EXPECT() *MockParserMockRecorder {\n\treturn m.recorder\n}", "func (m *MockParser) EXPECT() *MockParserMockRecorder {\n\treturn m.recorder\n}", "func (mmWriteTo *mDigestHolderMockWriteTo) Expect(w io.Writer) *mDigestHolderMockWriteTo {\n\tif mmWriteTo.mock.funcWriteTo != nil {\n\t\tmmWriteTo.mock.t.Fatalf(\"DigestHolderMock.WriteTo mock is already set by Set\")\n\t}\n\n\tif mmWriteTo.defaultExpectation == nil {\n\t\tmmWriteTo.defaultExpectation = &DigestHolderMockWriteToExpectation{}\n\t}\n\n\tmmWriteTo.defaultExpectation.params = &DigestHolderMockWriteToParams{w}\n\tfor _, e := range mmWriteTo.expectations {\n\t\tif minimock.Equal(e.params, mmWriteTo.defaultExpectation.params) {\n\t\t\tmmWriteTo.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmWriteTo.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmWriteTo\n}", "func (rb *RequestBuilder) EXPECT() *ResponseAsserter {\n\treq := httptest.NewRequest(rb.method, rb.path, rb.body)\n\tfor k, v := range rb.hdr {\n\t\treq.Header[k] = v\n\t}\n\n\trec := 
httptest.NewRecorder()\n\trb.cas.h.ServeHTTP(rec, req)\n\n\treturn &ResponseAsserter{\n\t\trec: rec,\n\t\treq: req,\n\t\tb: rb,\n\t\tfail: rb.fail.\n\t\t\tCopy().\n\t\t\tWithRequest(req).\n\t\t\tWithResponse(rec),\n\t}\n}", "func (mmGetState *mGatewayMockGetState) Expect() *mGatewayMockGetState {\n\tif mmGetState.mock.funcGetState != nil {\n\t\tmmGetState.mock.t.Fatalf(\"GatewayMock.GetState mock is already set by Set\")\n\t}\n\n\tif mmGetState.defaultExpectation == nil {\n\t\tmmGetState.defaultExpectation = &GatewayMockGetStateExpectation{}\n\t}\n\n\treturn mmGetState\n}", "func (m *mParcelMockGetSign) Expect() *mParcelMockGetSign {\n\tm.mock.GetSignFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &ParcelMockGetSignExpectation{}\n\t}\n\n\treturn m\n}", "func (mmCreateTag *mTagCreatorMockCreateTag) Expect(t1 semantic.Tag) *mTagCreatorMockCreateTag {\n\tif mmCreateTag.mock.funcCreateTag != nil {\n\t\tmmCreateTag.mock.t.Fatalf(\"TagCreatorMock.CreateTag mock is already set by Set\")\n\t}\n\n\tif mmCreateTag.defaultExpectation == nil {\n\t\tmmCreateTag.defaultExpectation = &TagCreatorMockCreateTagExpectation{}\n\t}\n\n\tmmCreateTag.defaultExpectation.params = &TagCreatorMockCreateTagParams{t1}\n\tfor _, e := range mmCreateTag.expectations {\n\t\tif minimock.Equal(e.params, mmCreateTag.defaultExpectation.params) {\n\t\t\tmmCreateTag.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmCreateTag.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmCreateTag\n}", "func (m *MockActorUsecase) EXPECT() *MockActorUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *mParcelMockGetCaller) Expect() *mParcelMockGetCaller {\n\tm.mock.GetCallerFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &ParcelMockGetCallerExpectation{}\n\t}\n\n\treturn m\n}", "func mockAlwaysRun() bool { return true }", "func (m *MockArg) EXPECT() *MockArgMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (m *MockUsecase) EXPECT() *MockUsecaseMockRecorder {\n\treturn m.recorder\n}", "func (st *SDKTester) Test(resp interface{}) {\n\tif resp == nil || st.respWant == nil {\n\t\tst.t.Logf(\"response want/got is nil, abort\\n\")\n\t\treturn\n\t}\n\n\trespMap := st.getFieldMap(resp)\n\tfor i, v := range st.respWant {\n\t\tif reflect.DeepEqual(v, respMap[i]) {\n\t\t\tcontinue\n\t\t}\n\t\tswitch x := respMap[i].(type) {\n\t\tcase Stringer:\n\t\t\tif !assert.Equal(st.t, v, x.String()) {\n\t\t\t\tst.t.Errorf(\"%s want %v, got %v\", i, v, respMap[i])\n\t\t\t}\n\t\tcase map[string]interface{}:\n\t\t\tif value, ok := x[\"Value\"]; ok {\n\t\t\t\tif !assert.Equal(st.t, v, value) {\n\t\t\t\t\tst.t.Errorf(\"%s want %v, got %v\", i, v, respMap[i])\n\t\t\t\t}\n\t\t\t}\n\t\tcase Inter:\n\t\t\tif !assert.Equal(st.t, v, x.Int()) {\n\t\t\t\tst.t.Errorf(\"%s want %v, got %v\", i, v, respMap[i])\n\t\t\t}\n\t\tdefault:\n\t\t\tif !assert.Equal(st.t, v, 
respMap[i]) {\n\t\t\t\tst.t.Errorf(\"%s want %v, got %v\", i, v, respMap[i])\n\t\t\t}\n\t\t}\n\t}\n}", "func (m *MockCreator) EXPECT() *MockCreatorMockRecorder {\n\treturn m.recorder\n}", "func (m *MockCreator) EXPECT() *MockCreatorMockRecorder {\n\treturn m.recorder\n}", "func TestCallFunc_arguments(t *testing.T) {\n\n}", "func (m *mParcelMockGetSender) Expect() *mParcelMockGetSender {\n\tm.mock.GetSenderFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &ParcelMockGetSenderExpectation{}\n\t}\n\n\treturn m\n}", "func TestGetNone4A(t *testing.T) {\n}", "func expectEqual(value, expected interface{}) {\n\tif value != expected {\n\t\tfmt.Printf(\"Fehler: %v bekommen, erwartet war aber %v.\\n\", value, expected)\n\t} else {\n\t\tfmt.Printf(\"OK: %v bekommen, erwartet war aber %v.\\n\", value, expected)\n\t}\n}", "func (mmHasPendings *mClientMockHasPendings) Expect(ctx context.Context, object insolar.Reference) *mClientMockHasPendings {\n\tif mmHasPendings.mock.funcHasPendings != nil {\n\t\tmmHasPendings.mock.t.Fatalf(\"ClientMock.HasPendings mock is already set by Set\")\n\t}\n\n\tif mmHasPendings.defaultExpectation == nil {\n\t\tmmHasPendings.defaultExpectation = &ClientMockHasPendingsExpectation{}\n\t}\n\n\tmmHasPendings.defaultExpectation.params = &ClientMockHasPendingsParams{ctx, object}\n\tfor _, e := range mmHasPendings.expectations {\n\t\tif minimock.Equal(e.params, mmHasPendings.defaultExpectation.params) {\n\t\t\tmmHasPendings.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmHasPendings.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmHasPendings\n}", "func (mmGetPacketSignature *mPacketParserMockGetPacketSignature) Expect() *mPacketParserMockGetPacketSignature {\n\tif mmGetPacketSignature.mock.funcGetPacketSignature != nil {\n\t\tmmGetPacketSignature.mock.t.Fatalf(\"PacketParserMock.GetPacketSignature mock is already set by Set\")\n\t}\n\n\tif mmGetPacketSignature.defaultExpectation == nil {\n\t\tmmGetPacketSignature.defaultExpectation = &PacketParserMockGetPacketSignatureExpectation{}\n\t}\n\n\treturn mmGetPacketSignature\n}", "func Run(t testing.TB, cloud cloud.Client, src string, opts ...RunOption) {\n\n\tif cloud == nil {\n\t\tcloud = mockcloud.Client(nil)\n\t}\n\n\tvm := otto.New()\n\n\tpkg, err := godotto.Apply(context.Background(), vm, cloud)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tvm.Set(\"cloud\", pkg)\n\tvm.Set(\"equals\", func(call otto.FunctionCall) otto.Value {\n\t\tvm := call.Otto\n\t\tgot, err := call.Argument(0).Export()\n\t\tif err != nil {\n\t\t\tottoutil.Throw(vm, err.Error())\n\t\t}\n\t\twant, err := call.Argument(1).Export()\n\t\tif err != nil {\n\t\t\tottoutil.Throw(vm, err.Error())\n\t\t}\n\t\tok, cause := deepEqual(got, want)\n\t\tif ok {\n\t\t\treturn otto.UndefinedValue()\n\t\t}\n\t\tmsg := \"assertion failed!\\n\" + cause\n\n\t\tif len(call.ArgumentList) > 2 {\n\t\t\tformat, err := call.ArgumentList[2].ToString()\n\t\t\tif err != nil {\n\t\t\t\tottoutil.Throw(vm, err.Error())\n\t\t\t}\n\t\t\tmsg += \"\\n\" + format\n\t\t}\n\t\tottoutil.Throw(vm, msg)\n\t\treturn otto.UndefinedValue()\n\t})\n\tvm.Set(\"assert\", func(call otto.FunctionCall) otto.Value {\n\t\tvm := call.Otto\n\t\tv, err := call.Argument(0).ToBoolean()\n\t\tif err != nil {\n\t\t\tottoutil.Throw(vm, err.Error())\n\t\t}\n\t\tif v {\n\t\t\treturn otto.UndefinedValue()\n\t\t}\n\t\tmsg := \"assertion failed!\"\n\t\tif len(call.ArgumentList) > 1 {\n\t\t\tformat, err := call.ArgumentList[1].ToString()\n\t\t\tif err != nil 
{\n\t\t\t\tottoutil.Throw(vm, err.Error())\n\t\t\t}\n\t\t\tmsg += \"\\n\" + format\n\t\t}\n\t\tottoutil.Throw(vm, msg)\n\t\treturn otto.UndefinedValue()\n\t})\n\tscript, err := vm.Compile(\"\", src)\n\tif err != nil {\n\t\tt.Fatalf(\"invalid code: %v\", err)\n\t}\n\n\tfor _, opt := range opts {\n\t\tif err := opt(vm); err != nil {\n\t\t\tt.Fatalf(\"can't apply option: %v\", err)\n\t\t}\n\t}\n\n\tif _, err := vm.Run(script); err != nil {\n\t\tif oe, ok := err.(*otto.Error); ok {\n\t\t\tt.Fatal(oe.String())\n\t\t} else {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}", "func TestSetGoodArgs(t *testing.T) {\n\tfmt.Println(\"Entering the test method for SetGoodArgs\")\n\tprovcc := new(SimpleAsset)\n\tstub := shim.NewMockStub(\"ANY_PARAM\", provcc)\n\n\t// Testing the init. It always return true. No parameters in init. \n\t\n\tcheckInit(t, stub, [][]byte{[]byte(\"init\")})\n\n\tres := stub.MockInvoke(\"1\", [][]byte{[]byte(\"set\"), []byte(\"S52fkpF2rCEArSuwqyDA9tVjawUdrkGzbNQLaa7xJfA=\"),\n\t[]byte(\"agentInfo.atype\"),[]byte(\"1.2.3.4\"),\n\t[]byte(\"agentInfo.id\"),[]byte(\"agentidentifier\"),\n\t[]byte(\"agentinfo.name\"),[]byte(\"7.8.9\"),\n\t[]byte(\"agentinfo.idp\"),[]byte(\"urn:tiani-spirit:sts\"),\n\t[]byte(\"locationInfo.id\"),[]byte(\"urn:oid:1.2.3\"),\n\t[]byte(\"locationInfo.name\"),[]byte(\"General Hospital\"),\n\t[]byte(\"locationInfo.locality\"),[]byte(\"Nashville, TN\"),\n\t[]byte(\"locationInfo.docid\"),[]byte(\"1.2.3\"),\n\t[]byte(\"action\"),[]byte(\"ex:CREATE\"),\n\t[]byte(\"date\"),[]byte(\"2018-11-10T12:15:55.028Z\")})\n\n\tif res.Status != shim.OK {\n\t\tfmt.Println(\"Invoke failed\", string(res.Message))\n\t\tt.FailNow()\n\t}\n\t\n}", "func (mmRegisterResult *mClientMockRegisterResult) Expect(ctx context.Context, request insolar.Reference, result RequestResult) *mClientMockRegisterResult {\n\tif mmRegisterResult.mock.funcRegisterResult != nil {\n\t\tmmRegisterResult.mock.t.Fatalf(\"ClientMock.RegisterResult mock is already set by Set\")\n\t}\n\n\tif mmRegisterResult.defaultExpectation == nil {\n\t\tmmRegisterResult.defaultExpectation = &ClientMockRegisterResultExpectation{}\n\t}\n\n\tmmRegisterResult.defaultExpectation.params = &ClientMockRegisterResultParams{ctx, request, result}\n\tfor _, e := range mmRegisterResult.expectations {\n\t\tif minimock.Equal(e.params, mmRegisterResult.defaultExpectation.params) {\n\t\t\tmmRegisterResult.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmRegisterResult.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmRegisterResult\n}", "func Mock() Cluster { return mockCluster{} }", "func (m *MockS3API) EXPECT() *MockS3APIMockRecorder {\n\treturn m.recorder\n}", "func mockedGranter(kubeutil *kube.Kube, app *v1.RadixRegistration, namespace string, serviceAccount *corev1.ServiceAccount) error {\n\treturn nil\n}", "func (mmGetPendings *mClientMockGetPendings) Expect(ctx context.Context, objectRef insolar.Reference) *mClientMockGetPendings {\n\tif mmGetPendings.mock.funcGetPendings != nil {\n\t\tmmGetPendings.mock.t.Fatalf(\"ClientMock.GetPendings mock is already set by Set\")\n\t}\n\n\tif mmGetPendings.defaultExpectation == nil {\n\t\tmmGetPendings.defaultExpectation = &ClientMockGetPendingsExpectation{}\n\t}\n\n\tmmGetPendings.defaultExpectation.params = &ClientMockGetPendingsParams{ctx, objectRef}\n\tfor _, e := range mmGetPendings.expectations {\n\t\tif minimock.Equal(e.params, mmGetPendings.defaultExpectation.params) {\n\t\t\tmmGetPendings.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", 
*mmGetPendings.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmGetPendings\n}", "func (m *MockOrg) EXPECT() *MockOrgMockRecorder {\n\treturn m.recorder\n}", "func (mmGetUserLocation *mStorageMockGetUserLocation) Expect(ctx context.Context, userID int64) *mStorageMockGetUserLocation {\n\tif mmGetUserLocation.mock.funcGetUserLocation != nil {\n\t\tmmGetUserLocation.mock.t.Fatalf(\"StorageMock.GetUserLocation mock is already set by Set\")\n\t}\n\n\tif mmGetUserLocation.defaultExpectation == nil {\n\t\tmmGetUserLocation.defaultExpectation = &StorageMockGetUserLocationExpectation{}\n\t}\n\n\tmmGetUserLocation.defaultExpectation.params = &StorageMockGetUserLocationParams{ctx, userID}\n\tfor _, e := range mmGetUserLocation.expectations {\n\t\tif minimock.Equal(e.params, mmGetUserLocation.defaultExpectation.params) {\n\t\t\tmmGetUserLocation.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmGetUserLocation.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmGetUserLocation\n}", "func (mmCreate *mPaymentRepositoryMockCreate) Expect(ctx context.Context, from int64, to int64, amount int64) *mPaymentRepositoryMockCreate {\n\tif mmCreate.mock.funcCreate != nil {\n\t\tmmCreate.mock.t.Fatalf(\"PaymentRepositoryMock.Create mock is already set by Set\")\n\t}\n\n\tif mmCreate.defaultExpectation == nil {\n\t\tmmCreate.defaultExpectation = &PaymentRepositoryMockCreateExpectation{}\n\t}\n\n\tmmCreate.defaultExpectation.params = &PaymentRepositoryMockCreateParams{ctx, from, to, amount}\n\tfor _, e := range mmCreate.expectations {\n\t\tif minimock.Equal(e.params, mmCreate.defaultExpectation.params) {\n\t\t\tmmCreate.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmCreate.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmCreate\n}", "func (mmAuther *mGatewayMockAuther) Expect() *mGatewayMockAuther {\n\tif mmAuther.mock.funcAuther != nil {\n\t\tmmAuther.mock.t.Fatalf(\"GatewayMock.Auther mock is already set by Set\")\n\t}\n\n\tif mmAuther.defaultExpectation == nil {\n\t\tmmAuther.defaultExpectation = &GatewayMockAutherExpectation{}\n\t}\n\n\treturn mmAuther\n}", "func TestObjectsMeetReq(t *testing.T) {\n\tvar kr verifiable.StorageReader\n\tvar kw verifiable.StorageWriter\n\n\tvar m verifiable.MutatorService\n\n\tvar o verifiable.AuthorizationOracle\n\n\tkr = &memory.TransientStorage{}\n\tkw = &memory.TransientStorage{}\n\n\tkr = &bolt.Storage{}\n\tkw = &bolt.Storage{}\n\n\tkr = &badger.Storage{}\n\tkw = &badger.Storage{}\n\n\tm = &instant.Mutator{}\n\tm = (&batch.Mutator{}).MustCreate()\n\n\to = policy.Open\n\to = &policy.Static{}\n\n\tlog.Println(kr, kw, m, o) // \"use\" these so that go compiler will be quiet\n}", "func (mmInvoke *mContainerMockInvoke) Expect(function interface{}) *mContainerMockInvoke {\n\tif mmInvoke.mock.funcInvoke != nil {\n\t\tmmInvoke.mock.t.Fatalf(\"ContainerMock.Invoke mock is already set by Set\")\n\t}\n\n\tif mmInvoke.defaultExpectation == nil {\n\t\tmmInvoke.defaultExpectation = &ContainerMockInvokeExpectation{}\n\t}\n\n\tmmInvoke.defaultExpectation.params = &ContainerMockInvokeParams{function}\n\tfor _, e := range mmInvoke.expectations {\n\t\tif minimock.Equal(e.params, mmInvoke.defaultExpectation.params) {\n\t\t\tmmInvoke.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmInvoke.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmInvoke\n}", "func (mmGetPosition *mStoreMockGetPosition) Expect(account string, contractID string) *mStoreMockGetPosition {\n\tif mmGetPosition.mock.funcGetPosition != nil 
{\n\t\tmmGetPosition.mock.t.Fatalf(\"StoreMock.GetPosition mock is already set by Set\")\n\t}\n\n\tif mmGetPosition.defaultExpectation == nil {\n\t\tmmGetPosition.defaultExpectation = &StoreMockGetPositionExpectation{}\n\t}\n\n\tmmGetPosition.defaultExpectation.params = &StoreMockGetPositionParams{account, contractID}\n\tfor _, e := range mmGetPosition.expectations {\n\t\tif minimock.Equal(e.params, mmGetPosition.defaultExpectation.params) {\n\t\t\tmmGetPosition.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmGetPosition.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmGetPosition\n}", "func (mmGetAbandonedRequest *mClientMockGetAbandonedRequest) Expect(ctx context.Context, objectRef insolar.Reference, reqRef insolar.Reference) *mClientMockGetAbandonedRequest {\n\tif mmGetAbandonedRequest.mock.funcGetAbandonedRequest != nil {\n\t\tmmGetAbandonedRequest.mock.t.Fatalf(\"ClientMock.GetAbandonedRequest mock is already set by Set\")\n\t}\n\n\tif mmGetAbandonedRequest.defaultExpectation == nil {\n\t\tmmGetAbandonedRequest.defaultExpectation = &ClientMockGetAbandonedRequestExpectation{}\n\t}\n\n\tmmGetAbandonedRequest.defaultExpectation.params = &ClientMockGetAbandonedRequestParams{ctx, objectRef, reqRef}\n\tfor _, e := range mmGetAbandonedRequest.expectations {\n\t\tif minimock.Equal(e.params, mmGetAbandonedRequest.defaultExpectation.params) {\n\t\t\tmmGetAbandonedRequest.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmGetAbandonedRequest.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmGetAbandonedRequest\n}", "func (mmSend *mSenderMockSend) Expect(ctx context.Context, email Email) *mSenderMockSend {\n\tif mmSend.mock.funcSend != nil {\n\t\tmmSend.mock.t.Fatalf(\"SenderMock.Send mock is already set by Set\")\n\t}\n\n\tif mmSend.defaultExpectation == nil {\n\t\tmmSend.defaultExpectation = &SenderMockSendExpectation{}\n\t}\n\n\tmmSend.defaultExpectation.params = &SenderMockSendParams{ctx, email}\n\tfor _, e := range mmSend.expectations {\n\t\tif minimock.Equal(e.params, mmSend.defaultExpectation.params) {\n\t\t\tmmSend.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmSend.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmSend\n}", "func callAndVerify(msg string, client pb.GreeterClient, shouldFail bool) error {\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\tdefer cancel()\n\t_, err := client.SayHello(ctx, &pb.HelloRequest{Name: msg})\n\tif want, got := shouldFail == true, err != nil; got != want {\n\t\treturn fmt.Errorf(\"want and got mismatch, want shouldFail=%v, got fail=%v, rpc error: %v\", want, got, err)\n\t}\n\treturn nil\n}", "func (m *Mockrequester) EXPECT() *MockrequesterMockRecorder {\n\treturn m.recorder\n}", "func expectEqual(actual interface{}, extra interface{}, explain ...interface{}) {\n\tgomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), explain...)\n}", "func (m *MockstackDescriber) EXPECT() *MockstackDescriberMockRecorder {\n\treturn m.recorder\n}", "func (req *outgoingRequest) Assert(t *testing.T, fixture *fixture) {\n\tassert.Equal(t, req.path, fixture.calledPath, \"called path not as expected\")\n\tassert.Equal(t, req.method, fixture.calledMethod, \"called path not as expected\")\n\tassert.Equal(t, req.body, fixture.requestBody, \"call body no as expected\")\n}", "func (mmVerify *mDelegationTokenFactoryMockVerify) Expect(parcel mm_insolar.Parcel) *mDelegationTokenFactoryMockVerify {\n\tif mmVerify.mock.funcVerify != nil 
{\n\t\tmmVerify.mock.t.Fatalf(\"DelegationTokenFactoryMock.Verify mock is already set by Set\")\n\t}\n\n\tif mmVerify.defaultExpectation == nil {\n\t\tmmVerify.defaultExpectation = &DelegationTokenFactoryMockVerifyExpectation{}\n\t}\n\n\tmmVerify.defaultExpectation.params = &DelegationTokenFactoryMockVerifyParams{parcel}\n\tfor _, e := range mmVerify.expectations {\n\t\tif minimock.Equal(e.params, mmVerify.defaultExpectation.params) {\n\t\t\tmmVerify.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmVerify.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmVerify\n}", "func (mmRead *mDigestHolderMockRead) Expect(p []byte) *mDigestHolderMockRead {\n\tif mmRead.mock.funcRead != nil {\n\t\tmmRead.mock.t.Fatalf(\"DigestHolderMock.Read mock is already set by Set\")\n\t}\n\n\tif mmRead.defaultExpectation == nil {\n\t\tmmRead.defaultExpectation = &DigestHolderMockReadExpectation{}\n\t}\n\n\tmmRead.defaultExpectation.params = &DigestHolderMockReadParams{p}\n\tfor _, e := range mmRead.expectations {\n\t\tif minimock.Equal(e.params, mmRead.defaultExpectation.params) {\n\t\t\tmmRead.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmRead.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmRead\n}", "func (mmSend *mClientMockSend) Expect(ctx context.Context, n *Notification) *mClientMockSend {\n\tif mmSend.mock.funcSend != nil {\n\t\tmmSend.mock.t.Fatalf(\"ClientMock.Send mock is already set by Set\")\n\t}\n\n\tif mmSend.defaultExpectation == nil {\n\t\tmmSend.defaultExpectation = &ClientMockSendExpectation{}\n\t}\n\n\tmmSend.defaultExpectation.params = &ClientMockSendParams{ctx, n}\n\tfor _, e := range mmSend.expectations {\n\t\tif minimock.Equal(e.params, mmSend.defaultExpectation.params) {\n\t\t\tmmSend.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmSend.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmSend\n}", "func (mmAsByteString *mDigestHolderMockAsByteString) Expect() *mDigestHolderMockAsByteString {\n\tif mmAsByteString.mock.funcAsByteString != nil {\n\t\tmmAsByteString.mock.t.Fatalf(\"DigestHolderMock.AsByteString mock is already set by Set\")\n\t}\n\n\tif mmAsByteString.defaultExpectation == nil {\n\t\tmmAsByteString.defaultExpectation = &DigestHolderMockAsByteStringExpectation{}\n\t}\n\n\treturn mmAsByteString\n}", "func Expect(msg string) error {\n\tif msg != \"\" {\n\t\treturn errors.New(msg)\n\t} else {\n\t\treturn nil\n\t}\n}", "func (mmEncrypt *mRingMockEncrypt) Expect(t1 secrets.Text) *mRingMockEncrypt {\n\tif mmEncrypt.mock.funcEncrypt != nil {\n\t\tmmEncrypt.mock.t.Fatalf(\"RingMock.Encrypt mock is already set by Set\")\n\t}\n\n\tif mmEncrypt.defaultExpectation == nil {\n\t\tmmEncrypt.defaultExpectation = &RingMockEncryptExpectation{}\n\t}\n\n\tmmEncrypt.defaultExpectation.params = &RingMockEncryptParams{t1}\n\tfor _, e := range mmEncrypt.expectations {\n\t\tif minimock.Equal(e.params, mmEncrypt.defaultExpectation.params) {\n\t\t\tmmEncrypt.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmEncrypt.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmEncrypt\n}", "func (mmBootstrapper *mGatewayMockBootstrapper) Expect() *mGatewayMockBootstrapper {\n\tif mmBootstrapper.mock.funcBootstrapper != nil {\n\t\tmmBootstrapper.mock.t.Fatalf(\"GatewayMock.Bootstrapper mock is already set by Set\")\n\t}\n\n\tif mmBootstrapper.defaultExpectation == nil {\n\t\tmmBootstrapper.defaultExpectation = &GatewayMockBootstrapperExpectation{}\n\t}\n\n\treturn mmBootstrapper\n}", "func (m *MockNotary) EXPECT() 
*MockNotaryMockRecorder {\n\treturn m.recorder\n}", "func (m *mParcelMockSetSender) Expect(p insolar.Reference) *mParcelMockSetSender {\n\tm.mock.SetSenderFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &ParcelMockSetSenderExpectation{}\n\t}\n\tm.mainExpectation.input = &ParcelMockSetSenderInput{p}\n\treturn m\n}", "func (mmGetPacketType *mPacketParserMockGetPacketType) Expect() *mPacketParserMockGetPacketType {\n\tif mmGetPacketType.mock.funcGetPacketType != nil {\n\t\tmmGetPacketType.mock.t.Fatalf(\"PacketParserMock.GetPacketType mock is already set by Set\")\n\t}\n\n\tif mmGetPacketType.defaultExpectation == nil {\n\t\tmmGetPacketType.defaultExpectation = &PacketParserMockGetPacketTypeExpectation{}\n\t}\n\n\treturn mmGetPacketType\n}", "func (mmParsePacketBody *mPacketParserMockParsePacketBody) Expect() *mPacketParserMockParsePacketBody {\n\tif mmParsePacketBody.mock.funcParsePacketBody != nil {\n\t\tmmParsePacketBody.mock.t.Fatalf(\"PacketParserMock.ParsePacketBody mock is already set by Set\")\n\t}\n\n\tif mmParsePacketBody.defaultExpectation == nil {\n\t\tmmParsePacketBody.defaultExpectation = &PacketParserMockParsePacketBodyExpectation{}\n\t}\n\n\treturn mmParsePacketBody\n}", "func (mmAsBytes *mDigestHolderMockAsBytes) Expect() *mDigestHolderMockAsBytes {\n\tif mmAsBytes.mock.funcAsBytes != nil {\n\t\tmmAsBytes.mock.t.Fatalf(\"DigestHolderMock.AsBytes mock is already set by Set\")\n\t}\n\n\tif mmAsBytes.defaultExpectation == nil {\n\t\tmmAsBytes.defaultExpectation = &DigestHolderMockAsBytesExpectation{}\n\t}\n\n\treturn mmAsBytes\n}", "func (m *MockArticleLogic) EXPECT() *MockArticleLogicMockRecorder {\n\treturn m.recorder\n}", "func (mmKey *mIteratorMockKey) Expect() *mIteratorMockKey {\n\tif mmKey.mock.funcKey != nil {\n\t\tmmKey.mock.t.Fatalf(\"IteratorMock.Key mock is already set by Set\")\n\t}\n\n\tif mmKey.defaultExpectation == nil {\n\t\tmmKey.defaultExpectation = &IteratorMockKeyExpectation{}\n\t}\n\n\treturn mmKey\n}", "func (m *MockFactory) EXPECT() *MockFactoryMockRecorder {\n\treturn m.recorder\n}", "func (m *MockFactory) EXPECT() *MockFactoryMockRecorder {\n\treturn m.recorder\n}", "func (m *MockFactory) EXPECT() *MockFactoryMockRecorder {\n\treturn m.recorder\n}", "func (m *mOutboundMockCanAccept) Expect(p Inbound) *mOutboundMockCanAccept {\n\tm.mock.CanAcceptFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &OutboundMockCanAcceptExpectation{}\n\t}\n\tm.mainExpectation.input = &OutboundMockCanAcceptInput{p}\n\treturn m\n}", "func (m *MockLoaderFactory) EXPECT() *MockLoaderFactoryMockRecorder {\n\treturn m.recorder\n}", "func (m *MockPKG) EXPECT() *MockPKGMockRecorder {\n\treturn m.recorder\n}", "func (m *MockbucketDescriber) EXPECT() *MockbucketDescriberMockRecorder {\n\treturn m.recorder\n}", "func (m *mParcelMockType) Expect() *mParcelMockType {\n\tm.mock.TypeFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &ParcelMockTypeExpectation{}\n\t}\n\n\treturn m\n}", "func (m *MockKeystore) EXPECT() *MockKeystoreMockRecorder {\n\treturn m.recorder\n}", "func (m *MockKeystore) EXPECT() *MockKeystoreMockRecorder {\n\treturn m.recorder\n}", "func (mmExchange *mMDNSClientMockExchange) Expect(msg *mdns.Msg, address string) *mMDNSClientMockExchange {\n\tif mmExchange.mock.funcExchange != nil {\n\t\tmmExchange.mock.t.Fatalf(\"MDNSClientMock.Exchange mock is already set by Set\")\n\t}\n\n\tif mmExchange.defaultExpectation == nil 
{\n\t\tmmExchange.defaultExpectation = &MDNSClientMockExchangeExpectation{}\n\t}\n\n\tmmExchange.defaultExpectation.params = &MDNSClientMockExchangeParams{msg, address}\n\tfor _, e := range mmExchange.expectations {\n\t\tif minimock.Equal(e.params, mmExchange.defaultExpectation.params) {\n\t\t\tmmExchange.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmExchange.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmExchange\n}", "func (m *MockStream) EXPECT() *MockStreamMockRecorder {\n\treturn m.recorder\n}", "func (c Chkr) Expect(v validator, args ...interface{}) {\n\tif c.runTest(v, args...) {\n\t\tc.Fail()\n\t}\n}", "func (mmClone *mStorageMockClone) Expect(ctx context.Context, from insolar.PulseNumber, to insolar.PulseNumber, keepActual bool) *mStorageMockClone {\n\tif mmClone.mock.funcClone != nil {\n\t\tmmClone.mock.t.Fatalf(\"StorageMock.Clone mock is already set by Set\")\n\t}\n\n\tif mmClone.defaultExpectation == nil {\n\t\tmmClone.defaultExpectation = &StorageMockCloneExpectation{}\n\t}\n\n\tmmClone.defaultExpectation.params = &StorageMockCloneParams{ctx, from, to, keepActual}\n\tfor _, e := range mmClone.expectations {\n\t\tif minimock.Equal(e.params, mmClone.defaultExpectation.params) {\n\t\t\tmmClone.mock.t.Fatalf(\"Expectation set by When has same params: %#v\", *mmClone.defaultExpectation.params)\n\t\t}\n\t}\n\n\treturn mmClone\n}", "func (m *MockCodeGenerator) EXPECT() *MockCodeGeneratorMockRecorder {\n\treturn m.recorder\n}", "func (m *MockNodeAttestor) EXPECT() *MockNodeAttestorMockRecorder {\n\treturn m.recorder\n}", "func (m *MockNodeAttestor) EXPECT() *MockNodeAttestorMockRecorder {\n\treturn m.recorder\n}", "func (_m *MockIStream) EXPECT() *MockIStreamMockRecorder {\n\treturn _m.recorder\n}", "func (m *mOutboundMockGetEndpointType) Expect() *mOutboundMockGetEndpointType {\n\tm.mock.GetEndpointTypeFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &OutboundMockGetEndpointTypeExpectation{}\n\t}\n\n\treturn m\n}", "func (m *MockAZInfoProvider) EXPECT() *MockAZInfoProviderMockRecorder {\n\treturn m.recorder\n}" ]
[ "0.58157563", "0.5714918", "0.5672776", "0.5639812", "0.56273276", "0.5573085", "0.5567367", "0.5529613", "0.55066866", "0.5486919", "0.54729885", "0.54647803", "0.5460882", "0.54414886", "0.5440682", "0.5405729", "0.54035264", "0.53890616", "0.53831995", "0.53831995", "0.5369224", "0.53682834", "0.5358863", "0.5340405", "0.5338385", "0.5327707", "0.53230935", "0.53132576", "0.5307127", "0.5306891", "0.5306891", "0.5306891", "0.5306891", "0.5306891", "0.5306891", "0.5306891", "0.5306891", "0.53035146", "0.5295391", "0.5295391", "0.5291368", "0.52822006", "0.52821374", "0.52767164", "0.5273333", "0.5273239", "0.5265769", "0.52593946", "0.52572596", "0.5256972", "0.52545565", "0.5249454", "0.52421427", "0.52410823", "0.5238541", "0.52360845", "0.5235068", "0.5227199", "0.5227038", "0.52227145", "0.52144563", "0.5212412", "0.52120364", "0.5211835", "0.5211705", "0.5208191", "0.5194654", "0.5190334", "0.51877177", "0.5187148", "0.5185659", "0.51827794", "0.51817787", "0.5175451", "0.51730126", "0.5169131", "0.5167294", "0.5162394", "0.51599216", "0.51597583", "0.5159494", "0.51442164", "0.51442164", "0.51442164", "0.5143891", "0.51437116", "0.51395434", "0.51341194", "0.5133995", "0.51337904", "0.51337904", "0.51298875", "0.5129523", "0.5128482", "0.5123544", "0.51224196", "0.51162475", "0.51162475", "0.51148367", "0.51146877", "0.51091874" ]
0.0
-1
FetchLocalImage mocks base method
func (m *MockFetcher) FetchLocalImage(arg0 string) (image.Image, error) {
	ret := m.ctrl.Call(m, "FetchLocalImage", arg0)
	ret0, _ := ret[0].(image.Image)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *MockFetcher) FetchUpdatedLocalImage(arg0 context.Context, arg1 string, arg2 io.Writer) (image.Image, error) {\n\tret := m.ctrl.Call(m, \"FetchUpdatedLocalImage\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(image.Image)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockFetcher) FetchRemoteImage(arg0 string) (image.Image, error) {\n\tret := m.ctrl.Call(m, \"FetchRemoteImage\", arg0)\n\tret0, _ := ret[0].(image.Image)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestIsImageInLocalRegistry(t *testing.T) {\n\ttype testDef struct {\n\t\timageName string\n\t\tdocker test.FakeDockerClient\n\t\texpectedResult bool\n\t\texpectedError string\n\t}\n\ttests := map[string]testDef{\n\t\t\"ImageFound\": {\"a_test_image\", test.FakeDockerClient{}, true, \"\"},\n\t\t\"ImageNotFound\": {\"a_test_image:sometag\", test.FakeDockerClient{}, false, \"unable to get metadata for a_test_image:sometag\"},\n\t}\n\n\tfor test, def := range tests {\n\t\tdh := getDocker(&def.docker)\n\t\tfake := dh.kubeDockerClient.(*dockertools.FakeDockerClient)\n\t\tif def.expectedResult {\n\t\t\tfake.Image = &dockertypes.ImageInspect{ID: def.imageName}\n\t\t}\n\n\t\tresult, err := dh.IsImageInLocalRegistry(def.imageName)\n\n\t\tif e := fake.AssertCalls([]string{\"inspect_image\"}); e != nil {\n\t\t\tt.Errorf(\"%+v\", e)\n\t\t}\n\n\t\tif result != def.expectedResult {\n\t\t\tt.Errorf(\"Test - %s: Expected result: %v. Got: %v\", test, def.expectedResult, result)\n\t\t}\n\t\tif err != nil && len(def.expectedError) > 0 && !strings.Contains(err.Error(), def.expectedError) {\n\t\t\tt.Errorf(\"Test - %s: Expected error: Got: %+v\", test, err)\n\t\t}\n\t}\n}", "func (imp *Importer) fetchLocalImages() {\n items, err := ioutil.ReadDir(STORE_DIR)\n if err != nil {\n imp.sendErr(err)\n return\n }\n\n for _, info := range items {\n if info.IsDir() { continue }\n filename := info.Name()\n\n file, err := os.Open(fmt.Sprintf(\"%s/%s\", STORE_DIR, filename))\n if err != nil {\n imp.sendErr(err)\n return\n }\n\n img, err := jpeg.Decode(file)\n if err != nil {\n log.Printf(\"Error decoding image file %s to jpeg\\n\", filename)\n continue\n }\n\n ext := filepath.Ext(filename)\n id := filename[:len(filename)-len(ext)]\n\n imp.send(&imagedata.ImageData{Id: id, Data: &img})\n }\n}", "func (f *fetcher) fetchImageFrom(appName string, aciURL, ascURL, scheme string, ascFile *os.File, latest bool) (string, error) {\n\tvar rem *store.Remote\n\n\tif f.insecureSkipVerify {\n\t\tif f.ks != nil {\n\t\t\tstderr(\"rkt: warning: TLS verification and signature verification has been disabled\")\n\t\t}\n\t} else if scheme == \"docker\" {\n\t\treturn \"\", fmt.Errorf(\"signature verification for docker images is not supported (try --insecure-skip-verify)\")\n\t}\n\n\tif (f.local && scheme != \"file\") || (scheme != \"file\" && !latest) {\n\t\tvar err error\n\t\tok := false\n\t\trem, ok, err = f.s.GetRemote(aciURL)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif ok {\n\t\t\tif f.local {\n\t\t\t\tstderr(\"rkt: using image in local store for app %s\", appName)\n\t\t\t\treturn rem.BlobKey, nil\n\t\t\t}\n\t\t\tif useCached(rem.DownloadTime, rem.CacheMaxAge) {\n\t\t\t\tstderr(\"rkt: found image in local store, skipping fetching from %s\", aciURL)\n\t\t\t\treturn rem.BlobKey, nil\n\t\t\t}\n\t\t}\n\t\tif f.local {\n\t\t\treturn \"\", fmt.Errorf(\"url %s not available in local store\", aciURL)\n\t\t}\n\t}\n\n\tif scheme != \"file\" && f.debug {\n\t\tstderr(\"rkt: fetching image from %s\", aciURL)\n\t}\n\n\tvar etag string\n\tif 
rem != nil {\n\t\tetag = rem.ETag\n\t}\n\tentity, aciFile, cd, err := f.fetch(appName, aciURL, ascURL, ascFile, etag)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif cd != nil && cd.useCached {\n\t\tif rem != nil {\n\t\t\treturn rem.BlobKey, nil\n\t\t} else {\n\t\t\t// should never happen\n\t\t\tpanic(\"asked to use cached image but remote is nil\")\n\t\t}\n\t}\n\tif scheme != \"file\" {\n\t\tdefer os.Remove(aciFile.Name())\n\t}\n\n\tif entity != nil && !f.insecureSkipVerify {\n\t\tstderr(\"rkt: signature verified:\")\n\t\tfor _, v := range entity.Identities {\n\t\t\tstderr(\" %s\", v.Name)\n\t\t}\n\t}\n\tkey, err := f.s.WriteACI(aciFile, latest)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif scheme != \"file\" {\n\t\trem := store.NewRemote(aciURL, ascURL)\n\t\trem.BlobKey = key\n\t\trem.DownloadTime = time.Now()\n\t\tif cd != nil {\n\t\t\trem.ETag = cd.etag\n\t\t\trem.CacheMaxAge = cd.maxAge\n\t\t}\n\t\terr = f.s.WriteRemote(rem)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn key, nil\n}", "func TestFetch(t *testing.T) {\n\timage := \"rkt-inspect-fetch.aci\"\n\n\tctx := newRktRunCtx()\n\tdefer ctx.cleanup()\n\n\t// Fetch the image for the first time, this should write the image to the\n\t// on-disk store.\n\toldHash := patchImportAndFetchHash(image, []string{\"--exec=/inspect --read-file\"}, t, ctx)\n\n\t// Fetch the image with the same name but different content, the expecting\n\t// result is that we should get a different hash since we are not fetching\n\t// from the on-disk store.\n\tnewHash := patchImportAndFetchHash(image, []string{\"--exec=/inspect --read-file --write-file\"}, t, ctx)\n\n\tif oldHash == newHash {\n\t\tt.Fatalf(\"ACI hash should be different as the image has changed\")\n\t}\n}", "func (f OriginalFetcher) Fetch(namespace string, sourceURL string, imageHash string) (info *info.ImageProperties, downloaded bool, err error) {\n\tif sourceURL == \"\" && imageHash == \"\" {\n\t\treturn nil, false, fmt.Errorf(\"Missing Hash & URL\")\n\t}\n\n\tif imageHash != \"\" {\n\t\tinfo, err = f.fetchFromStore(namespace, imageHash)\n\t}\n\n\tif sourceURL != \"\" && (err != nil || imageHash == \"\") {\n\t\tinfo, downloaded, err = f.fetchFromSource(namespace, sourceURL)\n\t}\n\n\treturn info, downloaded, err\n}", "func fetchLocal(f *File) error {\n\n\terr := validateLocal(f)\n\tif err != nil {\n\t\tf.Status.Type = status.ERROR\n\t\treturn err\n\t}\n\tf.path = f.Url\n\tf.Status.Type = status.FETCHED\n\treturn nil\n\n}", "func LoadLocalImage(app *AppData) error {\n\tapp.LocalImage = DockerImage{\n\t\tExists: false,\n\t}\n\tctx := context.Background()\n\tcli, err := client.NewClientWithOpts(client.FromEnv)\n\tcli.NegotiateAPIVersion(ctx)\n\tdefer cli.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\tinspect, _, err := cli.ImageInspectWithRaw(ctx, app.From)\n\tif err != nil {\n\t\tif err.Error() == \"Error: No such image: \"+app.From {\n\t\t\tfmt.Printf(\"Repo not exists in local docker\\n\")\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tfmt.Printf(\"Repo exists in local docker\\n\")\n\tapp.LocalImage.Exists = true\n\tif len(inspect.RepoDigests) > 0 {\n\t\tapp.LocalImage.DockerDigest = inspect.RepoDigests[0]\n\t} else {\n\t\tapp.LocalImage.DockerDigest = inspect.ID\n\t}\n\n\t//Setting Docker Config values\n\tconfigData, err := json.Marshal(inspect.Config)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = pvjson.Unmarshal(configData, &app.LocalImage.DockerConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func TestBaseImage(t *testing.T) 
{\n\tctx, err := controllerPrepare()\n\tif err != nil {\n\t\tt.Fatal(\"Fail in controller prepare: \", err)\n\t}\n\teveBaseRef := os.Getenv(\"EVE_BASE_REF\")\n\tif len(eveBaseRef) == 0 {\n\t\teveBaseRef = \"4.10.0\"\n\t}\n\tzArch := os.Getenv(\"ZARCH\")\n\tif len(eveBaseRef) == 0 {\n\t\tzArch = \"amd64\"\n\t}\n\tHV := os.Getenv(\"HV\")\n\tif HV == \"xen\" {\n\t\tHV = \"\"\n\t}\n\tvar baseImageTests = []struct {\n\t\tdataStoreID string\n\t\timageID string\n\t\tbaseID string\n\t\timageRelativePath string\n\t\timageFormat config.Format\n\t\teveBaseRef string\n\t\tzArch string\n\t\tHV string\n\t}{\n\t\t{eServerDataStoreID,\n\n\t\t\t\"1ab8761b-5f89-4e0b-b757-4b87a9fa93ec\",\n\n\t\t\t\"22b8761b-5f89-4e0b-b757-4b87a9fa93ec\",\n\n\t\t\t\"baseos.qcow2\",\n\t\t\tconfig.Format_QCOW2,\n\t\t\teveBaseRef,\n\t\t\tzArch,\n\t\t\tHV,\n\t\t},\n\t}\n\tfor _, tt := range baseImageTests {\n\t\tbaseOSVersion := fmt.Sprintf(\"%s-%s\", tt.eveBaseRef, tt.zArch)\n\t\tif tt.HV != \"\" {\n\t\t\tbaseOSVersion = fmt.Sprintf(\"%s-%s-%s\", tt.eveBaseRef, tt.zArch, tt.HV)\n\t\t}\n\t\tt.Run(baseOSVersion, func(t *testing.T) {\n\n\t\t\terr = prepareBaseImageLocal(ctx, tt.dataStoreID, tt.imageID, tt.baseID, tt.imageRelativePath, tt.imageFormat, baseOSVersion)\n\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"Fail in prepare base image from local file: \", err)\n\t\t\t}\n\t\t\tdeviceCtx, err := ctx.GetDeviceFirst()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"Fail in get first device: \", err)\n\t\t\t}\n\t\t\tdeviceCtx.SetBaseOSConfig([]string{tt.baseID})\n\t\t\tdevUUID := deviceCtx.GetID()\n\t\t\terr = ctx.ConfigSync(deviceCtx)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"Fail in sync config with controller: \", err)\n\t\t\t}\n\t\t\tt.Run(\"Started\", func(t *testing.T) {\n\t\t\t\terr := ctx.InfoChecker(devUUID, map[string]string{\"devId\": devUUID.String(), \"shortVersion\": baseOSVersion}, einfo.ZInfoDevSW, 300)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(\"Fail in waiting for base image update init: \", err)\n\t\t\t\t}\n\t\t\t})\n\t\t\tt.Run(\"Downloaded\", func(t *testing.T) {\n\t\t\t\terr := ctx.InfoChecker(devUUID, map[string]string{\"devId\": devUUID.String(), \"shortVersion\": baseOSVersion, \"downloadProgress\": \"100\"}, einfo.ZInfoDevSW, 1500)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(\"Fail in waiting for base image download progress: \", err)\n\t\t\t\t}\n\t\t\t})\n\t\t\tt.Run(\"Logs\", func(t *testing.T) {\n\t\t\t\tif !checkLogs {\n\t\t\t\t\tt.Skip(\"no LOGS flag set - skipped\")\n\t\t\t\t}\n\t\t\t\terr = ctx.LogChecker(devUUID, map[string]string{\"devId\": devUUID.String(), \"eveVersion\": baseOSVersion}, 1200)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(\"Fail in waiting for base image logs: \", err)\n\t\t\t\t}\n\t\t\t})\n\t\t\ttimeout := time.Duration(1200)\n\n\t\t\tif !checkLogs {\n\t\t\t\ttimeout = 2400\n\t\t\t}\n\t\t\tt.Run(\"Active\", func(t *testing.T) {\n\t\t\t\terr = ctx.InfoChecker(devUUID, map[string]string{\"devId\": devUUID.String(), \"shortVersion\": baseOSVersion, \"status\": \"INSTALLED\", \"partitionState\": \"(inprogress|active)\"}, einfo.ZInfoDevSW, timeout)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(\"Fail in waiting for base image installed status: \", err)\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\t}\n\n}", "func TestBaseImage(t *testing.T) {\n\t// test with 'original.png'\n\timgs := map[string][]byte{\n\t\t\"original.png\": []byte(\"image\"),\n\t}\n\t_, err := backend.baseImage(imgs)\n\tif err != nil {\n\t\tt.Errorf(\"Got error %s\", err)\n\t}\n\n\t// test with 'original.jpg'\n\timgs = 
map[string][]byte{\n\t\t\"original.jpg\": []byte(\"image\"),\n\t}\n\t_, err = backend.baseImage(imgs)\n\tif err != nil {\n\t\tt.Errorf(\"Got error %s\", err)\n\t}\n\n\t// without 'original.*' should get an error\n\timgs = map[string][]byte{\n\t\t\"127x127.png\": []byte(\"image\"),\n\t}\n\t_, err = backend.baseImage(imgs)\n\tif err == nil {\n\t\tt.Errorf(\"Should get an error, didn't pass original image.\")\n\t}\n}", "func TestImplicitFetch(t *testing.T) {\n\tfoundMsg := \"found image in local store\"\n\n\tctx := newRktRunCtx()\n\tdefer ctx.cleanup()\n\n\t// 1. Fetch the image.\n\t// TODO(yifan): Add other ACI with different schemes.\n\timportImageAndFetchHash(t, ctx, \"docker://busybox:ubuntu-12.04\")\n\timportImageAndFetchHash(t, ctx, \"docker://busybox:latest\")\n\n\t// 2. Try run/prepare with/without tag ':latest', should not get $foundMsg.\n\tcmds := []string{\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --mds-register=false docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --mds-register=false docker://busybox:latest\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare docker://busybox:latest\", ctx.cmd()),\n\t}\n\n\tfor _, cmd := range cmds {\n\t\tt.Logf(\"Running test %v\", cmd)\n\n\t\tchild, err := gexpect.Spawn(cmd)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Cannot exec rkt: %v\", err)\n\t\t}\n\t\tif err := expectWithOutput(child, foundMsg); err == nil {\n\t\t\tt.Fatalf(\"%q should not be found\", foundMsg)\n\t\t}\n\t\tif err := child.Wait(); err != nil {\n\t\t\tt.Fatalf(\"rkt didn't terminate correctly: %v\", err)\n\t\t}\n\t}\n}", "func (f *Frontend) fetchImage(i *img.Image) (*img.Image, error) {\n\tvar err error\n\n\t// go through image proxy to resize and cache the image\n\tkey := hmacKey(i.ID)\n\tu := fmt.Sprintf(\"%v/image/225x,s%v/%v\", f.Host, key, i.ID)\n\tfmt.Println(u)\n\n\tresp, err := f.Images.Client.Get(u)\n\tif err != nil {\n\t\treturn i, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbdy, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn i, err\n\t}\n\n\ti.Base64 = base64.StdEncoding.EncodeToString(bdy)\n\treturn i, err\n}", "func (m *MockRequester) Fetch(url string) (io.ReadCloser, error) {\n\tret := m.ctrl.Call(m, \"Fetch\", url)\n\tret0, _ := ret[0].(io.ReadCloser)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (f *fetcher) fetchImage(img string, asc string, discover bool) (string, error) {\n\tif f.withDeps && !discover {\n\t\treturn \"\", fmt.Errorf(\"cannot fetch image's dependencies with discovery disabled\")\n\t}\n\thash, err := f.fetchSingleImage(img, asc, discover)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif f.withDeps {\n\t\terr = f.fetchImageDeps(hash)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn hash, nil\n}", "func ttrPullImageAsserting(ctx context.Context, t *testing.T, client apiclient.APIClient, image string) {\n\terr := ttrPullImage(ctx, client, image)\n\tassert.NilError(t, err)\n}", "func (ss *Sources) localFetch(spec v1.SourceSpec) (string, error) {\n\tp := ss.repoPath(spec)\n\terr := os.MkdirAll(p, 0750)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t//TODO prevent target directory from accumulating unused files\n\t// remove all files before copy\n\t// or\n\t// walk target dir and diff with source dir\n\n\t// Copy local dir to repo path.\n\t// Ignore .git directory.\n\terr = otia10copy.Copy(spec.URL, p, otia10copy.Options{Skip: func(src string) bool 
{\n\t\treturn filepath.Base(src) == \".git\"\n\t}})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"fetch: %w\", err)\n\t}\n\n\th, err := ss.hashAll(spec.URL) // TODO use hashAll(p) when dir is properly synced (see previous to do)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\ts := hex.EncodeToString(h.Sum(nil))\n\n\treturn s, err\n}", "func (f *SourceFetcher) Fetch(url string, namespace string) (*info.ImageProperties, bool, error) {\n\tc := make(chan FetchResult)\n\tgo f.uniqueFetchSource(c, url, namespace)\n\tr := <-c\n\treturn r.ImageDetails, r.Downloaded, r.Error\n}", "func TestRunPrepareLocal(t *testing.T) {\n\tnotAvailableMsg := \"not available in local store\"\n\tfoundMsg := \"using image in local store\"\n\n\tctx := newRktRunCtx()\n\tdefer ctx.cleanup()\n\n\tcmds := []string{\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --local --mds-register=false docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --local --mds-register=false docker://busybox:latest\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare --local docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare --local docker://busybox:latest\", ctx.cmd()),\n\t}\n\n\t// 1. Try run/prepare with the image not available in the store, should get $notAvailableMsg.\n\tfor _, cmd := range cmds {\n\t\tt.Logf(\"Running test %v\", cmd)\n\n\t\tchild, err := gexpect.Spawn(cmd)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Cannot exec rkt: %v\", err)\n\t\t}\n\t\tif err := expectWithOutput(child, notAvailableMsg); err != nil {\n\t\t\tt.Fatalf(\"%q should be found\", notAvailableMsg)\n\t\t}\n\t\tchild.Wait()\n\t}\n\n\t// 2. Fetch the image\n\timportImageAndFetchHash(t, ctx, \"docker://busybox\")\n\timportImageAndFetchHash(t, ctx, \"docker://busybox:latest\")\n\n\t// 3. 
Try run/prepare with the image available in the store, should get $foundMsg.\n\tfor _, cmd := range cmds {\n\t\tt.Logf(\"Running test %v\", cmd)\n\n\t\tchild, err := gexpect.Spawn(cmd)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Cannot exec rkt: %v\", err)\n\t\t}\n\t\tif err := expectWithOutput(child, foundMsg); err != nil {\n\t\t\tt.Fatalf(\"%q should be found\", foundMsg)\n\t\t}\n\t\tif err := child.Wait(); err != nil {\n\t\t\tt.Fatalf(\"rkt didn't terminate correctly: %v\", err)\n\t\t}\n\t}\n}", "func Fetch(imageURI string, labels map[types.ACIdentifier]string, insecure bool) (tempfile.ReadSeekCloser, error) {\n\tu, err := url.Parse(imageURI)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar puller remote.Puller\n\n\tswitch u.Scheme {\n\tcase \"file\":\n\t\tfilename := u.Path\n\t\tif u.Host != \"\" {\n\t\t\tfilename = filepath.Join(u.Host, u.Path)\n\t\t}\n\t\tf, err := os.Open(filename)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn tempfile.New(f)\n\tcase \"http\", \"https\":\n\t\tpuller = http.New()\n\tcase \"docker\":\n\t\tpuller = docker.New(insecure)\n\tcase \"aci\", \"\":\n\t\tpuller = aci.New(insecure, labels)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"%q scheme not supported\", u.Scheme)\n\t}\n\n\tr, err := puller.Pull(imageURI)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn tempfile.New(r)\n}", "func (m *MockManager) GetLoadedImageName() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetLoadedImageName\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (f *MockingFetcher) Fetch(ctx context.Context, name string) (Function, error) {\n\tr, err := f.Fetcher.Fetch(ctx, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn mockFunction(r), nil\n}", "func (c *TestClient) GetImage(project, name string) (*compute.Image, error) {\n\tif c.GetImageFn != nil {\n\t\treturn c.GetImageFn(project, name)\n\t}\n\treturn c.client.GetImage(project, name)\n}", "func (c *dockerClientMock) DownloadImage(imageSource, filePath string) (v1.Image, error) {\n\treturn &fake.FakeImage{}, nil // fmt.Errorf(\"%s\", filePath)\n}", "func (m *MockEnvironment) Fetch() map[string]interface{} {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Fetch\")\n\tret0, _ := ret[0].(map[string]interface{})\n\treturn ret0\n}", "func (m *MockUpstreamIntf) CachedRemoteDigestOfLocalHeight() blockdigest.Digest {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CachedRemoteDigestOfLocalHeight\")\n\tret0, _ := ret[0].(blockdigest.Digest)\n\treturn ret0\n}", "func getLocalImage(s *Settings) ([]byte, error) {\n\tvar image []byte\n\tvar filePath string\n\tvar file *os.File\n\tvar err error\n\n\tif len(s.Directories) > 0 {\n\t\tfound := false\n\t\tfor _, dir := range s.Directories {\n\t\t\tfilePath = path.Join(\"/\", dir, s.Context.Path)\n\t\t\tfile, err = os.Open(filePath)\n\t\t\tif err == nil {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tfile, err = os.Open(s.Local404Thumb)\n\t\t\tif err != nil {\n\t\t\t\treturn image, err\n\t\t\t}\n\t\t}\n\t} else {\n\t\tfile, err = os.Open(path.Join(\"/\", s.Context.Path))\n\t\tif err != nil {\n\t\t\tfile, err = os.Open(s.Local404Thumb)\n\t\t\tif err != nil {\n\t\t\t\treturn image, err\n\t\t\t}\n\t\t}\n\t}\n\n\tinfo, _ := file.Stat()\n\timage = make([]byte, info.Size())\n\n\t_, err = file.Read(image)\n\tif err != nil {\n\t\treturn image, err\n\t}\n\n\treturn image, nil\n}", "func (m *MockSystem) FetchURL(ctx context.Context, url string) (semver.Tags, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"FetchURL\", 
ctx, url)\n\tret0, _ := ret[0].(semver.Tags)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func dirImageMock(t *testing.T, dir, dockerReference string) private.UnparsedImage {\n\tref, err := reference.ParseNormalizedNamed(dockerReference)\n\trequire.NoError(t, err)\n\treturn dirImageMockWithRef(t, dir, refImageReferenceMock{ref: ref})\n}", "func (m *MockRepository) Fetch(bucketName, name string, model db.Model) (bool, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Fetch\", bucketName, name, model)\n\tret0, _ := ret[0].(bool)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (mr *MockFetcherMockRecorder) FetchLocalImage(arg0 interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"FetchLocalImage\", reflect.TypeOf((*MockFetcher)(nil).FetchLocalImage), arg0)\n}", "func FetchRemoteFile() {\n\n}", "func TestRemote(t *testing.T) {\n\trnd, err := random.Image(1024, 3)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ts, err := registry.TLS(\"gcr.io\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ttr := s.Client().Transport\n\n\tsrc := \"gcr.io/test/compressed\"\n\tref, err := name.ParseReference(src)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := remote.Write(ref, rnd, remote.WithTransport(tr)); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\timg, err := remote.Image(ref, remote.WithTransport(tr))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := validate.Image(img); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcf, err := img.ConfigFile()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tm, err := img.Manifest()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tlayer, err := img.LayerByDiffID(cf.RootFS.DiffIDs[0])\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\td, err := layer.Digest()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif diff := cmp.Diff(d, m.Layers[0].Digest); diff != \"\" {\n\t\tt.Errorf(\"mismatched digest: %v\", diff)\n\t}\n}", "func (m *MockImageTransferer) Download(arg0 string, arg1 core.Digest) (base.FileReader, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Download\", arg0, arg1)\n\tret0, _ := ret[0].(base.FileReader)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestExpectedImgRef(t *testing.T) {\n\n\tv, isSet := os.LookupEnv(\"DOCKERHUB_PROXY\")\n\tif isSet {\n\t\tdefer os.Setenv(\"DOCKERHUB_PROXY\", v)\n\t}\n\n\tos.Unsetenv(\"DOCKERHUB_PROXY\")\n\tassert.Equal(t,\n\t\t\"index.docker.io/library/hello-world@sha256:ebf526c198a14fa138634b9746c50ec38077ec9b3986227e79eb837d26f59dc6\",\n\t\tCompleteImageRef(\"library/hello-world@sha256:ebf526c198a14fa138634b9746c50ec38077ec9b3986227e79eb837d26f59dc6\"))\n\n\tos.Setenv(\"DOCKERHUB_PROXY\", \"my-dockerhub-proxy.tld/dockerhub-proxy\")\n\tassert.Equal(t,\n\t\t\"my-dockerhub-proxy.tld/dockerhub-proxy/library/hello-world@sha256:ebf526c198a14fa138634b9746c50ec38077ec9b3986227e79eb837d26f59dc6\",\n\t\tCompleteImageRef(\"library/hello-world@sha256:ebf526c198a14fa138634b9746c50ec38077ec9b3986227e79eb837d26f59dc6\"))\n\tos.Unsetenv(\"DOCKERHUB_PROXY\")\n}", "func (m *MockFetcher) Fetch(req utils.Request) (responseBody []byte, err error) {\n\targs := m.Called(req)\n\n\tif args.Get(0) != nil {\n\t\tresponseBody = args.Get(0).([]byte)\n\t}\n\n\terr = args.Error(1)\n\n\treturn responseBody, err\n}", "func (f *fetcher) fetchSingleImage(img string, asc string, discover bool) (string, error) {\n\tvar (\n\t\tascFile *os.File\n\t\terr error\n\t\tlatest bool\n\t)\n\tif asc != \"\" && f.ks != nil {\n\t\tascFile, err = os.Open(asc)\n\t\tif err != nil {\n\t\t\treturn \"\", 
fmt.Errorf(\"unable to open signature file: %v\", err)\n\t\t}\n\t\tdefer ascFile.Close()\n\t}\n\n\tu, err := url.Parse(img)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"not a valid image reference (%s)\", img)\n\t}\n\n\t// if img refers to a local file, ensure the scheme is file:// and make the url path absolute\n\t_, err = os.Stat(u.Path)\n\tif err == nil {\n\t\tu.Path, err = filepath.Abs(u.Path)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"unable to get abs path: %v\", err)\n\t\t}\n\t\tu.Scheme = \"file\"\n\t} else if !os.IsNotExist(err) {\n\t\treturn \"\", fmt.Errorf(\"unable to access %q: %v\", img, err)\n\t}\n\n\tif discover && u.Scheme == \"\" {\n\t\tif app := newDiscoveryApp(img); app != nil {\n\t\t\tvar discoveryError error\n\t\t\tif !f.local {\n\t\t\t\tstderr(\"rkt: searching for app image %s\", img)\n\t\t\t\tep, err := discoverApp(app, true)\n\t\t\t\tif err != nil {\n\t\t\t\t\tdiscoveryError = err\n\t\t\t\t} else {\n\t\t\t\t\t// No specified version label, mark it as latest\n\t\t\t\t\tif _, ok := app.Labels[\"version\"]; !ok {\n\t\t\t\t\t\tlatest = true\n\t\t\t\t\t}\n\t\t\t\t\treturn f.fetchImageFromEndpoints(app.Name.String(), ep, ascFile, latest)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif discoveryError != nil {\n\t\t\t\tstderr(\"discovery failed for %q: %v. Trying to find image in the store.\", img, discoveryError)\n\t\t\t}\n\t\t\tif f.local || discoveryError != nil {\n\t\t\t\treturn f.fetchImageFromStore(img)\n\t\t\t}\n\t\t}\n\t}\n\n\tswitch u.Scheme {\n\tcase \"http\", \"https\", \"file\":\n\tcase \"docker\":\n\t\tdockerURL := common.ParseDockerURL(path.Join(u.Host, u.Path))\n\t\tif dockerURL.Tag == \"latest\" {\n\t\t\tlatest = true\n\t\t}\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"rkt only supports http, https, docker or file URLs (%s)\", img)\n\t}\n\treturn f.fetchImageFromURL(u.String(), u.Scheme, ascFile, latest)\n}", "func (m *MockManager) LoadImage(arg0 context.Context, arg1 *config.Config, arg2 dockerapi.DockerClient) (*types.ImageInspect, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"LoadImage\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(*types.ImageInspect)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestCannotExecuteStatusImage(t *testing.T) {\n\tbuf := setLogBuffer()\n\tdefer func() {\n\t\tif t.Failed() {\n\t\t\tt.Log(buf.String())\n\t\t}\n\t}()\n\n\tif StatusImage == \"\" {\n\t\tt.Skip(\"no status image defined\")\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\n\tapp := &models.App{Name: id.New().String()}\n\tapp = ensureApp(t, app)\n\n\tfn := &models.Fn{\n\t\tAppID: app.ID,\n\t\tName: id.New().String(),\n\t\tImage: StatusImage,\n\t\tResourceConfig: models.ResourceConfig{\n\t\t\tMemory: memory,\n\t\t},\n\t}\n\tfn = ensureFn(t, fn)\n\n\tlb, err := LB()\n\tif err != nil {\n\t\tt.Fatalf(\"Got unexpected error: %v\", err)\n\t}\n\tu := url.URL{\n\t\tScheme: \"http\",\n\t\tHost: lb,\n\t}\n\tu.Path = path.Join(u.Path, \"invoke\", fn.ID)\n\n\tcontent := bytes.NewBuffer([]byte(`status`))\n\toutput := &bytes.Buffer{}\n\n\tresp, err := callFN(ctx, u.String(), content, output, models.TypeSync)\n\tif err != nil {\n\t\tt.Fatalf(\"Got unexpected error: %v\", err)\n\t}\n\n\tif resp.StatusCode != http.StatusBadRequest {\n\t\tt.Fatalf(\"StatusCode check failed on %v\", resp.StatusCode)\n\t}\n}", "func GetImg(fileName string) (*image.Image, error) {\n localFile := fmt.Sprintf(\"/data/edgebox/local/%s\", fileName)\n existingImageFile, err := os.Open(localFile)\n if err == nil {\n defer 
existingImageFile.Close()\n imageData, _, err := image.Decode(existingImageFile)\n if err != nil {\n return nil, err\n }\n return &imageData, nil\n }\n\n remoteFile := fmt.Sprintf(\"/data/edgebox/remote/%s\", fileName)\n existingImageFile, err = os.Open(remoteFile)\n if err == nil {\n defer existingImageFile.Close()\n imageData, _, err := image.Decode(existingImageFile)\n if err != nil {\n return nil, err\n }\n return &imageData, nil\n }\n return nil, err\n}", "func dirImageMockWithRef(t *testing.T, dir string, ref types.ImageReference) private.UnparsedImage {\n\tsrcRef, err := directory.NewReference(dir)\n\trequire.NoError(t, err)\n\tsrc, err := srcRef.NewImageSource(context.Background(), nil)\n\trequire.NoError(t, err)\n\tt.Cleanup(func() {\n\t\terr := src.Close()\n\t\trequire.NoError(t, err)\n\t})\n\treturn image.UnparsedInstance(&dirImageSourceMock{\n\t\tImageSource: imagesource.FromPublic(src),\n\t\tref: ref,\n\t}, nil)\n}", "func (m *MockCacheService) FetchFromCache(pagination model.Pagination) (model.Response, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"FetchFromCache\", pagination)\n\tret0, _ := ret[0].(model.Response)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockImageTransferer) Stat(arg0 string, arg1 core.Digest) (*core.BlobInfo, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Stat\", arg0, arg1)\n\tret0, _ := ret[0].(*core.BlobInfo)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (f *fetcher) fetch(appName string, aciURL, ascURL string, ascFile *os.File, etag string) (*openpgp.Entity, *os.File, *cacheData, error) {\n\tvar (\n\t\tentity *openpgp.Entity\n\t\tcd *cacheData\n\t)\n\n\tu, err := url.Parse(aciURL)\n\tif err != nil {\n\t\treturn nil, nil, nil, fmt.Errorf(\"error parsing ACI url: %v\", err)\n\t}\n\tif u.Scheme == \"docker\" {\n\t\tregistryURL := strings.TrimPrefix(aciURL, \"docker://\")\n\n\t\tstoreTmpDir, err := f.s.TmpDir()\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, fmt.Errorf(\"error creating temporary dir for docker to ACI conversion: %v\", err)\n\t\t}\n\t\ttmpDir, err := ioutil.TempDir(storeTmpDir, \"docker2aci-\")\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t\tdefer os.RemoveAll(tmpDir)\n\n\t\tindexName := docker2aci.GetIndexName(registryURL)\n\t\tuser := \"\"\n\t\tpassword := \"\"\n\t\tif creds, ok := f.dockerAuth[indexName]; ok {\n\t\t\tuser = creds.User\n\t\t\tpassword = creds.Password\n\t\t}\n\t\tacis, err := docker2aci.Convert(registryURL, true, tmpDir, tmpDir, user, password)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, fmt.Errorf(\"error converting docker image to ACI: %v\", err)\n\t\t}\n\n\t\taciFile, err := os.Open(acis[0])\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, fmt.Errorf(\"error opening squashed ACI file: %v\", err)\n\t\t}\n\n\t\treturn nil, aciFile, nil, nil\n\t}\n\n\t// attempt to automatically fetch the public key in case it is available on a TLS connection.\n\tif globalFlags.TrustKeysFromHttps && !globalFlags.InsecureSkipVerify && appName != \"\" {\n\t\tpkls, err := getPubKeyLocations(appName, false, globalFlags.Debug)\n\t\tif err != nil {\n\t\t\tstderr(\"Error determining key location: %v\", err)\n\t\t} else {\n\t\t\t// no http, don't ask user for accepting the key, no overriding\n\t\t\tif err := addKeys(pkls, appName, false, true, false); err != nil {\n\t\t\t\tstderr(\"Error adding keys: %v\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\tvar retrySignature bool\n\tif f.ks != nil && ascFile == nil {\n\t\tu, err := url.Parse(ascURL)\n\t\tif err != nil 
{\n\t\t\treturn nil, nil, nil, fmt.Errorf(\"error parsing ASC url: %v\", err)\n\t\t}\n\t\tif u.Scheme == \"file\" {\n\t\t\tascFile, err = os.Open(u.Path)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, nil, fmt.Errorf(\"error opening signature file: %v\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tstderr(\"Downloading signature from %v\\n\", ascURL)\n\t\t\tascFile, err = f.s.TmpFile()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, nil, fmt.Errorf(\"error setting up temporary file: %v\", err)\n\t\t\t}\n\t\t\tdefer os.Remove(ascFile.Name())\n\n\t\t\terr = f.downloadSignatureFile(ascURL, ascFile)\n\t\t\tswitch err {\n\t\t\tcase errStatusAccepted:\n\t\t\t\tretrySignature = true\n\t\t\t\tstderr(\"rkt: server requested deferring the signature download\")\n\t\t\tcase nil:\n\t\t\t\tbreak\n\t\t\tdefault:\n\t\t\t\treturn nil, nil, nil, fmt.Errorf(\"error downloading the signature file: %v\", err)\n\t\t\t}\n\t\t}\n\t\tdefer ascFile.Close()\n\t}\n\n\t// check if the identity used by the signature is in the store before a\n\t// possibly expensive download. This is only an optimization and it's\n\t// ok to skip the test if the signature will be downloaded later.\n\tif !retrySignature && f.ks != nil && appName != \"\" {\n\t\tif _, err := ascFile.Seek(0, 0); err != nil {\n\t\t\treturn nil, nil, nil, fmt.Errorf(\"error seeking signature file: %v\", err)\n\t\t}\n\t\tif entity, err = f.ks.CheckSignature(appName, bytes.NewReader([]byte{}), ascFile); err != nil {\n\t\t\tif _, ok := err.(pgperrors.SignatureError); !ok {\n\t\t\t\treturn nil, nil, nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\tvar aciFile *os.File\n\tif u.Scheme == \"file\" {\n\t\taciFile, err = os.Open(u.Path)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, fmt.Errorf(\"error opening ACI file: %v\", err)\n\t\t}\n\t} else {\n\t\taciFile, err = f.s.TmpFile()\n\t\tif err != nil {\n\t\t\treturn nil, aciFile, nil, fmt.Errorf(\"error setting up temporary file: %v\", err)\n\t\t}\n\t\tdefer os.Remove(aciFile.Name())\n\n\t\tif cd, err = f.downloadACI(aciURL, aciFile, etag); err != nil {\n\t\t\treturn nil, nil, nil, fmt.Errorf(\"error downloading ACI: %v\", err)\n\t\t}\n\t\tif cd.useCached {\n\t\t\treturn nil, nil, cd, nil\n\t\t}\n\t}\n\n\tif retrySignature {\n\t\tif err = f.downloadSignatureFile(ascURL, ascFile); err != nil {\n\t\t\treturn nil, aciFile, nil, fmt.Errorf(\"error downloading the signature file: %v\", err)\n\t\t}\n\t}\n\n\tmanifest, err := aci.ManifestFromImage(aciFile)\n\tif err != nil {\n\t\treturn nil, aciFile, nil, err\n\t}\n\t// Check if the downloaded ACI has the correct app name.\n\t// The check is only performed when the aci is downloaded through the\n\t// discovery protocol, but not with local files or full URL.\n\tif appName != \"\" && manifest.Name.String() != appName {\n\t\treturn nil, aciFile, nil,\n\t\t\tfmt.Errorf(\"error when reading the app name: %q expected but %q found\",\n\t\t\t\tappName, manifest.Name.String())\n\t}\n\n\tif f.ks != nil {\n\t\tif _, err := aciFile.Seek(0, 0); err != nil {\n\t\t\treturn nil, aciFile, nil, fmt.Errorf(\"error seeking ACI file: %v\", err)\n\t\t}\n\t\tif _, err := ascFile.Seek(0, 0); err != nil {\n\t\t\treturn nil, aciFile, nil, fmt.Errorf(\"error seeking signature file: %v\", err)\n\t\t}\n\t\tif entity, err = f.ks.CheckSignature(manifest.Name.String(), aciFile, ascFile); err != nil {\n\t\t\treturn nil, aciFile, nil, err\n\t\t}\n\t}\n\n\tif _, err := aciFile.Seek(0, 0); err != nil {\n\t\treturn nil, aciFile, nil, fmt.Errorf(\"error seeking ACI file: %v\", err)\n\t}\n\treturn entity, aciFile, cd, nil\n}", 
"func testDownloadImages(ctx context.Context, t *testing.T, downloadCh chan<- downloadRequest, addr, ccvmDir string) {\n\twkld := &workload{\n\t\tspec: workloadSpec{\n\t\t\tBaseImageURL: \"http://\" + addr + \"/download/image\",\n\t\t\tBIOS: \"http://\" + addr + \"/download/bios\",\n\t\t},\n\t}\n\n\tresultCh := make(chan interface{})\n\tgo func() {\n\t\timg, bios, err := downloadImages(ctx, wkld, http.DefaultTransport.(*http.Transport),\n\t\t\tresultCh, downloadCh)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Failed to download images: %v\", err)\n\t\t}\n\t\tif len(img) == 0 || len(bios) == 0 {\n\t\t\tt.Errorf(\"One the paths is empty img=%s bios=%s\", img, bios)\n\t\t}\n\t\tclose(resultCh)\n\t}()\n\n\tfor range resultCh {\n\t}\n\n\twkld.spec.BIOS = \"ftp://\" + addr + \"/download/bios\"\n\tresultCh = make(chan interface{})\n\tgo func() {\n\t\t_, _, err := downloadImages(ctx, wkld, http.DefaultTransport.(*http.Transport),\n\t\t\tresultCh, downloadCh)\n\t\tif err == nil {\n\t\t\tt.Errorf(\"Expected downloadImages with bad BIOS URL to fail\")\n\t\t}\n\t\tclose(resultCh)\n\t}()\n\n\tfor range resultCh {\n\t}\n}", "func (m *MockModuleService) GetLatestModuleImage(arg0 string) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetLatestModuleImage\", arg0)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockRepoClient) LocalPath() (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"LocalPath\")\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockUserUsecase) GetSpecialAvatar(name string) string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetSpecialAvatar\", name)\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (imp *Importer) fetchImages() {\n err := downloadImages(\n imp.idPath,\n func(id string, bodyRdr io.Reader) error {\n img, err := jpeg.Decode(bodyRdr)\n if err == nil {\n imp.send(&imagedata.ImageData{Id: id, Data: &img})\n } else {\n log.Printf(\"Error decoding image %s to jpeg\\n\", id)\n }\n return nil\n },\n )\n\n if err != nil { imp.sendErr(err) }\n}", "func (m *MockHandler) GetOsImage(arg0, arg1 string) (*models.OsImage, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetOsImage\", arg0, arg1)\n\tret0, _ := ret[0].(*models.OsImage)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockClient) Fetch(keyword string) (*[]linebot.Response, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Fetch\", keyword)\n\tret0, _ := ret[0].(*[]linebot.Response)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestRunPrepareFromFile(t *testing.T) {\n\tfoundMsg := \"found image in local store\"\n\timage := \"rkt-inspect-implicit-fetch.aci\"\n\n\timagePath := patchTestACI(image, \"--exec=/inspect\")\n\tdefer os.Remove(imagePath)\n\n\ttests := []string{\n\t\timagePath,\n\t\t\"file://\" + imagePath,\n\t}\n\n\tctx := newRktRunCtx()\n\tdefer ctx.cleanup()\n\n\timportImageAndFetchHash(t, ctx, imagePath)\n\n\tfor _, tt := range tests {\n\n\t\t// 1. 
Try run/prepare with '--local', should not get the $foundMsg, since we will ignore the '--local' when\n\t\t// the image is a filepath.\n\t\tcmds := []string{\n\t\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --mds-register=false --local %s\", ctx.cmd(), tt),\n\t\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare --local %s\", ctx.cmd(), tt),\n\t\t}\n\n\t\tfor _, cmd := range cmds {\n\t\t\tt.Logf(\"Running test %v\", cmd)\n\n\t\t\tchild, err := gexpect.Spawn(cmd)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Cannot exec rkt: %v\", err)\n\t\t\t}\n\n\t\t\tif err := child.Expect(foundMsg); err == nil {\n\t\t\t\tt.Fatalf(\"%q should not be found\", foundMsg)\n\t\t\t}\n\n\t\t\tif err := child.Wait(); err != nil {\n\t\t\t\tt.Fatalf(\"rkt didn't terminate correctly: %v\", err)\n\t\t\t}\n\t\t}\n\n\t\t// 2. Try run/prepare without '--local', should not get $foundMsg either.\n\t\tcmds = []string{\n\t\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --mds-register=false %s\", ctx.cmd(), tt),\n\t\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare %s\", ctx.cmd(), tt),\n\t\t}\n\n\t\tfor _, cmd := range cmds {\n\t\t\tt.Logf(\"Running test %v\", cmd)\n\n\t\t\tchild, err := gexpect.Spawn(cmd)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Cannot exec rkt: %v\", err)\n\t\t\t}\n\t\t\tif err := child.Expect(foundMsg); err == nil {\n\t\t\t\tt.Fatalf(\"%q should not be found\", foundMsg)\n\t\t\t}\n\t\t\tif err := child.Wait(); err != nil {\n\t\t\t\tt.Fatalf(\"rkt didn't terminate correctly: %v\", err)\n\t\t\t}\n\t\t}\n\t}\n}", "func SampleImageForTest() (*SampleImage, error) {\n\t\n\t// Attempt to resolve the absolute path to the root directory for the sample image\n\trootDir, err := filepath.Abs(filepath.Join(\".\", \"testdata\", \"sample\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\n\t// Return a populated SampleImage object\n\treturn &SampleImage{\n\t\tRootDir: rootDir,\n\t\tDockerfilesDir: filepath.Dir(rootDir),\n\t\tBlobsDir: filepath.Join(rootDir, \"blobs\", \"sha256\"),\n\t\tLayersDir: filepath.Join(rootDir, \"layers\"),\n\t}, nil\n}", "func handleLocalRequest(rw http.ResponseWriter, req *http.Request) {\n\tfn, err := etl.GetFilename(req.FormValue(\"filename\"))\n\tif err != nil {\n\t\trw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintf(rw, \"failed to get valid filename= parameter from request\")\n\t\treturn\n\t}\n\n\tdp, err := etl.ValidateTestPath(fn)\n\tif err != nil {\n\t\trw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintf(rw, \"failed to validate test path: %q\", fn)\n\t\treturn\n\t}\n\n\tc, err := storage.GetStorageClient(false)\n\tif err != nil {\n\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintf(rw, \"failed to get storage client\")\n\t\treturn\n\t}\n\n\tctx := context.Background()\n\tobj, err := c.Bucket(dp.Bucket).Object(dp.Path).Attrs(ctx)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintf(rw, \"failed to get object attrs for %s / %s\", dp.Bucket, dp.Path)\n\t\treturn\n\t}\n\n\tr := toRunnable(obj)\n\terr = r.Run(ctx)\n\tif err != nil {\n\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintf(rw, \"runnable failed to run on %s / %s\", dp.Bucket, dp.Path)\n\t\treturn\n\t}\n\n\tfmt.Fprintf(rw, \"no observed errors\")\n}", "func (reader reader) GetImageFromURLorLocalPath(url, localPath string) (img image.Image, err error) {\n\tswitch {\n\tcase url != \"\":\n\t\treturn reader.ReadFromURL(url)\n\n\tcase localPath != \"\":\n\t\treturn reader.ReadFromLocalPath(localPath)\n\t}\n\treturn nil, 
errors.New(\"Both url and local path was not provided\")\n}", "func (_m *Repository) Fetch(ctx context.Context, limit int, offset int) ([]*models.Host, error) {\n\tret := _m.Called(ctx, limit, offset)\n\n\tvar r0 []*models.Host\n\tif rf, ok := ret.Get(0).(func(context.Context, int, int) []*models.Host); ok {\n\t\tr0 = rf(ctx, limit, offset)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]*models.Host)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, int, int) error); ok {\n\t\tr1 = rf(ctx, limit, offset)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (b *ecrBase) runGetImage(ctx context.Context, batchGetImageInput ecr.BatchGetImageInput) (*ecr.Image, error) {\n\t// Allow only a single image to be fetched at a time.\n\tif len(batchGetImageInput.ImageIds) != 1 {\n\t\treturn nil, errGetImageUnhandled\n\t}\n\n\tbatchGetImageInput.RegistryId = aws.String(b.ecrSpec.Registry())\n\tbatchGetImageInput.RepositoryName = aws.String(b.ecrSpec.Repository)\n\n\tlog.G(ctx).WithField(\"batchGetImageInput\", batchGetImageInput).Trace(\"ecr.base.image: requesting images\")\n\n\tbatchGetImageOutput, err := b.client.BatchGetImageWithContext(ctx, &batchGetImageInput)\n\tif err != nil {\n\t\tlog.G(ctx).WithError(err).Error(\"ecr.base.image: failed to get image\")\n\t\treturn nil, err\n\t}\n\tlog.G(ctx).WithField(\"batchGetImageOutput\", batchGetImageOutput).Trace(\"ecr.base.image: api response\")\n\n\t// Summarize image request failures for handled errors. Only the first\n\t// failure is checked as only a single ImageIdentifier is allowed to be\n\t// queried for.\n\tif len(batchGetImageOutput.Failures) > 0 {\n\t\tfailure := batchGetImageOutput.Failures[0]\n\t\tswitch aws.StringValue(failure.FailureCode) {\n\t\t// Requested image with a corresponding tag and digest does not exist.\n\t\t// This failure will generally occur when pushing an updated (or new)\n\t\t// image with a tag.\n\t\tcase ecr.ImageFailureCodeImageTagDoesNotMatchDigest:\n\t\t\tlog.G(ctx).WithField(\"failure\", failure).Debug(\"ecr.base.image: no matching image with specified digest\")\n\t\t\treturn nil, errImageNotFound\n\t\t// Requested image doesn't resolve to a known image. 
A new image will\n\t\t// result in an ImageNotFound error when checked before push.\n\t\tcase ecr.ImageFailureCodeImageNotFound:\n\t\t\tlog.G(ctx).WithField(\"failure\", failure).Debug(\"ecr.base.image: no image found\")\n\t\t\treturn nil, errImageNotFound\n\t\t// Requested image identifiers are invalid.\n\t\tcase ecr.ImageFailureCodeInvalidImageDigest, ecr.ImageFailureCodeInvalidImageTag:\n\t\t\tlog.G(ctx).WithField(\"failure\", failure).Error(\"ecr.base.image: invalid image identifier\")\n\t\t\treturn nil, reference.ErrInvalid\n\t\t// Unhandled failure reported for image request made.\n\t\tdefault:\n\t\t\tlog.G(ctx).WithField(\"failure\", failure).Warn(\"ecr.base.image: unhandled image request failure\")\n\t\t\treturn nil, errGetImageUnhandled\n\t\t}\n\t}\n\n\treturn batchGetImageOutput.Images[0], nil\n}", "func (c MockDockerClient) ImagePull(ctx context.Context, imageName string) error {\n\tif c.ImagePullFn != nil {\n\t\tfmt.Println(\"[MockDockerClient] In \", utils.CurrentFunctionName())\n\t\tfmt.Println(\"[MockDockerClient] - ctx: \", ctx)\n\t\tfmt.Println(\"[MockDockerClient] - imageName: \", imageName)\n\t\treturn c.ImagePullFn(ctx, imageName)\n\t}\n\tpanic(fmt.Sprintf(\"No function defined for: %s\", utils.CurrentFunctionName()))\n}", "func (m *MockCEImpl) ImagePull(image string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ImagePull\", image)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (mr *MockFetcherMockRecorder) FetchUpdatedLocalImage(arg0, arg1, arg2 interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"FetchUpdatedLocalImage\", reflect.TypeOf((*MockFetcher)(nil).FetchUpdatedLocalImage), arg0, arg1, arg2)\n}", "func (m *MockCompute) ImageIDFromName(arg0 string) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ImageIDFromName\", arg0)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func getTestImage(image int) string {\n\treturn imageNames[image]\n}", "func fetchFile(filename string, url string, token string) (err error) {\n\tfmt.Printf(\"fetching file name=%s, url=%s\\n\", filename, url)\n\tlocalfile, err := os.Create(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer localfile.Close()\n\n\tvar user *httpclient.Auth\n\tif token != \"\" {\n\t\tuser = httpclient.GetUserByTokenAuth(token)\n\t}\n\n\t//download file from Shock\n\tres, err := httpclient.Get(url, httpclient.Header{}, nil, user)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer res.Body.Close()\n\n\tif res.StatusCode != 200 { //err in fetching data\n\t\tresbody, _ := ioutil.ReadAll(res.Body)\n\t\tmsg := fmt.Sprintf(\"op=fetchFile, url=%s, res=%s\", url, resbody)\n\t\treturn errors.New(msg)\n\t}\n\n\t_, err = io.Copy(localfile, res.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn\n}", "func (m *MockSystem) CanFetch(path string) bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CanFetch\", path)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func (m *MockAgentSecureServer) TaggerFetchEntity(arg0 context.Context, arg1 *core.FetchEntityRequest) (*core.FetchEntityResponse, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"TaggerFetchEntity\", arg0, arg1)\n\tret0, _ := ret[0].(*core.FetchEntityResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockEcrClient) ImageListable(arg0, arg1, arg2, arg3 string) (bool, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ImageListable\", arg0, arg1, arg2, arg3)\n\tret0, _ := ret[0].(bool)\n\tret1, _ := 
ret[1].(error)\n\treturn ret0, ret1\n}", "func retagLocalImageForRemotePush(localTag string, remoteUrl string) string {\n\tnewTag := fmt.Sprintf(\"%s/%s\", remoteUrl, localTag)\n\tdockerTag(localTag, newTag)\n\treturn newTag\n}", "func (f MockFetch) Fetch(targetServer Server) ServerStatus {\n\tif targetServer.ID == 196 {\n\t\treturn ServerStatus{targetServer.ID, false, \"404\", targetServer.URL, time.Now()}\n\t}\n\treturn ServerStatus{targetServer.ID, true, \"\", targetServer.URL, time.Now()}\n}", "func (m *MockLoggingClient) Fetch(arg0 context.Context, arg1 *logging.QueryRequest, arg2 ...grpc.CallOption) (*logging.QueryResponse, error) {\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Fetch\", varargs...)\n\tret0, _ := ret[0].(*logging.QueryResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockHandler) GetLatestOsImage(arg0 string) (*models.OsImage, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetLatestOsImage\", arg0)\n\tret0, _ := ret[0].(*models.OsImage)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func prepull(ctx context.Context, req types.FunctionDeployment, client *containerd.Client, alwaysPull bool) (containerd.Image, error) {\n\tstart := time.Now()\n\tr, err := reference.ParseNormalizedNamed(req.Image)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\timgRef := reference.TagNameOnly(r).String()\n\n\tsnapshotter := \"\"\n\tif val, ok := os.LookupEnv(\"snapshotter\"); ok {\n\t\tsnapshotter = val\n\t}\n\n\timage, err := service.PrepareImage(ctx, client, imgRef, snapshotter, alwaysPull)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to pull image %s\", imgRef)\n\t}\n\n\tsize, _ := image.Size(ctx)\n\tlog.Printf(\"Image for: %s size: %d, took: %fs\\n\", image.Name(), size, time.Since(start).Seconds())\n\n\treturn image, nil\n}", "func (is *ImageStoreLocal) InitRepo(name string) error {\n\tvar lockLatency time.Time\n\n\tis.Lock(&lockLatency)\n\tdefer is.Unlock(&lockLatency)\n\n\treturn is.initRepo(name)\n}", "func picturesMock(w http.ResponseWriter, r *http.Request) {\n\tjson := `{\"copyright\":\"Amir H. 
Abolfath\",\"date\":\"2019-12-06\",\"explanation\":\"This frame.\",\"hdurl\":\"https://apod.nasa.gov/apod/image/1912/TaurusAbolfath.jpg\",\"media_type\":\"image\",\"service_version\":\"v1\",\"title\":\"Pleiades to Hyades\",\"url\":\"https://apod.nasa.gov/apod/image/1912/TaurusAbolfath1024.jpg\"}`\n\tw.WriteHeader(200)\n\t_, _ = w.Write([]byte(json))\n}", "func (m *MockTChanCluster) Fetch(ctx thrift.Context, req *FetchRequest) (*FetchResult_, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Fetch\", ctx, req)\n\tret0, _ := ret[0].(*FetchResult_)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func PrepareBaseImage(ctx context.Context, ref string, output io.Writer) (err error) {\n\n\tcli, err := client.NewEnvClient()\n\tif err != nil {\n\t\treturn\n\t}\n\n\treader, err := cli.ImagePull(ctx, ref, types.ImagePullOptions{})\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer reader.Close()\n\n\tvar log pullLog\n\tstatus := make(map[string]string)\n\tscanner := bufio.NewScanner(reader)\n\tfor scanner.Scan() {\n\t\tjson.Unmarshal(scanner.Bytes(), &log)\n\t\tif log.ID != \"\" {\n\t\t\tcur := status[log.ID]\n\t\t\tif cur != log.Status {\n\t\t\t\tstatus[log.ID] = log.Status\n\t\t\t\tfmt.Fprintln(output, log.Status, log.ID)\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Fprintln(output, log.Status)\n\t\t}\n\t}\n\n\treturn\n\n}", "func (f FetchStruct) FetchFromRemote() *model.FetchResult {\n\trepo := f.Repo\n\tremoteURL := f.RemoteURL\n\tremoteBranch := f.RemoteBranch\n\trepoPath := f.RepoPath\n\n\tvar remoteDataObject RemoteDataInterface\n\tremoteDataObject = RemoteDataStruct{\n\t\tRepo: repo,\n\t\tRemoteURL: remoteURL,\n\t}\n\n\tremoteName := remoteDataObject.GetRemoteName()\n\tlogger := global.Logger{}\n\n\ttargetRefPsec := \"refs/heads/\" + remoteBranch + \":refs/remotes/\" + remoteBranch\n\tb := new(bytes.Buffer)\n\tvar fetchErr error\n\tgitSSHAuth, sshErr := ssh.NewSSHAgentAuth(\"git\")\n\tw, _ := repo.Worktree()\n\n\t// Check if repo path is empty and fetch path from worktree\n\tif repoPath == \"\" {\n\t\trepoPath = w.Filesystem.Root()\n\t}\n\n\tif sshErr != nil {\n\t\tlogger.Log(\"Authentication method failed -> \"+sshErr.Error(), global.StatusError)\n\t\tif w == nil {\n\t\t\treturn &model.FetchResult{\n\t\t\t\tStatus: global.FetchFromRemoteError,\n\t\t\t\tFetchedItems: nil,\n\t\t\t}\n\t\t}\n\t\tlogger.Log(\"Retrying fetch with fallback module using git client\", global.StatusWarning)\n\t\treturn f.windowsFetch()\n\t}\n\n\tlogger.Log(fmt.Sprintf(\"Fetching changes from -> %s : %s\", remoteURL, targetRefPsec), global.StatusInfo)\n\n\tif remoteURL != \"\" && remoteBranch != \"\" {\n\t\tif remoteName == \"\" {\n\t\t\treturn &model.FetchResult{\n\t\t\t\tStatus: global.FetchFromRemoteError,\n\t\t\t\tFetchedItems: nil,\n\t\t\t}\n\t\t}\n\n\t\tfetchErr = repo.Fetch(&git.FetchOptions{\n\t\t\tRemoteName: remoteName,\n\t\t\tAuth: gitSSHAuth,\n\t\t\tRefSpecs: []config.RefSpec{config.RefSpec(targetRefPsec)},\n\t\t\tProgress: sideband.Progress(func(f io.Writer) io.Writer {\n\t\t\t\treturn f\n\t\t\t}(b)),\n\t\t})\n\t} else {\n\t\tfetchErr = repo.Fetch(&git.FetchOptions{\n\t\t\tRemoteName: git.DefaultRemoteName,\n\t\t\tAuth: gitSSHAuth,\n\t\t\tProgress: sideband.Progress(func(f io.Writer) io.Writer {\n\t\t\t\treturn f\n\t\t\t}(b)),\n\t\t})\n\t}\n\n\tif fetchErr != nil {\n\t\tif fetchErr.Error() == \"already up-to-date\" {\n\t\t\tlogger.Log(fetchErr.Error(), global.StatusWarning)\n\t\t\treturn &model.FetchResult{\n\t\t\t\tStatus: global.FetchNoNewChanges,\n\t\t\t\tFetchedItems: nil,\n\t\t\t}\n\t\t} else 
{\n\t\t\tlogger.Log(fetchErr.Error(), global.StatusError)\n\t\t\tlogger.Log(\"Fetch failed. Retrying fetch with git client\", global.StatusWarning)\n\t\t\treturn f.windowsFetch()\n\t\t}\n\n\t} else {\n\t\tlogger.Log(b.String(), global.StatusInfo)\n\t\tlogger.Log(\"Changes fetched from remote\", global.StatusInfo)\n\n\t\tmsg := fmt.Sprintf(\"Changes fetched from remote %v\", remoteName)\n\t\treturn &model.FetchResult{\n\t\t\tStatus: global.FetchFromRemoteSuccess,\n\t\t\tFetchedItems: []*string{&msg},\n\t\t}\n\t}\n\n}", "func getRemoteImage(url string) ([]byte, error) {\n\tvar image []byte\n\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn image, err\n\t}\n\tdefer resp.Body.Close()\n\n\timage, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn image, err\n\t}\n\n\treturn image, nil\n}", "func (v *Virt) ImageLocalDigests(ctx context.Context, image string) (digests []string, err error) {\n\treturn\n}", "func LookupMyImage(connConfig string, myImageId string) (SpiderMyImageInfo, error) {\n\n\tif connConfig == \"\" {\n\t\terr := fmt.Errorf(\"LookupMyImage() called with empty connConfig.\")\n\t\tcommon.CBLog.Error(err)\n\t\treturn SpiderMyImageInfo{}, err\n\t} else if myImageId == \"\" {\n\t\terr := fmt.Errorf(\"LookupMyImage() called with empty myImageId.\")\n\t\tcommon.CBLog.Error(err)\n\t\treturn SpiderMyImageInfo{}, err\n\t}\n\n\turl := common.SpiderRestUrl + \"/myimage/\" + url.QueryEscape(myImageId)\n\n\t// Create Req body\n\ttempReq := common.SpiderConnectionName{}\n\ttempReq.ConnectionName = connConfig\n\n\tclient := resty.New().SetCloseConnection(true)\n\tclient.SetAllowGetMethodPayload(true)\n\n\tresp, err := client.R().\n\t\tSetHeader(\"Content-Type\", \"application/json\").\n\t\tSetBody(tempReq).\n\t\tSetResult(&SpiderMyImageInfo{}). // or SetResult(AuthSuccess{}).\n\t\t//SetError(&AuthError{}). 
// or SetError(AuthError{}).\n\t\tGet(url)\n\n\tif err != nil {\n\t\tcommon.CBLog.Error(err)\n\t\terr := fmt.Errorf(\"an error occurred while requesting to CB-Spider\")\n\t\treturn SpiderMyImageInfo{}, err\n\t}\n\n\tfmt.Println(string(resp.Body()))\n\n\tfmt.Println(\"HTTP Status code: \" + strconv.Itoa(resp.StatusCode()))\n\tswitch {\n\tcase resp.StatusCode() >= 400 || resp.StatusCode() < 200:\n\t\terr := fmt.Errorf(string(resp.Body()))\n\t\tcommon.CBLog.Error(err)\n\t\treturn SpiderMyImageInfo{}, err\n\t}\n\n\ttemp := resp.Result().(*SpiderMyImageInfo)\n\treturn *temp, nil\n\n}", "func GetImage(ctx context.Context, sharedDownload map[string]*DownloadState, params *Params) (io.Reader, error) {\n\tlogger := logging.FromContext(ctx)\n\ttimeout := params.Timeout\n\tURL := params.URL\n\tvar imageReader io.Reader\n\n\tif dnState, ok := sharedDownload[URL]; ok {\n\t\tlogger.WithField(\"url\", URL).Trace(\"is fetching by another client\")\n\t\terrCh := make(chan error, 1)\n\t\tdnState.Subs = append(dnState.Subs, errCh)\n\t\tif err := <-errCh; err != nil {\n\t\t\tlogger.WithError(err).WithField(\"url\", URL).Trace(\"fetch failed\")\n\t\t\tdelete(sharedDownload, URL)\n\t\t\treturn nil, err\n\t\t}\n\t\timageReader = bytes.NewReader(dnState.Data)\n\t\tlogger.WithField(\"url\", URL).Trace(\"fetched shared\")\n\t} else {\n\t\tsubscribers := make([]chan error, 0, 1)\n\t\tdownloadState := &DownloadState{\n\t\t\tData: nil,\n\t\t\tSubs: subscribers,\n\t\t}\n\t\tsharedDownload[URL] = downloadState\n\t\tdefer func(sd map[string]*DownloadState, url string) {\n\t\t\tdelete(sd, url)\n\t\t}(sharedDownload, URL)\n\t\thttpClient := httpclient.NewHTTPClient(timeout)\n\t\tresponse, err := httpClient.Get(ctx, URL)\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).WithField(\"url\", URL).Error(\"fetch image failed\")\n\t\t\tfor _, subs := range downloadState.Subs {\n\t\t\t\tsubs <- err\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\n\t\tdownloadState.Data = response.RawBody\n\t\tfor _, subs := range downloadState.Subs {\n\t\t\tsubs <- nil\n\t\t}\n\t\timageReader = bytes.NewReader(response.RawBody)\n\t}\n\n\treturn imageReader, nil\n}", "func ttrPushImageAsserting(ctx context.Context, t *testing.T, client apiclient.APIClient, image string) {\n\trc, err := client.ImagePush(ctx, ttrImageName(image), types.ImagePushOptions{RegistryAuth: \"{}\"})\n\tassert.Assert(t, err)\n\tif rc != nil {\n\t\tbody, err := readAllAndClose(rc)\n\t\tassert.Assert(t, err)\n\t\tassert.Assert(t, strings.Contains(body, `\"status\":\"latest: digest: `))\n\t}\n}", "func (u Util) PrepareFetch(l *log.Logger, f types.File) *FetchOp {\n\tvar err error\n\tvar expectedSum []byte\n\n\t// explicitly ignoring the error here because the config should already be\n\t// validated by this point\n\turi, _ := url.Parse(f.Contents.Source)\n\n\thasher, err := GetHasher(f.Contents.Verification)\n\tif err != nil {\n\t\tl.Crit(\"Error verifying file %q: %v\", f.Path, err)\n\t\treturn nil\n\t}\n\n\tif hasher != nil {\n\t\t// explicitly ignoring the error here because the config should already\n\t\t// be validated by this point\n\t\t_, expectedSumString, _ := f.Contents.Verification.HashParts()\n\t\texpectedSum, err = hex.DecodeString(expectedSumString)\n\t\tif err != nil {\n\t\t\tl.Crit(\"Error parsing verification string %q: %v\", expectedSumString, err)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tif f.User.Name != \"\" {\n\t\tuser, err := u.userLookup(f.User.Name)\n\t\tif err != nil {\n\t\t\tl.Crit(\"No such user %q: %v\", f.User.Name, err)\n\t\t\treturn nil\n\t\t}\n\t\tuid, err := 
strconv.ParseInt(user.Uid, 0, 0)\n\t\tif err != nil {\n\t\t\tl.Crit(\"Couldn't parse uid %q: %v\", user.Uid, err)\n\t\t\treturn nil\n\t\t}\n\t\ttmp := int(uid)\n\t\tf.User.ID = &tmp\n\t}\n\tif f.Group.Name != \"\" {\n\t\tg, err := u.groupLookup(f.Group.Name)\n\t\tif err != nil {\n\t\t\tl.Crit(\"No such group %q: %v\", f.Group.Name, err)\n\t\t\treturn nil\n\t\t}\n\t\tgid, err := strconv.ParseInt(g.Gid, 0, 0)\n\t\tif err != nil {\n\t\t\tl.Crit(\"Couldn't parse gid %q: %v\", g.Gid, err)\n\t\t\treturn nil\n\t\t}\n\t\ttmp := int(gid)\n\t\tf.Group.ID = &tmp\n\t}\n\n\treturn &FetchOp{\n\t\tPath: f.Path,\n\t\tHash: hasher,\n\t\tMode: os.FileMode(f.Mode),\n\t\tUid: *f.User.ID,\n\t\tGid: *f.Group.ID,\n\t\tUrl: *uri,\n\t\tFetchOptions: resource.FetchOptions{\n\t\t\tHash: hasher,\n\t\t\tCompression: f.Contents.Compression,\n\t\t\tExpectedSum: expectedSum,\n\t\t},\n\t}\n}", "func fetch(ctx context.Context, path string, repo *gogit.Repository, branch string, access repoAccess, impl string) error {\n\trefspec := fmt.Sprintf(\"refs/heads/%s:refs/heads/%s\", branch, branch)\n\tswitch impl {\n\tcase sourcev1.LibGit2Implementation:\n\t\tlg2repo, err := libgit2.OpenRepository(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn fetchLibgit2(lg2repo, refspec, access)\n\tcase sourcev1.GoGitImplementation:\n\t\treturn fetchGoGit(ctx, repo, refspec, access)\n\tdefault:\n\t\treturn fmt.Errorf(\"unknown git implementation %q\", impl)\n\t}\n}", "func (suite *APIImageSaveLoadSuite) TestImageSaveLoadOk(c *check.C) {\n\tbefore, err := request.Get(\"/images/\" + busyboxImage125 + \"/json\")\n\tc.Assert(err, check.IsNil)\n\tCheckRespStatus(c, before, 200)\n\tgotBefore := types.ImageInfo{}\n\terr = request.DecodeBody(&gotBefore, before.Body)\n\tc.Assert(err, check.IsNil)\n\n\tq := url.Values{}\n\tq.Set(\"name\", busyboxImage125)\n\tquery := request.WithQuery(q)\n\tresp, err := request.Get(\"/images/save\", query)\n\tc.Assert(err, check.IsNil)\n\tdefer resp.Body.Close()\n\n\tdir, err := ioutil.TempDir(\"\", \"TestImageSaveLoadOk\")\n\tif err != nil {\n\t\tc.Errorf(\"failed to create a new temporary directory: %v\", err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\ttmpFile := filepath.Join(dir, \"busyboxImage.tar\")\n\tf, err := os.Create(tmpFile)\n\tif err != nil {\n\t\tc.Errorf(\"failed to create file: %v\", err)\n\t}\n\n\tif _, err := io.Copy(f, resp.Body); err != nil {\n\t\tc.Errorf(\"failed to save data to file: %v\", err)\n\t}\n\n\tdata, err := os.Open(tmpFile)\n\tif err != nil {\n\t\tc.Errorf(\"failed to load file's data: %v\", err)\n\t}\n\n\tloadImageName := \"load-busyboxImage\"\n\tq = url.Values{}\n\tq.Set(\"name\", loadImageName)\n\n\tquery = request.WithQuery(q)\n\treader := request.WithRawData(data)\n\theader := request.WithHeader(\"Content-Type\", \"application/x-tar\")\n\n\tresp, err = request.Post(\"/images/load\", query, reader, header)\n\tc.Assert(err, check.IsNil)\n\tCheckRespStatus(c, resp, 200)\n\n\tafter, err := request.Get(\"/images/\" + loadImageName + \":\" + environment.Busybox125Tag + \"/json\")\n\tc.Assert(err, check.IsNil)\n\tCheckRespStatus(c, after, 200)\n\tdefer request.Delete(\"/images/\" + loadImageName + \":\" + environment.Busybox125Tag)\n\n\tgotAfter := types.ImageInfo{}\n\terr = request.DecodeBody(&gotAfter, after.Body)\n\tc.Assert(err, check.IsNil)\n\n\tc.Assert(gotBefore.ID, check.Equals, gotAfter.ID)\n\tc.Assert(gotBefore.CreatedAt, check.Equals, gotAfter.CreatedAt)\n\tc.Assert(gotBefore.Size, check.Equals, gotAfter.Size)\n}", "func (m *MockLocalConfigProvider) GetURL(name string) 
(*LocalURL, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetURL\", name)\n\tret0, _ := ret[0].(*LocalURL)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockStorage) Get(ctx context.Context, id string) (*storage.ImageModel, error) {\n\tret := m.ctrl.Call(m, \"Get\", ctx, id)\n\tret0, _ := ret[0].(*storage.ImageModel)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (s *FileStore) PullImage(ctx context.Context, name string, translator Translator) (string, error) {\n\tname, specDigest := SplitImageName(name)\n\tep := translator(ctx, name)\n\tglog.V(1).Infof(\"Image translation: %q -> %q\", name, ep.URL)\n\tif err := os.MkdirAll(s.dataDir(), 0777); err != nil {\n\t\treturn \"\", fmt.Errorf(\"mkdir %q: %v\", s.dataDir(), err)\n\t}\n\ttempFile, err := ioutil.TempFile(s.dataDir(), \"part_\")\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to create a temporary file: %v\", err)\n\t}\n\tdefer func() {\n\t\tif tempFile != nil {\n\t\t\ttempFile.Close()\n\t\t}\n\t}()\n\tif err := s.downloader.DownloadFile(ctx, ep, tempFile); err != nil {\n\t\ttempFile.Close()\n\t\tif err := os.Remove(tempFile.Name()); err != nil {\n\t\t\tglog.Warningf(\"Error removing %q: %v\", tempFile.Name(), err)\n\t\t}\n\t\treturn \"\", fmt.Errorf(\"error downloading %q: %v\", ep.URL, err)\n\t}\n\n\tif _, err := tempFile.Seek(0, os.SEEK_SET); err != nil {\n\t\treturn \"\", fmt.Errorf(\"can't get the digest for %q: Seek(): %v\", tempFile.Name(), err)\n\t}\n\n\td, err := digest.FromReader(tempFile)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif err := tempFile.Close(); err != nil {\n\t\treturn \"\", fmt.Errorf(\"closing %q: %v\", tempFile.Name(), err)\n\t}\n\tfileName := tempFile.Name()\n\ttempFile = nil\n\tif specDigest != \"\" && d != specDigest {\n\t\treturn \"\", fmt.Errorf(\"image digest mismatch: %s instead of %s\", d, specDigest)\n\t}\n\tif err := s.placeImage(fileName, d.Hex(), name); err != nil {\n\t\treturn \"\", err\n\t}\n\tnamed, err := reference.WithName(name)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\twithDigest, err := reference.WithDigest(named, d)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn withDigest.String(), nil\n}", "func NewLocalFetcher() *LocalFetcher {\n\treturn &LocalFetcher{\n\t\tdata: make(map[string]*asset.Asset),\n\t}\n}", "func Fetch(dir, target string, pool int, dynamic bool) error {\n\tlocal, err := GetOrigin(dir)\n\tif err != nil {\n\t\tlog.Error(\"bad local origin:\", err)\n\t\treturn err\n\t}\n\tlocalHashes, localSource, err := local.Scan()\n\tif target != \"\" {\n\t\tlocalSource = target\n\t}\n\tglobal, err := GetOrigin(localSource)\n\tif err != nil {\n\t\tlog.Error(\"bad global origin:\", err)\n\t\treturn err\n\t}\n\tglobalHashes, globalSource, err := global.Scan()\n\tif globalSource != localSource {\n\t\tlog.Warning(\"unverified origin:\", globalSource)\n\t} else {\n\t\tlog.Notice(\"verified origin:\", globalSource)\n\t}\n\n\tmissingHashes := Compare(localHashes, globalHashes)\n\tif pool < 2 {\n\t\terr = FetchSpecific(dir, global, missingHashes)\n\t} else {\n\t\tif dynamic {\n\t\t\tpool *= runtime.NumCPU()\n\t\t}\n\t\terr = FetchSpecificAsync(dir, global, missingHashes, pool)\n\t}\n\tif err != nil {\n\t\tlog.Error(\"failed to fetch files:\", err)\n\t\treturn err\n\t}\n\tlog.Notice(\"fetched\", len(missingHashes), \"files from origin\")\n\n\treturn Write(dir, globalSource, globalHashes)\n}", "func PullImage(image, cacheDir string) (v1.Image, error) {\n var options []crane.Option\n\n // options = append(options, 
crane.Insecure)\n\n // Use current built OS and architecture\n options = append(options, crane.WithPlatform(&v1.Platform{\n OS: runtime.GOOS,\n Architecture: runtime.GOARCH,\n }))\n\n // Grab the remote manifest\n manifest, err := crane.Manifest(image, options...)\n if err != nil {\n return nil, fmt.Errorf(\"failed fetching manifest for %s: %v\", image, err)\n }\n\n if !gjson.Valid(string(manifest)) {\n return nil, fmt.Errorf(\"Cannot parse manifest: %s\", string(manifest))\n }\n\n value := gjson.Get(string(manifest), \"config.digest\").Value().(string)\n if value == \"\" {\n return nil, fmt.Errorf(\"Malformed manifest: %s\", string(manifest))\n }\n \n digest := strings.Split(value, \":\")[1]\n tarball := fmt.Sprintf(\"%s/%s.tar.gz\", cacheDir, digest)\n\n // Download the tarball of the image if not available in the cache\n if _, err := os.Stat(tarball); os.IsNotExist(err) {\n // Create the cacheDir if it does not already exist\n if cacheDir != \"\" {\n if _, err := os.Stat(cacheDir); os.IsNotExist(err) {\n os.MkdirAll(cacheDir, os.ModePerm)\n }\n }\n \n // Pull the image\n img, err := crane.Pull(image, options...)\n if err != nil {\n return nil, fmt.Errorf(\"Could not pull image: %s\", err)\n }\n \n f, err := os.Create(tarball)\n if err != nil {\n return nil, fmt.Errorf(\"Failed to open %s: %v\", tarball, err)\n }\n \n defer f.Close()\n \n err = crane.Save(img, image, tarball)\n if err != nil {\n return nil, fmt.Errorf(\"Could not save image: %s\", err)\n }\n }\n\n img, err := crane.Load(tarball)\n if err != nil {\n return nil, fmt.Errorf(\"Could not load image: %s\", err)\n }\n\n return img, nil\n}", "func (i ImageFetcher) Fetch(path string) (image.Image, error) {\n\tresp, err := http.Get(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\treturn png.Decode(resp.Body)\n}", "func (m *MockTChanNode) Fetch(ctx thrift.Context, req *FetchRequest) (*FetchResult_, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Fetch\", ctx, req)\n\tret0, _ := ret[0].(*FetchResult_)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func pull(ctx context.Context, imgCache *cache.Handle, directTo, pullFrom string, noHTTPS bool) (imagePath string, err error) {\n\tshubURI, err := ParseReference(pullFrom)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to parse shub uri: %s\", err)\n\t}\n\n\t// Get the image manifest\n\tmanifest, err := GetManifest(shubURI, noHTTPS)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to get manifest for: %s: %s\", pullFrom, err)\n\t}\n\n\tif directTo != \"\" {\n\t\tsylog.Infof(\"Downloading shub image\")\n\t\tif err := DownloadImage(ctx, manifest, directTo, pullFrom, true, noHTTPS); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\timagePath = directTo\n\t} else {\n\t\tcacheEntry, err := imgCache.GetEntry(cache.ShubCacheType, manifest.Commit)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"unable to check if %v exists in cache: %v\", manifest.Commit, err)\n\t\t}\n\t\tdefer cacheEntry.CleanTmp()\n\t\tif !cacheEntry.Exists {\n\t\t\tsylog.Infof(\"Downloading shub image\")\n\n\t\t\terr := DownloadImage(ctx, manifest, cacheEntry.TmpPath, pullFrom, true, noHTTPS)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\n\t\t\terr = cacheEntry.Finalize()\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\timagePath = cacheEntry.Path\n\t\t} else {\n\t\t\tsylog.Infof(\"Use cached image\")\n\t\t\timagePath = cacheEntry.Path\n\t\t}\n\n\t}\n\n\treturn imagePath, nil\n}", "func TestFetcher_Disk(t *testing.T) 
{\n\tfetcher := (&diskFetcher{\"../fixtures\"}).fetch\n\n\tfakeSites := []struct {\n\t\turl string\n\t\texpected string\n\t}{\n\t\t{\"https://www.404s.com/nope.txt\", \"\"},\n\t\t{\"https://www.404s.com/hello.txt\", \"Hello, world!\"},\n\t}\n\tfor _, site := range fakeSites {\n\t\tt.Run(site.url, func(t *testing.T) {\n\t\t\tactual := fetchAsString(t, fetcher, site.url)\n\t\t\trequire.Equal(t, site.expected, actual)\n\t\t})\n\t}\n}", "func (_m *MockECRAPI) BatchGetImageRequest(_param0 *ecr.BatchGetImageInput) (*request.Request, *ecr.BatchGetImageOutput) {\n\tret := _m.ctrl.Call(_m, \"BatchGetImageRequest\", _param0)\n\tret0, _ := ret[0].(*request.Request)\n\tret1, _ := ret[1].(*ecr.BatchGetImageOutput)\n\treturn ret0, ret1\n}", "func (m *MockPexeler) GetRandomImage(arg0 string) (int, []byte, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetRandomImage\", arg0)\n\tret0, _ := ret[0].(int)\n\tret1, _ := ret[1].([]byte)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}", "func _getMock(url string) (content []byte, err error) {\n\tvar idnum = crc32.ChecksumIEEE([]byte(url))%uint32(5) + 1\n\tvar response = fmt.Sprintf(mockResponseTemplate, idnum, idnum, \"no message\", 200)\n\treturn []byte(response), nil\n}", "func TestGetImages(t *testing.T) {\n\t// Test case 1: HTML with no image tags\n\thtml1 := `<html><body><h1>Hello, World!</h1></body></html>`\n\texpected1 := make(map[string]string)\n\tresult1, err1 := core.GetImages(html1)\n\tif err1 != nil {\n\t\tt.Errorf(\"Unexpected error: %v\", err1)\n\t}\n\tif len(result1) != len(expected1) {\n\t\tt.Errorf(\"Expected %d images, but got %d\", len(expected1), len(result1))\n\t}\n\n\t// Test case 2: HTML with one image tag\n\thtml2 := `<html><body><img src=\"image1.jpg\"></body></html>`\n\texpected2 := map[string]string{\"image1.jpg\": \"<img src=\\\"image1.jpg\\\">\"}\n\tresult2, err2 := core.GetImages(html2)\n\tif err2 != nil {\n\t\tt.Errorf(\"Unexpected error: %v\", err2)\n\t}\n\tif len(result2) != len(expected2) {\n\t\tt.Errorf(\"Expected %d images, but got %d\", len(expected2), len(result2))\n\t}\n\tfor key, value := range expected2 {\n\t\tif result2[key] != value {\n\t\t\tt.Errorf(\"Expected image URL %s with tag %s, but got %s\", key, value, result2[key])\n\t\t}\n\t}\n\n\t// Test case 3: HTML with multiple image tags\n\thtml3 := `<html><body><img src=\"image1.jpg\"><img src=\"image2.jpg\"></body></html>`\n\texpected3 := map[string]string{\n\t\t\"image1.jpg\": \"<img src=\\\"image1.jpg\\\">\",\n\t\t\"image2.jpg\": \"<img src=\\\"image2.jpg\\\">\",\n\t}\n\tresult3, err3 := core.GetImages(html3)\n\tif err3 != nil {\n\t\tt.Errorf(\"Unexpected error: %v\", err3)\n\t}\n\tif len(result3) != len(expected3) {\n\t\tt.Errorf(\"Expected %d images, but got %d\", len(expected3), len(result3))\n\t}\n\tfor key, value := range expected3 {\n\t\tif result3[key] != value {\n\t\t\tt.Errorf(\"Expected image URL %s with tag %s, but got %s\", key, value, result3[key])\n\t\t}\n\t}\n\t// Test case 4: HTML with multiple image tags with duplicayr\n\thtml4 := `<html><body><img src=\"image1.jpg\"><img src=\"image2.jpg\"><img src=\"image2.jpg\"></body></html>`\n\texpected4 := map[string]string{\n\t\t\"image1.jpg\": \"<img src=\\\"image1.jpg\\\">\",\n\t\t\"image2.jpg\": \"<img src=\\\"image2.jpg\\\">\",\n\t}\n\tresult4, err4 := core.GetImages(html4)\n\tif err4 != nil {\n\t\tt.Errorf(\"Unexpected error: %v\", err4)\n\t}\n\tif len(result4) != len(expected4) {\n\t\tt.Errorf(\"Expected %d images, but got %d\", len(expected4), len(result4))\n\t}\n\tfor key, value 
:= range expected4 {\n\t\tif result4[key] != value {\n\t\t\tt.Errorf(\"Expected image URL %s with tag %s, but got %s\", key, value, result4[key])\n\t\t}\n\t}\n}", "func (m *MockRepository) FetchAll(bucketName string, modelFn func([]byte) (db.Model, error)) (interface{}, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"FetchAll\", bucketName, modelFn)\n\tret0, _ := ret[0].(interface{})\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockUnzipper) FetchAndExtract(arg0 context.Context, arg1 string) (map[string][]byte, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"FetchAndExtract\", arg0, arg1)\n\tret0, _ := ret[0].(map[string][]byte)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}" ]
[ "0.72084695", "0.6717672", "0.6187629", "0.60904604", "0.5996021", "0.58322525", "0.5825194", "0.5771793", "0.5758451", "0.57340926", "0.5726534", "0.5694318", "0.56871355", "0.5607126", "0.5600493", "0.55343837", "0.548785", "0.53960055", "0.537409", "0.53434074", "0.5329744", "0.52986526", "0.5295109", "0.5293138", "0.52815896", "0.52813476", "0.5259406", "0.5255973", "0.52421516", "0.5228599", "0.5224441", "0.52157396", "0.5198362", "0.517085", "0.51669323", "0.51479065", "0.51467085", "0.51284117", "0.51221454", "0.51214004", "0.50836855", "0.5074557", "0.5062827", "0.50473523", "0.50469536", "0.50334287", "0.50268346", "0.5014151", "0.49971282", "0.49901295", "0.4988306", "0.497935", "0.49741", "0.49739656", "0.49600983", "0.49597535", "0.4955469", "0.49510545", "0.49505162", "0.4942324", "0.4937981", "0.49214906", "0.49165148", "0.49111128", "0.4902524", "0.49006534", "0.48984995", "0.48971304", "0.48781705", "0.48768905", "0.48699847", "0.48598334", "0.48594522", "0.48341104", "0.4833799", "0.48335838", "0.48301634", "0.48243967", "0.48173138", "0.4816802", "0.4808415", "0.48032337", "0.48009023", "0.47943538", "0.47927514", "0.47924235", "0.47920927", "0.47912517", "0.47901776", "0.47867823", "0.4778739", "0.4778729", "0.47772896", "0.47749105", "0.47655374", "0.4764281", "0.4761982", "0.4756816", "0.47547153", "0.4753348" ]
0.78663063
0
FetchLocalImage indicates an expected call of FetchLocalImage
func (mr *MockFetcherMockRecorder) FetchLocalImage(arg0 interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchLocalImage", reflect.TypeOf((*MockFetcher)(nil).FetchLocalImage), arg0)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *MockFetcher) FetchLocalImage(arg0 string) (image.Image, error) {\n\tret := m.ctrl.Call(m, \"FetchLocalImage\", arg0)\n\tret0, _ := ret[0].(image.Image)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (mr *MockFetcherMockRecorder) FetchUpdatedLocalImage(arg0, arg1, arg2 interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"FetchUpdatedLocalImage\", reflect.TypeOf((*MockFetcher)(nil).FetchUpdatedLocalImage), arg0, arg1, arg2)\n}", "func fetchLocal(f *File) error {\n\n\terr := validateLocal(f)\n\tif err != nil {\n\t\tf.Status.Type = status.ERROR\n\t\treturn err\n\t}\n\tf.path = f.Url\n\tf.Status.Type = status.FETCHED\n\treturn nil\n\n}", "func (m *MockFetcher) FetchUpdatedLocalImage(arg0 context.Context, arg1 string, arg2 io.Writer) (image.Image, error) {\n\tret := m.ctrl.Call(m, \"FetchUpdatedLocalImage\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(image.Image)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestFetch(t *testing.T) {\n\timage := \"rkt-inspect-fetch.aci\"\n\n\tctx := newRktRunCtx()\n\tdefer ctx.cleanup()\n\n\t// Fetch the image for the first time, this should write the image to the\n\t// on-disk store.\n\toldHash := patchImportAndFetchHash(image, []string{\"--exec=/inspect --read-file\"}, t, ctx)\n\n\t// Fetch the image with the same name but different content, the expecting\n\t// result is that we should get a different hash since we are not fetching\n\t// from the on-disk store.\n\tnewHash := patchImportAndFetchHash(image, []string{\"--exec=/inspect --read-file --write-file\"}, t, ctx)\n\n\tif oldHash == newHash {\n\t\tt.Fatalf(\"ACI hash should be different as the image has changed\")\n\t}\n}", "func TestImplicitFetch(t *testing.T) {\n\tfoundMsg := \"found image in local store\"\n\n\tctx := newRktRunCtx()\n\tdefer ctx.cleanup()\n\n\t// 1. Fetch the image.\n\t// TODO(yifan): Add other ACI with different schemes.\n\timportImageAndFetchHash(t, ctx, \"docker://busybox:ubuntu-12.04\")\n\timportImageAndFetchHash(t, ctx, \"docker://busybox:latest\")\n\n\t// 2. 
Try run/prepare with/without tag ':latest', should not get $foundMsg.\n\tcmds := []string{\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --mds-register=false docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --mds-register=false docker://busybox:latest\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare docker://busybox:latest\", ctx.cmd()),\n\t}\n\n\tfor _, cmd := range cmds {\n\t\tt.Logf(\"Running test %v\", cmd)\n\n\t\tchild, err := gexpect.Spawn(cmd)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Cannot exec rkt: %v\", err)\n\t\t}\n\t\tif err := expectWithOutput(child, foundMsg); err == nil {\n\t\t\tt.Fatalf(\"%q should not be found\", foundMsg)\n\t\t}\n\t\tif err := child.Wait(); err != nil {\n\t\t\tt.Fatalf(\"rkt didn't terminate correctly: %v\", err)\n\t\t}\n\t}\n}", "func (imp *Importer) fetchLocalImages() {\n items, err := ioutil.ReadDir(STORE_DIR)\n if err != nil {\n imp.sendErr(err)\n return\n }\n\n for _, info := range items {\n if info.IsDir() { continue }\n filename := info.Name()\n\n file, err := os.Open(fmt.Sprintf(\"%s/%s\", STORE_DIR, filename))\n if err != nil {\n imp.sendErr(err)\n return\n }\n\n img, err := jpeg.Decode(file)\n if err != nil {\n log.Printf(\"Error decoding image file %s to jpeg\\n\", filename)\n continue\n }\n\n ext := filepath.Ext(filename)\n id := filename[:len(filename)-len(ext)]\n\n imp.send(&imagedata.ImageData{Id: id, Data: &img})\n }\n}", "func (f *fetcher) fetchImageFrom(appName string, aciURL, ascURL, scheme string, ascFile *os.File, latest bool) (string, error) {\n\tvar rem *store.Remote\n\n\tif f.insecureSkipVerify {\n\t\tif f.ks != nil {\n\t\t\tstderr(\"rkt: warning: TLS verification and signature verification has been disabled\")\n\t\t}\n\t} else if scheme == \"docker\" {\n\t\treturn \"\", fmt.Errorf(\"signature verification for docker images is not supported (try --insecure-skip-verify)\")\n\t}\n\n\tif (f.local && scheme != \"file\") || (scheme != \"file\" && !latest) {\n\t\tvar err error\n\t\tok := false\n\t\trem, ok, err = f.s.GetRemote(aciURL)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif ok {\n\t\t\tif f.local {\n\t\t\t\tstderr(\"rkt: using image in local store for app %s\", appName)\n\t\t\t\treturn rem.BlobKey, nil\n\t\t\t}\n\t\t\tif useCached(rem.DownloadTime, rem.CacheMaxAge) {\n\t\t\t\tstderr(\"rkt: found image in local store, skipping fetching from %s\", aciURL)\n\t\t\t\treturn rem.BlobKey, nil\n\t\t\t}\n\t\t}\n\t\tif f.local {\n\t\t\treturn \"\", fmt.Errorf(\"url %s not available in local store\", aciURL)\n\t\t}\n\t}\n\n\tif scheme != \"file\" && f.debug {\n\t\tstderr(\"rkt: fetching image from %s\", aciURL)\n\t}\n\n\tvar etag string\n\tif rem != nil {\n\t\tetag = rem.ETag\n\t}\n\tentity, aciFile, cd, err := f.fetch(appName, aciURL, ascURL, ascFile, etag)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif cd != nil && cd.useCached {\n\t\tif rem != nil {\n\t\t\treturn rem.BlobKey, nil\n\t\t} else {\n\t\t\t// should never happen\n\t\t\tpanic(\"asked to use cached image but remote is nil\")\n\t\t}\n\t}\n\tif scheme != \"file\" {\n\t\tdefer os.Remove(aciFile.Name())\n\t}\n\n\tif entity != nil && !f.insecureSkipVerify {\n\t\tstderr(\"rkt: signature verified:\")\n\t\tfor _, v := range entity.Identities {\n\t\t\tstderr(\" %s\", v.Name)\n\t\t}\n\t}\n\tkey, err := f.s.WriteACI(aciFile, latest)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif scheme != \"file\" {\n\t\trem := 
store.NewRemote(aciURL, ascURL)\n\t\trem.BlobKey = key\n\t\trem.DownloadTime = time.Now()\n\t\tif cd != nil {\n\t\t\trem.ETag = cd.etag\n\t\t\trem.CacheMaxAge = cd.maxAge\n\t\t}\n\t\terr = f.s.WriteRemote(rem)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn key, nil\n}", "func (o *ImageImportManifest) GetLocalImageIdOk() (*string, bool) {\n\tif o == nil || o.LocalImageId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.LocalImageId, true\n}", "func TestIsImageInLocalRegistry(t *testing.T) {\n\ttype testDef struct {\n\t\timageName string\n\t\tdocker test.FakeDockerClient\n\t\texpectedResult bool\n\t\texpectedError string\n\t}\n\ttests := map[string]testDef{\n\t\t\"ImageFound\": {\"a_test_image\", test.FakeDockerClient{}, true, \"\"},\n\t\t\"ImageNotFound\": {\"a_test_image:sometag\", test.FakeDockerClient{}, false, \"unable to get metadata for a_test_image:sometag\"},\n\t}\n\n\tfor test, def := range tests {\n\t\tdh := getDocker(&def.docker)\n\t\tfake := dh.kubeDockerClient.(*dockertools.FakeDockerClient)\n\t\tif def.expectedResult {\n\t\t\tfake.Image = &dockertypes.ImageInspect{ID: def.imageName}\n\t\t}\n\n\t\tresult, err := dh.IsImageInLocalRegistry(def.imageName)\n\n\t\tif e := fake.AssertCalls([]string{\"inspect_image\"}); e != nil {\n\t\t\tt.Errorf(\"%+v\", e)\n\t\t}\n\n\t\tif result != def.expectedResult {\n\t\t\tt.Errorf(\"Test - %s: Expected result: %v. Got: %v\", test, def.expectedResult, result)\n\t\t}\n\t\tif err != nil && len(def.expectedError) > 0 && !strings.Contains(err.Error(), def.expectedError) {\n\t\t\tt.Errorf(\"Test - %s: Expected error: Got: %+v\", test, err)\n\t\t}\n\t}\n}", "func (f OriginalFetcher) Fetch(namespace string, sourceURL string, imageHash string) (info *info.ImageProperties, downloaded bool, err error) {\n\tif sourceURL == \"\" && imageHash == \"\" {\n\t\treturn nil, false, fmt.Errorf(\"Missing Hash & URL\")\n\t}\n\n\tif imageHash != \"\" {\n\t\tinfo, err = f.fetchFromStore(namespace, imageHash)\n\t}\n\n\tif sourceURL != \"\" && (err != nil || imageHash == \"\") {\n\t\tinfo, downloaded, err = f.fetchFromSource(namespace, sourceURL)\n\t}\n\n\treturn info, downloaded, err\n}", "func (f *fetcher) fetchImage(img string, asc string, discover bool) (string, error) {\n\tif f.withDeps && !discover {\n\t\treturn \"\", fmt.Errorf(\"cannot fetch image's dependencies with discovery disabled\")\n\t}\n\thash, err := f.fetchSingleImage(img, asc, discover)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif f.withDeps {\n\t\terr = f.fetchImageDeps(hash)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn hash, nil\n}", "func LoadLocalImage(app *AppData) error {\n\tapp.LocalImage = DockerImage{\n\t\tExists: false,\n\t}\n\tctx := context.Background()\n\tcli, err := client.NewClientWithOpts(client.FromEnv)\n\tcli.NegotiateAPIVersion(ctx)\n\tdefer cli.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\tinspect, _, err := cli.ImageInspectWithRaw(ctx, app.From)\n\tif err != nil {\n\t\tif err.Error() == \"Error: No such image: \"+app.From {\n\t\t\tfmt.Printf(\"Repo not exists in local docker\\n\")\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tfmt.Printf(\"Repo exists in local docker\\n\")\n\tapp.LocalImage.Exists = true\n\tif len(inspect.RepoDigests) > 0 {\n\t\tapp.LocalImage.DockerDigest = inspect.RepoDigests[0]\n\t} else {\n\t\tapp.LocalImage.DockerDigest = inspect.ID\n\t}\n\n\t//Setting Docker Config values\n\tconfigData, err := json.Marshal(inspect.Config)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = pvjson.Unmarshal(configData, 
&app.LocalImage.DockerConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (m *MockFetcher) FetchRemoteImage(arg0 string) (image.Image, error) {\n\tret := m.ctrl.Call(m, \"FetchRemoteImage\", arg0)\n\tret0, _ := ret[0].(image.Image)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (f *SourceFetcher) Fetch(url string, namespace string) (*info.ImageProperties, bool, error) {\n\tc := make(chan FetchResult)\n\tgo f.uniqueFetchSource(c, url, namespace)\n\tr := <-c\n\treturn r.ImageDetails, r.Downloaded, r.Error\n}", "func TestCannotExecuteStatusImage(t *testing.T) {\n\tbuf := setLogBuffer()\n\tdefer func() {\n\t\tif t.Failed() {\n\t\t\tt.Log(buf.String())\n\t\t}\n\t}()\n\n\tif StatusImage == \"\" {\n\t\tt.Skip(\"no status image defined\")\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\n\tapp := &models.App{Name: id.New().String()}\n\tapp = ensureApp(t, app)\n\n\tfn := &models.Fn{\n\t\tAppID: app.ID,\n\t\tName: id.New().String(),\n\t\tImage: StatusImage,\n\t\tResourceConfig: models.ResourceConfig{\n\t\t\tMemory: memory,\n\t\t},\n\t}\n\tfn = ensureFn(t, fn)\n\n\tlb, err := LB()\n\tif err != nil {\n\t\tt.Fatalf(\"Got unexpected error: %v\", err)\n\t}\n\tu := url.URL{\n\t\tScheme: \"http\",\n\t\tHost: lb,\n\t}\n\tu.Path = path.Join(u.Path, \"invoke\", fn.ID)\n\n\tcontent := bytes.NewBuffer([]byte(`status`))\n\toutput := &bytes.Buffer{}\n\n\tresp, err := callFN(ctx, u.String(), content, output, models.TypeSync)\n\tif err != nil {\n\t\tt.Fatalf(\"Got unexpected error: %v\", err)\n\t}\n\n\tif resp.StatusCode != http.StatusBadRequest {\n\t\tt.Fatalf(\"StatusCode check failed on %v\", resp.StatusCode)\n\t}\n}", "func (f *fetcher) fetchSingleImage(img string, asc string, discover bool) (string, error) {\n\tvar (\n\t\tascFile *os.File\n\t\terr error\n\t\tlatest bool\n\t)\n\tif asc != \"\" && f.ks != nil {\n\t\tascFile, err = os.Open(asc)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"unable to open signature file: %v\", err)\n\t\t}\n\t\tdefer ascFile.Close()\n\t}\n\n\tu, err := url.Parse(img)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"not a valid image reference (%s)\", img)\n\t}\n\n\t// if img refers to a local file, ensure the scheme is file:// and make the url path absolute\n\t_, err = os.Stat(u.Path)\n\tif err == nil {\n\t\tu.Path, err = filepath.Abs(u.Path)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"unable to get abs path: %v\", err)\n\t\t}\n\t\tu.Scheme = \"file\"\n\t} else if !os.IsNotExist(err) {\n\t\treturn \"\", fmt.Errorf(\"unable to access %q: %v\", img, err)\n\t}\n\n\tif discover && u.Scheme == \"\" {\n\t\tif app := newDiscoveryApp(img); app != nil {\n\t\t\tvar discoveryError error\n\t\t\tif !f.local {\n\t\t\t\tstderr(\"rkt: searching for app image %s\", img)\n\t\t\t\tep, err := discoverApp(app, true)\n\t\t\t\tif err != nil {\n\t\t\t\t\tdiscoveryError = err\n\t\t\t\t} else {\n\t\t\t\t\t// No specified version label, mark it as latest\n\t\t\t\t\tif _, ok := app.Labels[\"version\"]; !ok {\n\t\t\t\t\t\tlatest = true\n\t\t\t\t\t}\n\t\t\t\t\treturn f.fetchImageFromEndpoints(app.Name.String(), ep, ascFile, latest)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif discoveryError != nil {\n\t\t\t\tstderr(\"discovery failed for %q: %v. 
Trying to find image in the store.\", img, discoveryError)\n\t\t\t}\n\t\t\tif f.local || discoveryError != nil {\n\t\t\t\treturn f.fetchImageFromStore(img)\n\t\t\t}\n\t\t}\n\t}\n\n\tswitch u.Scheme {\n\tcase \"http\", \"https\", \"file\":\n\tcase \"docker\":\n\t\tdockerURL := common.ParseDockerURL(path.Join(u.Host, u.Path))\n\t\tif dockerURL.Tag == \"latest\" {\n\t\t\tlatest = true\n\t\t}\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"rkt only supports http, https, docker or file URLs (%s)\", img)\n\t}\n\treturn f.fetchImageFromURL(u.String(), u.Scheme, ascFile, latest)\n}", "func (f *Frontend) fetchImage(i *img.Image) (*img.Image, error) {\n\tvar err error\n\n\t// go through image proxy to resize and cache the image\n\tkey := hmacKey(i.ID)\n\tu := fmt.Sprintf(\"%v/image/225x,s%v/%v\", f.Host, key, i.ID)\n\tfmt.Println(u)\n\n\tresp, err := f.Images.Client.Get(u)\n\tif err != nil {\n\t\treturn i, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbdy, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn i, err\n\t}\n\n\ti.Base64 = base64.StdEncoding.EncodeToString(bdy)\n\treturn i, err\n}", "func (mr *MockFetcherMockRecorder) FetchRemoteImage(arg0 interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"FetchRemoteImage\", reflect.TypeOf((*MockFetcher)(nil).FetchRemoteImage), arg0)\n}", "func Fetch(imageURI string, labels map[types.ACIdentifier]string, insecure bool) (tempfile.ReadSeekCloser, error) {\n\tu, err := url.Parse(imageURI)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar puller remote.Puller\n\n\tswitch u.Scheme {\n\tcase \"file\":\n\t\tfilename := u.Path\n\t\tif u.Host != \"\" {\n\t\t\tfilename = filepath.Join(u.Host, u.Path)\n\t\t}\n\t\tf, err := os.Open(filename)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn tempfile.New(f)\n\tcase \"http\", \"https\":\n\t\tpuller = http.New()\n\tcase \"docker\":\n\t\tpuller = docker.New(insecure)\n\tcase \"aci\", \"\":\n\t\tpuller = aci.New(insecure, labels)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"%q scheme not supported\", u.Scheme)\n\t}\n\n\tr, err := puller.Pull(imageURI)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn tempfile.New(r)\n}", "func getLocalImage(s *Settings) ([]byte, error) {\n\tvar image []byte\n\tvar filePath string\n\tvar file *os.File\n\tvar err error\n\n\tif len(s.Directories) > 0 {\n\t\tfound := false\n\t\tfor _, dir := range s.Directories {\n\t\t\tfilePath = path.Join(\"/\", dir, s.Context.Path)\n\t\t\tfile, err = os.Open(filePath)\n\t\t\tif err == nil {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tfile, err = os.Open(s.Local404Thumb)\n\t\t\tif err != nil {\n\t\t\t\treturn image, err\n\t\t\t}\n\t\t}\n\t} else {\n\t\tfile, err = os.Open(path.Join(\"/\", s.Context.Path))\n\t\tif err != nil {\n\t\t\tfile, err = os.Open(s.Local404Thumb)\n\t\t\tif err != nil {\n\t\t\t\treturn image, err\n\t\t\t}\n\t\t}\n\t}\n\n\tinfo, _ := file.Stat()\n\timage = make([]byte, info.Size())\n\n\t_, err = file.Read(image)\n\tif err != nil {\n\t\treturn image, err\n\t}\n\n\treturn image, nil\n}", "func (v *Virt) ImageLocalDigests(ctx context.Context, image string) (digests []string, err error) {\n\treturn\n}", "func (o *ImageImportManifest) HasLocalImageId() bool {\n\tif o != nil && o.LocalImageId != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func TestRunPrepareLocal(t *testing.T) {\n\tnotAvailableMsg := \"not available in local store\"\n\tfoundMsg := \"using image in local store\"\n\n\tctx := newRktRunCtx()\n\tdefer ctx.cleanup()\n\n\tcmds := 
[]string{\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --local --mds-register=false docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --local --mds-register=false docker://busybox:latest\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare --local docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare --local docker://busybox:latest\", ctx.cmd()),\n\t}\n\n\t// 1. Try run/prepare with the image not available in the store, should get $notAvailableMsg.\n\tfor _, cmd := range cmds {\n\t\tt.Logf(\"Running test %v\", cmd)\n\n\t\tchild, err := gexpect.Spawn(cmd)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Cannot exec rkt: %v\", err)\n\t\t}\n\t\tif err := expectWithOutput(child, notAvailableMsg); err != nil {\n\t\t\tt.Fatalf(\"%q should be found\", notAvailableMsg)\n\t\t}\n\t\tchild.Wait()\n\t}\n\n\t// 2. Fetch the image\n\timportImageAndFetchHash(t, ctx, \"docker://busybox\")\n\timportImageAndFetchHash(t, ctx, \"docker://busybox:latest\")\n\n\t// 3. Try run/prepare with the image available in the store, should get $foundMsg.\n\tfor _, cmd := range cmds {\n\t\tt.Logf(\"Running test %v\", cmd)\n\n\t\tchild, err := gexpect.Spawn(cmd)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Cannot exec rkt: %v\", err)\n\t\t}\n\t\tif err := expectWithOutput(child, foundMsg); err != nil {\n\t\t\tt.Fatalf(\"%q should be found\", foundMsg)\n\t\t}\n\t\tif err := child.Wait(); err != nil {\n\t\t\tt.Fatalf(\"rkt didn't terminate correctly: %v\", err)\n\t\t}\n\t}\n}", "func (ss *Sources) localFetch(spec v1.SourceSpec) (string, error) {\n\tp := ss.repoPath(spec)\n\terr := os.MkdirAll(p, 0750)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t//TODO prevent target directory from accumulating unused files\n\t// remove all files before copy\n\t// or\n\t// walk target dir and diff with source dir\n\n\t// Copy local dir to repo path.\n\t// Ignore .git directory.\n\terr = otia10copy.Copy(spec.URL, p, otia10copy.Options{Skip: func(src string) bool {\n\t\treturn filepath.Base(src) == \".git\"\n\t}})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"fetch: %w\", err)\n\t}\n\n\th, err := ss.hashAll(spec.URL) // TODO use hashAll(p) when dir is properly synced (see previous to do)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\ts := hex.EncodeToString(h.Sum(nil))\n\n\treturn s, err\n}", "func retagLocalImageForRemotePush(localTag string, remoteUrl string) string {\n\tnewTag := fmt.Sprintf(\"%s/%s\", remoteUrl, localTag)\n\tdockerTag(localTag, newTag)\n\treturn newTag\n}", "func pullMissingImage(ctx context.Context, apiClient client.CommonAPIClient, image string, force bool) error {\n\tif !force {\n\t\t_, inspectError := apiClient.ImageInspect(ctx, image)\n\t\tif inspectError == nil {\n\t\t\treturn nil\n\t\t}\n\t\tif err, ok := inspectError.(client.RespError); !ok {\n\t\t\treturn inspectError\n\t\t} else if err.Code() != http.StatusNotFound {\n\t\t\treturn inspectError\n\t\t}\n\t}\n\n\tnamedRef, err := reference.Parse(image)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnamedRef = reference.TrimTagForDigest(reference.WithDefaultTagIfMissing(namedRef))\n\n\tvar name, tag string\n\tif reference.IsNameTagged(namedRef) {\n\t\tname, tag = namedRef.Name(), namedRef.(reference.Tagged).Tag()\n\t} else {\n\t\tname = namedRef.String()\n\t}\n\n\tresponseBody, err := apiClient.ImagePull(ctx, name, tag, fetchRegistryAuth(namedRef.Name()))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to pull image: %v\", err)\n\t}\n\tdefer responseBody.Close()\n\n\treturn 
showProgress(responseBody)\n}", "func testFetchURL() {\n\tconst url string = \"http://gopl.io\"\n\tfmt.Printf(\"\\nbytes of url (%s): %d\\n\", url, len(myFetchURL(url)))\n}", "func (reader reader) GetImageFromURLorLocalPath(url, localPath string) (img image.Image, err error) {\n\tswitch {\n\tcase url != \"\":\n\t\treturn reader.ReadFromURL(url)\n\n\tcase localPath != \"\":\n\t\treturn reader.ReadFromLocalPath(localPath)\n\t}\n\treturn nil, errors.New(\"Both url and local path was not provided\")\n}", "func (f MockFetch) Fetch(targetServer Server) ServerStatus {\n\tif targetServer.ID == 196 {\n\t\treturn ServerStatus{targetServer.ID, false, \"404\", targetServer.URL, time.Now()}\n\t}\n\treturn ServerStatus{targetServer.ID, true, \"\", targetServer.URL, time.Now()}\n}", "func FetchRemoteFile() {\n\n}", "func FetchComicImg(url, path string) error {\n\tpath, err := expand(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpath, err = filepath.Abs(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tout, err := os.Create(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer out.Close()\n\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\t_, err = io.Copy(out, resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (imp *Importer) fetchImages() {\n err := downloadImages(\n imp.idPath,\n func(id string, bodyRdr io.Reader) error {\n img, err := jpeg.Decode(bodyRdr)\n if err == nil {\n imp.send(&imagedata.ImageData{Id: id, Data: &img})\n } else {\n log.Printf(\"Error decoding image %s to jpeg\\n\", id)\n }\n return nil\n },\n )\n\n if err != nil { imp.sendErr(err) }\n}", "func ttrPullImageAsserting(ctx context.Context, t *testing.T, client apiclient.APIClient, image string) {\n\terr := ttrPullImage(ctx, client, image)\n\tassert.NilError(t, err)\n}", "func (u Util) PrepareFetch(l *log.Logger, f types.File) *FetchOp {\n\tvar err error\n\tvar expectedSum []byte\n\n\t// explicitly ignoring the error here because the config should already be\n\t// validated by this point\n\turi, _ := url.Parse(f.Contents.Source)\n\n\thasher, err := GetHasher(f.Contents.Verification)\n\tif err != nil {\n\t\tl.Crit(\"Error verifying file %q: %v\", f.Path, err)\n\t\treturn nil\n\t}\n\n\tif hasher != nil {\n\t\t// explicitly ignoring the error here because the config should already\n\t\t// be validated by this point\n\t\t_, expectedSumString, _ := f.Contents.Verification.HashParts()\n\t\texpectedSum, err = hex.DecodeString(expectedSumString)\n\t\tif err != nil {\n\t\t\tl.Crit(\"Error parsing verification string %q: %v\", expectedSumString, err)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tif f.User.Name != \"\" {\n\t\tuser, err := u.userLookup(f.User.Name)\n\t\tif err != nil {\n\t\t\tl.Crit(\"No such user %q: %v\", f.User.Name, err)\n\t\t\treturn nil\n\t\t}\n\t\tuid, err := strconv.ParseInt(user.Uid, 0, 0)\n\t\tif err != nil {\n\t\t\tl.Crit(\"Couldn't parse uid %q: %v\", user.Uid, err)\n\t\t\treturn nil\n\t\t}\n\t\ttmp := int(uid)\n\t\tf.User.ID = &tmp\n\t}\n\tif f.Group.Name != \"\" {\n\t\tg, err := u.groupLookup(f.Group.Name)\n\t\tif err != nil {\n\t\t\tl.Crit(\"No such group %q: %v\", f.Group.Name, err)\n\t\t\treturn nil\n\t\t}\n\t\tgid, err := strconv.ParseInt(g.Gid, 0, 0)\n\t\tif err != nil {\n\t\t\tl.Crit(\"Couldn't parse gid %q: %v\", g.Gid, err)\n\t\t\treturn nil\n\t\t}\n\t\ttmp := int(gid)\n\t\tf.Group.ID = &tmp\n\t}\n\n\treturn &FetchOp{\n\t\tPath: f.Path,\n\t\tHash: hasher,\n\t\tMode: os.FileMode(f.Mode),\n\t\tUid: *f.User.ID,\n\t\tGid: 
*f.Group.ID,\n\t\tUrl: *uri,\n\t\tFetchOptions: resource.FetchOptions{\n\t\t\tHash: hasher,\n\t\t\tCompression: f.Contents.Compression,\n\t\t\tExpectedSum: expectedSum,\n\t\t},\n\t}\n}", "func (f *Fetch) mustFetch(url string) []byte {\n\tb, err := f.fetch(url, true)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error downloading %s: %s\\n\", f.repos[len(f.repos)-1]+url, err)\n\t}\n\treturn b\n}", "func Test_GetImageFromUrl_badUrl(t *testing.T) {\n\tb, err := GetImageFromUrl(\"some-bad-url\")\n\n\tassert.Equal(t, `Error getting image: Get some-bad-url: unsupported protocol scheme \"\"`, err.Error())\n\tassert.Equal(t, []byte(nil), b)\n}", "func (s *FileStore) ImageStatus(name string) (*Image, error) {\n\ts.Lock()\n\tdefer s.Unlock()\n\treturn s.imageStatusUnlocked(name)\n}", "func (suite *APIImageInspectSuite) TestImageInspectOk(c *check.C) {\n\tvar (\n\t\trepo = environment.BusyboxRepo\n\t\ttag = \"1.24\"\n\n\t\tid = \"sha256:ca3d7d608b8a8bbaaac2c350bd0f9588cce0509ada74108d5c4b2afb24c46125\"\n\t\tdig = \"sha256:840f2b98a2540ff1d265782c42543dbec7218d3ab0e73b296d7dac846f146e27\"\n\t)\n\n\trepoTag := fmt.Sprintf(\"%s:%s\", repo, tag)\n\trepoDigest := fmt.Sprintf(\"%s@%s\", repo, dig)\n\n\tfor _, image := range []string{\n\t\tid,\n\t\trepoTag,\n\t\trepoDigest,\n\t\tfmt.Sprintf(\"%s:whatever@%s\", repo, dig),\n\t} {\n\t\tresp, err := request.Get(\"/images/\" + image + \"/json\")\n\t\tc.Assert(err, check.IsNil)\n\t\tCheckRespStatus(c, resp, 200)\n\n\t\tgot := types.ImageInfo{}\n\t\terr = request.DecodeBody(&got, resp.Body)\n\t\tc.Assert(err, check.IsNil)\n\n\t\t// TODO: More specific check is needed\n\t\tc.Assert(got.Config, check.NotNil)\n\t\tc.Assert(got.ID, check.Equals, id)\n\t\tc.Assert(got.CreatedAt, check.NotNil)\n\t\tc.Assert(got.Size, check.NotNil)\n\t\tc.Assert(reflect.DeepEqual(got.RepoTags, []string{repoTag}), check.Equals, true)\n\t\tc.Assert(reflect.DeepEqual(got.RepoDigests, []string{repoDigest}), check.Equals, true)\n\t}\n}", "func (i ImageFetcher) Fetch(path string) (image.Image, error) {\n\tresp, err := http.Get(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\treturn png.Decode(resp.Body)\n}", "func checkImage(url string) bool {\n\treq, err := http.NewRequest(\"HEAD\", url, nil)\n\tif err != nil {\n\t\treturn false\n\t}\n\tvar client http.Client\n\tresp, err := client.Do(req)\n\tif err != nil || len(resp.Header[\"Content-Length\"]) == 0 || len(resp.Header[\"Content-Type\"]) == 0 {\n\t\treturn false\n\t}\n\tdefer resp.Body.Close()\n\tmb, _ := strconv.Atoi(resp.Header[\"Content-Length\"][0])\n\tif mb > 10*1024*1024 {\n\t\treturn false\n\t}\n\tif !strings.HasPrefix(resp.Header[\"Content-Type\"][0], \"image\") {\n\t\treturn false\n\t}\n\treturn true\n}", "func TestRunPrepareFromFile(t *testing.T) {\n\tfoundMsg := \"found image in local store\"\n\timage := \"rkt-inspect-implicit-fetch.aci\"\n\n\timagePath := patchTestACI(image, \"--exec=/inspect\")\n\tdefer os.Remove(imagePath)\n\n\ttests := []string{\n\t\timagePath,\n\t\t\"file://\" + imagePath,\n\t}\n\n\tctx := newRktRunCtx()\n\tdefer ctx.cleanup()\n\n\timportImageAndFetchHash(t, ctx, imagePath)\n\n\tfor _, tt := range tests {\n\n\t\t// 1. 
Try run/prepare with '--local', should not get the $foundMsg, since we will ignore the '--local' when\n\t\t// the image is a filepath.\n\t\tcmds := []string{\n\t\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --mds-register=false --local %s\", ctx.cmd(), tt),\n\t\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare --local %s\", ctx.cmd(), tt),\n\t\t}\n\n\t\tfor _, cmd := range cmds {\n\t\t\tt.Logf(\"Running test %v\", cmd)\n\n\t\t\tchild, err := gexpect.Spawn(cmd)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Cannot exec rkt: %v\", err)\n\t\t\t}\n\n\t\t\tif err := child.Expect(foundMsg); err == nil {\n\t\t\t\tt.Fatalf(\"%q should not be found\", foundMsg)\n\t\t\t}\n\n\t\t\tif err := child.Wait(); err != nil {\n\t\t\t\tt.Fatalf(\"rkt didn't terminate correctly: %v\", err)\n\t\t\t}\n\t\t}\n\n\t\t// 2. Try run/prepare without '--local', should not get $foundMsg either.\n\t\tcmds = []string{\n\t\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --mds-register=false %s\", ctx.cmd(), tt),\n\t\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare %s\", ctx.cmd(), tt),\n\t\t}\n\n\t\tfor _, cmd := range cmds {\n\t\t\tt.Logf(\"Running test %v\", cmd)\n\n\t\t\tchild, err := gexpect.Spawn(cmd)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Cannot exec rkt: %v\", err)\n\t\t\t}\n\t\t\tif err := child.Expect(foundMsg); err == nil {\n\t\t\t\tt.Fatalf(\"%q should not be found\", foundMsg)\n\t\t\t}\n\t\t\tif err := child.Wait(); err != nil {\n\t\t\t\tt.Fatalf(\"rkt didn't terminate correctly: %v\", err)\n\t\t\t}\n\t\t}\n\t}\n}", "func handleLocalRequest(rw http.ResponseWriter, req *http.Request) {\n\tfn, err := etl.GetFilename(req.FormValue(\"filename\"))\n\tif err != nil {\n\t\trw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintf(rw, \"failed to get valid filename= parameter from request\")\n\t\treturn\n\t}\n\n\tdp, err := etl.ValidateTestPath(fn)\n\tif err != nil {\n\t\trw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintf(rw, \"failed to validate test path: %q\", fn)\n\t\treturn\n\t}\n\n\tc, err := storage.GetStorageClient(false)\n\tif err != nil {\n\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintf(rw, \"failed to get storage client\")\n\t\treturn\n\t}\n\n\tctx := context.Background()\n\tobj, err := c.Bucket(dp.Bucket).Object(dp.Path).Attrs(ctx)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintf(rw, \"failed to get object attrs for %s / %s\", dp.Bucket, dp.Path)\n\t\treturn\n\t}\n\n\tr := toRunnable(obj)\n\terr = r.Run(ctx)\n\tif err != nil {\n\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintf(rw, \"runnable failed to run on %s / %s\", dp.Bucket, dp.Path)\n\t\treturn\n\t}\n\n\tfmt.Fprintf(rw, \"no observed errors\")\n}", "func GetImg(fileName string) (*image.Image, error) {\n localFile := fmt.Sprintf(\"/data/edgebox/local/%s\", fileName)\n existingImageFile, err := os.Open(localFile)\n if err == nil {\n defer existingImageFile.Close()\n imageData, _, err := image.Decode(existingImageFile)\n if err != nil {\n return nil, err\n }\n return &imageData, nil\n }\n\n remoteFile := fmt.Sprintf(\"/data/edgebox/remote/%s\", fileName)\n existingImageFile, err = os.Open(remoteFile)\n if err == nil {\n defer existingImageFile.Close()\n imageData, _, err := image.Decode(existingImageFile)\n if err != nil {\n return nil, err\n }\n return &imageData, nil\n }\n return nil, err\n}", "func (sub *Sub) fetchMissingObjects(srpcClient *srpc.Client, image *image.Image,\n\tfreeSpace *uint64, pushComputedFiles bool) (\n\tbool, subStatus) {\n\tif 
image == nil {\n\t\treturn false, statusImageNotReady\n\t}\n\tlogger := sub.herd.logger\n\tsubObj := lib.Sub{\n\t\tHostname: sub.mdb.Hostname,\n\t\tClient: srpcClient,\n\t\tFileSystem: sub.fileSystem,\n\t\tComputedInodes: sub.computedInodes,\n\t\tObjectCache: sub.objectCache,\n\t\tObjectGetter: sub.herd.objectServer}\n\tobjectsToFetch, objectsToPush := lib.BuildMissingLists(subObj, image,\n\t\tpushComputedFiles, false, logger)\n\tif objectsToPush == nil {\n\t\treturn false, statusMissingComputedFile\n\t}\n\tvar returnAvailable bool = true\n\tvar returnStatus subStatus = statusSynced\n\tif len(objectsToFetch) > 0 {\n\t\tif !sub.checkForEnoughSpace(freeSpace, objectsToFetch) {\n\t\t\treturn false, statusNotEnoughFreeSpace\n\t\t}\n\t\tlogger.Printf(\"Calling %s:Subd.Fetch() for: %d objects\\n\",\n\t\t\tsub, len(objectsToFetch))\n\t\terr := client.Fetch(srpcClient, sub.herd.imageManager.String(),\n\t\t\tobjectcache.ObjectMapToCache(objectsToFetch))\n\t\tif err != nil {\n\t\t\tsrpcClient.Close()\n\t\t\tlogger.Printf(\"Error calling %s:Subd.Fetch(): %s\\n\", sub, err)\n\t\t\tif err == srpc.ErrorAccessToMethodDenied {\n\t\t\t\treturn false, statusFetchDenied\n\t\t\t}\n\t\t\treturn false, statusFailedToFetch\n\t\t}\n\t\treturnAvailable = false\n\t\treturnStatus = statusFetching\n\t}\n\tif len(objectsToPush) > 0 {\n\t\tsub.herd.cpuSharer.GrabSemaphore(sub.herd.pushSemaphore)\n\t\tdefer func() { <-sub.herd.pushSemaphore }()\n\t\tsub.status = statusPushing\n\t\terr := lib.PushObjects(subObj, objectsToPush, logger)\n\t\tif err != nil {\n\t\t\tif err == srpc.ErrorAccessToMethodDenied {\n\t\t\t\treturn false, statusPushDenied\n\t\t\t}\n\t\t\tif err == lib.ErrorFailedToGetObject {\n\t\t\t\treturn false, statusFailedToGetObject\n\t\t\t}\n\t\t\treturn false, statusFailedToPush\n\t\t}\n\t\tif returnAvailable {\n\t\t\t// Update local copy of objectcache, since there will not be\n\t\t\t// another Poll() before the update computation.\n\t\t\tfor hashVal := range objectsToPush {\n\t\t\t\tsub.objectCache = append(sub.objectCache, hashVal)\n\t\t\t}\n\t\t}\n\t}\n\treturn returnAvailable, returnStatus\n}", "func (f *File) Fetch(timeout int, concurrent bool) error {\n\terr := validateUrl(f)\n\tif err != nil {\n\t\treturn err\n\t}\n\tisRemote := f.IsRemote()\n\tif !isRemote {\n\t\treturn fetchLocal(f)\n\t} else {\n\t\treturn fetchRemote(f, timeout, concurrent)\n\t}\n\n\treturn nil\n}", "func parseImageLocal(workingDir string, contextDir string) (contextDirOut string, tag string, ok bool) {\n\tif !filepath.IsAbs(contextDir) {\n\t\tcontextDir = filepath.Join(workingDir, contextDir)\n\t}\n\tif _, err := os.Stat(filepath.Join(contextDir, \"Dockerfile\")); os.IsNotExist(err) {\n\t\tlog.Debugf(\"Ignoring missing Dockerfile '%s/Dockerfile'\", contextDir)\n\t\treturn \"\", \"\", false\n\t}\n\n\tsha, _, err := common.FindGitRevision(contextDir)\n\tif err != nil {\n\t\tlog.Warnf(\"Unable to determine git revision: %v\", err)\n\t\tsha = \"latest\"\n\t}\n\treturn contextDir, fmt.Sprintf(\"%s:%s\", filepath.Base(contextDir), sha), true\n}", "func (o *ImageImportManifest) GetLocalImageId() string {\n\tif o == nil || o.LocalImageId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.LocalImageId\n}", "func (f *fetcher) fetch(appName string, aciURL, ascURL string, ascFile *os.File, etag string) (*openpgp.Entity, *os.File, *cacheData, error) {\n\tvar (\n\t\tentity *openpgp.Entity\n\t\tcd *cacheData\n\t)\n\n\tu, err := url.Parse(aciURL)\n\tif err != nil {\n\t\treturn nil, nil, nil, fmt.Errorf(\"error parsing ACI url: %v\", 
err)\n\t}\n\tif u.Scheme == \"docker\" {\n\t\tregistryURL := strings.TrimPrefix(aciURL, \"docker://\")\n\n\t\tstoreTmpDir, err := f.s.TmpDir()\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, fmt.Errorf(\"error creating temporary dir for docker to ACI conversion: %v\", err)\n\t\t}\n\t\ttmpDir, err := ioutil.TempDir(storeTmpDir, \"docker2aci-\")\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t\tdefer os.RemoveAll(tmpDir)\n\n\t\tindexName := docker2aci.GetIndexName(registryURL)\n\t\tuser := \"\"\n\t\tpassword := \"\"\n\t\tif creds, ok := f.dockerAuth[indexName]; ok {\n\t\t\tuser = creds.User\n\t\t\tpassword = creds.Password\n\t\t}\n\t\tacis, err := docker2aci.Convert(registryURL, true, tmpDir, tmpDir, user, password)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, fmt.Errorf(\"error converting docker image to ACI: %v\", err)\n\t\t}\n\n\t\taciFile, err := os.Open(acis[0])\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, fmt.Errorf(\"error opening squashed ACI file: %v\", err)\n\t\t}\n\n\t\treturn nil, aciFile, nil, nil\n\t}\n\n\t// attempt to automatically fetch the public key in case it is available on a TLS connection.\n\tif globalFlags.TrustKeysFromHttps && !globalFlags.InsecureSkipVerify && appName != \"\" {\n\t\tpkls, err := getPubKeyLocations(appName, false, globalFlags.Debug)\n\t\tif err != nil {\n\t\t\tstderr(\"Error determining key location: %v\", err)\n\t\t} else {\n\t\t\t// no http, don't ask user for accepting the key, no overriding\n\t\t\tif err := addKeys(pkls, appName, false, true, false); err != nil {\n\t\t\t\tstderr(\"Error adding keys: %v\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\tvar retrySignature bool\n\tif f.ks != nil && ascFile == nil {\n\t\tu, err := url.Parse(ascURL)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, fmt.Errorf(\"error parsing ASC url: %v\", err)\n\t\t}\n\t\tif u.Scheme == \"file\" {\n\t\t\tascFile, err = os.Open(u.Path)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, nil, fmt.Errorf(\"error opening signature file: %v\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tstderr(\"Downloading signature from %v\\n\", ascURL)\n\t\t\tascFile, err = f.s.TmpFile()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, nil, fmt.Errorf(\"error setting up temporary file: %v\", err)\n\t\t\t}\n\t\t\tdefer os.Remove(ascFile.Name())\n\n\t\t\terr = f.downloadSignatureFile(ascURL, ascFile)\n\t\t\tswitch err {\n\t\t\tcase errStatusAccepted:\n\t\t\t\tretrySignature = true\n\t\t\t\tstderr(\"rkt: server requested deferring the signature download\")\n\t\t\tcase nil:\n\t\t\t\tbreak\n\t\t\tdefault:\n\t\t\t\treturn nil, nil, nil, fmt.Errorf(\"error downloading the signature file: %v\", err)\n\t\t\t}\n\t\t}\n\t\tdefer ascFile.Close()\n\t}\n\n\t// check if the identity used by the signature is in the store before a\n\t// possibly expensive download. 
This is only an optimization and it's\n\t// ok to skip the test if the signature will be downloaded later.\n\tif !retrySignature && f.ks != nil && appName != \"\" {\n\t\tif _, err := ascFile.Seek(0, 0); err != nil {\n\t\t\treturn nil, nil, nil, fmt.Errorf(\"error seeking signature file: %v\", err)\n\t\t}\n\t\tif entity, err = f.ks.CheckSignature(appName, bytes.NewReader([]byte{}), ascFile); err != nil {\n\t\t\tif _, ok := err.(pgperrors.SignatureError); !ok {\n\t\t\t\treturn nil, nil, nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\tvar aciFile *os.File\n\tif u.Scheme == \"file\" {\n\t\taciFile, err = os.Open(u.Path)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, fmt.Errorf(\"error opening ACI file: %v\", err)\n\t\t}\n\t} else {\n\t\taciFile, err = f.s.TmpFile()\n\t\tif err != nil {\n\t\t\treturn nil, aciFile, nil, fmt.Errorf(\"error setting up temporary file: %v\", err)\n\t\t}\n\t\tdefer os.Remove(aciFile.Name())\n\n\t\tif cd, err = f.downloadACI(aciURL, aciFile, etag); err != nil {\n\t\t\treturn nil, nil, nil, fmt.Errorf(\"error downloading ACI: %v\", err)\n\t\t}\n\t\tif cd.useCached {\n\t\t\treturn nil, nil, cd, nil\n\t\t}\n\t}\n\n\tif retrySignature {\n\t\tif err = f.downloadSignatureFile(ascURL, ascFile); err != nil {\n\t\t\treturn nil, aciFile, nil, fmt.Errorf(\"error downloading the signature file: %v\", err)\n\t\t}\n\t}\n\n\tmanifest, err := aci.ManifestFromImage(aciFile)\n\tif err != nil {\n\t\treturn nil, aciFile, nil, err\n\t}\n\t// Check if the downloaded ACI has the correct app name.\n\t// The check is only performed when the aci is downloaded through the\n\t// discovery protocol, but not with local files or full URL.\n\tif appName != \"\" && manifest.Name.String() != appName {\n\t\treturn nil, aciFile, nil,\n\t\t\tfmt.Errorf(\"error when reading the app name: %q expected but %q found\",\n\t\t\t\tappName, manifest.Name.String())\n\t}\n\n\tif f.ks != nil {\n\t\tif _, err := aciFile.Seek(0, 0); err != nil {\n\t\t\treturn nil, aciFile, nil, fmt.Errorf(\"error seeking ACI file: %v\", err)\n\t\t}\n\t\tif _, err := ascFile.Seek(0, 0); err != nil {\n\t\t\treturn nil, aciFile, nil, fmt.Errorf(\"error seeking signature file: %v\", err)\n\t\t}\n\t\tif entity, err = f.ks.CheckSignature(manifest.Name.String(), aciFile, ascFile); err != nil {\n\t\t\treturn nil, aciFile, nil, err\n\t\t}\n\t}\n\n\tif _, err := aciFile.Seek(0, 0); err != nil {\n\t\treturn nil, aciFile, nil, fmt.Errorf(\"error seeking ACI file: %v\", err)\n\t}\n\treturn entity, aciFile, cd, nil\n}", "func maybeQueueBlobFetch(oid, prev string) bool {\n\tselect {\n\tcase internodeTaskQueue <- internodeTask{\n\t\tcmd: fetchObjectCmd,\n\t\toid: oid,\n\t\tprevNode: prev,\n\t}:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}", "func (suite *APIImageInspectSuite) TestImageInspectNotFound(c *check.C) {\n\tresp, err := request.Get(\"/images/\" + \"TestImageInspectNotFound\" + \"/json\")\n\tc.Assert(err, check.IsNil)\n\tCheckRespStatus(c, resp, 404)\n}", "func prepull(ctx context.Context, req types.FunctionDeployment, client *containerd.Client, alwaysPull bool) (containerd.Image, error) {\n\tstart := time.Now()\n\tr, err := reference.ParseNormalizedNamed(req.Image)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\timgRef := reference.TagNameOnly(r).String()\n\n\tsnapshotter := \"\"\n\tif val, ok := os.LookupEnv(\"snapshotter\"); ok {\n\t\tsnapshotter = val\n\t}\n\n\timage, err := service.PrepareImage(ctx, client, imgRef, snapshotter, alwaysPull)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to pull image %s\", 
imgRef)\n\t}\n\n\tsize, _ := image.Size(ctx)\n\tlog.Printf(\"Image for: %s size: %d, took: %fs\\n\", image.Name(), size, time.Since(start).Seconds())\n\n\treturn image, nil\n}", "func FetchFoxPic() string {\n\tvar foxPic string\n\tresp, err := http.Get(\"https://randomfox.ca/floof/\")\n\tif err != nil {\n\t\tlog.Fatal(\"Failed to get fox picture.\")\n\t}\n\n\tdefer resp.Body.Close()\n\tvar result map[string]interface{}\n\n\tjson.NewDecoder(resp.Body).Decode(&result)\n\tif foxPic, ok := result[\"image\"].(string); ok {\n\t\treturn foxPic\n\t}\n\treturn foxPic\n}", "func TestExpectedImgRef(t *testing.T) {\n\n\tv, isSet := os.LookupEnv(\"DOCKERHUB_PROXY\")\n\tif isSet {\n\t\tdefer os.Setenv(\"DOCKERHUB_PROXY\", v)\n\t}\n\n\tos.Unsetenv(\"DOCKERHUB_PROXY\")\n\tassert.Equal(t,\n\t\t\"index.docker.io/library/hello-world@sha256:ebf526c198a14fa138634b9746c50ec38077ec9b3986227e79eb837d26f59dc6\",\n\t\tCompleteImageRef(\"library/hello-world@sha256:ebf526c198a14fa138634b9746c50ec38077ec9b3986227e79eb837d26f59dc6\"))\n\n\tos.Setenv(\"DOCKERHUB_PROXY\", \"my-dockerhub-proxy.tld/dockerhub-proxy\")\n\tassert.Equal(t,\n\t\t\"my-dockerhub-proxy.tld/dockerhub-proxy/library/hello-world@sha256:ebf526c198a14fa138634b9746c50ec38077ec9b3986227e79eb837d26f59dc6\",\n\t\tCompleteImageRef(\"library/hello-world@sha256:ebf526c198a14fa138634b9746c50ec38077ec9b3986227e79eb837d26f59dc6\"))\n\tos.Unsetenv(\"DOCKERHUB_PROXY\")\n}", "func ttrPushImageAsserting(ctx context.Context, t *testing.T, client apiclient.APIClient, image string) {\n\trc, err := client.ImagePush(ctx, ttrImageName(image), types.ImagePushOptions{RegistryAuth: \"{}\"})\n\tassert.Assert(t, err)\n\tif rc != nil {\n\t\tbody, err := readAllAndClose(rc)\n\t\tassert.Assert(t, err)\n\t\tassert.Assert(t, strings.Contains(body, `\"status\":\"latest: digest: `))\n\t}\n}", "func getRemoteImage(url string) ([]byte, error) {\n\tvar image []byte\n\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn image, err\n\t}\n\tdefer resp.Body.Close()\n\n\timage, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn image, err\n\t}\n\n\treturn image, nil\n}", "func imageGet(L *lua.LState) int {\n\tp := checkImage(L)\n\n\tL.Push(lua.LNumber(*p))\n\n\treturn 1\n}", "func (f *SourceFetcher) uniqueFetchSource(c chan FetchResult, url string, namespace string) {\n\t// download temp source\n\ttmpOriginalPath, downloaded, err := f.downloadTempSource(url)\n\tif err != nil {\n\t\tf.notifyDownloadSourceFailed(c, err)\n\t\treturn\n\t}\n\n\t// file hash the image url\n\tmd5, err := info.Info{Path: tmpOriginalPath}.FileHash()\n\tif err != nil {\n\t\tf.notifyDownloadSourceFailed(c, err)\n\t\treturn\n\t}\n\n\t// move file to destination\n\tdestination := f.Paths.LocalOriginalPath(namespace, md5)\n\terr = f.copyImageFromTmp(tmpOriginalPath, destination)\n\tif err != nil {\n\t\tf.notifyDownloadSourceFailed(c, err)\n\t\treturn\n\t}\n\n\t// generate image details\n\timageDetails, err := info.Info{Path: destination}.ImageDetails()\n\tif err != nil {\n\t\tf.notifyDownloadSourceFailed(c, err)\n\t\treturn\n\t}\n\n\tc <- FetchResult{nil, imageDetails, downloaded}\n\tclose(c)\n}", "func checkImage(L *lua.LState) *common.Entity {\n\tud := L.CheckUserData(1)\n\tif v, ok := ud.Value.(*common.Entity); ok {\n\t\treturn v\n\t}\n\tL.ArgError(1, \"image expected\")\n\treturn nil\n}", "func TestBaseImage(t *testing.T) {\n\t// test with 'original.png'\n\timgs := map[string][]byte{\n\t\t\"original.png\": []byte(\"image\"),\n\t}\n\t_, err := backend.baseImage(imgs)\n\tif err != nil 
{\n\t\tt.Errorf(\"Got error %s\", err)\n\t}\n\n\t// test with 'original.jpg'\n\timgs = map[string][]byte{\n\t\t\"original.jpg\": []byte(\"image\"),\n\t}\n\t_, err = backend.baseImage(imgs)\n\tif err != nil {\n\t\tt.Errorf(\"Got error %s\", err)\n\t}\n\n\t// without 'original.*' should get an error\n\timgs = map[string][]byte{\n\t\t\"127x127.png\": []byte(\"image\"),\n\t}\n\t_, err = backend.baseImage(imgs)\n\tif err == nil {\n\t\tt.Errorf(\"Should get an error, didn't pass original image.\")\n\t}\n}", "func (b *ecrBase) runGetImage(ctx context.Context, batchGetImageInput ecr.BatchGetImageInput) (*ecr.Image, error) {\n\t// Allow only a single image to be fetched at a time.\n\tif len(batchGetImageInput.ImageIds) != 1 {\n\t\treturn nil, errGetImageUnhandled\n\t}\n\n\tbatchGetImageInput.RegistryId = aws.String(b.ecrSpec.Registry())\n\tbatchGetImageInput.RepositoryName = aws.String(b.ecrSpec.Repository)\n\n\tlog.G(ctx).WithField(\"batchGetImageInput\", batchGetImageInput).Trace(\"ecr.base.image: requesting images\")\n\n\tbatchGetImageOutput, err := b.client.BatchGetImageWithContext(ctx, &batchGetImageInput)\n\tif err != nil {\n\t\tlog.G(ctx).WithError(err).Error(\"ecr.base.image: failed to get image\")\n\t\treturn nil, err\n\t}\n\tlog.G(ctx).WithField(\"batchGetImageOutput\", batchGetImageOutput).Trace(\"ecr.base.image: api response\")\n\n\t// Summarize image request failures for handled errors. Only the first\n\t// failure is checked as only a single ImageIdentifier is allowed to be\n\t// queried for.\n\tif len(batchGetImageOutput.Failures) > 0 {\n\t\tfailure := batchGetImageOutput.Failures[0]\n\t\tswitch aws.StringValue(failure.FailureCode) {\n\t\t// Requested image with a corresponding tag and digest does not exist.\n\t\t// This failure will generally occur when pushing an updated (or new)\n\t\t// image with a tag.\n\t\tcase ecr.ImageFailureCodeImageTagDoesNotMatchDigest:\n\t\t\tlog.G(ctx).WithField(\"failure\", failure).Debug(\"ecr.base.image: no matching image with specified digest\")\n\t\t\treturn nil, errImageNotFound\n\t\t// Requested image doesn't resolve to a known image. 
A new image will\n\t\t// result in an ImageNotFound error when checked before push.\n\t\tcase ecr.ImageFailureCodeImageNotFound:\n\t\t\tlog.G(ctx).WithField(\"failure\", failure).Debug(\"ecr.base.image: no image found\")\n\t\t\treturn nil, errImageNotFound\n\t\t// Requested image identifiers are invalid.\n\t\tcase ecr.ImageFailureCodeInvalidImageDigest, ecr.ImageFailureCodeInvalidImageTag:\n\t\t\tlog.G(ctx).WithField(\"failure\", failure).Error(\"ecr.base.image: invalid image identifier\")\n\t\t\treturn nil, reference.ErrInvalid\n\t\t// Unhandled failure reported for image request made.\n\t\tdefault:\n\t\t\tlog.G(ctx).WithField(\"failure\", failure).Warn(\"ecr.base.image: unhandled image request failure\")\n\t\t\treturn nil, errGetImageUnhandled\n\t\t}\n\t}\n\n\treturn batchGetImageOutput.Images[0], nil\n}", "func (j *DSGitHub) FetchRaw(ctx *Ctx) (err error) {\n\tPrintf(\"%s should use generic FetchRaw()\\n\", j.DS)\n\treturn\n}", "func ImageExistsLocally(ctx context.Context, imageName string, platform string) (bool, error) {\n\treturn false, errors.New(\"Unsupported Operation\")\n}", "func isImageImportSupported(cloud string) (bool, error) {\n\t// More information about the Discovery API:\n\t// https://docs.openstack.org/api-ref/image/v2/?expanded=#image-service-info-discovery\n\tlogrus.Debugln(\"Checking if the image import mechanism is supported\")\n\n\tconn, err := clientconfig.NewServiceClient(\"image\", openstackdefaults.DefaultClientOpts(cloud))\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\ts, err := imageimport.Get(conn).Extract()\n\tif err != nil {\n\t\t// ErrDefault404 means that image discovery API is not available for the cloud\n\t\tif _, ok := err.(gophercloud.ErrDefault404); ok {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, err\n\t}\n\n\t// Next check is just to make sure the response data was not corrupted\n\tif s.ImportMethods.Type != \"array\" {\n\t\treturn false, nil\n\t}\n\n\tfor _, method := range s.ImportMethods.Value {\n\t\tif method == string(imageimport.GlanceDirectMethod) {\n\t\t\tlogrus.Debugln(\"Glance Direct image import plugin was found\")\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\tlogrus.Debugln(\"Glance Direct image import plugin was not found\")\n\treturn false, nil\n}", "func LookupMyImage(connConfig string, myImageId string) (SpiderMyImageInfo, error) {\n\n\tif connConfig == \"\" {\n\t\terr := fmt.Errorf(\"LookupMyImage() called with empty connConfig.\")\n\t\tcommon.CBLog.Error(err)\n\t\treturn SpiderMyImageInfo{}, err\n\t} else if myImageId == \"\" {\n\t\terr := fmt.Errorf(\"LookupMyImage() called with empty myImageId.\")\n\t\tcommon.CBLog.Error(err)\n\t\treturn SpiderMyImageInfo{}, err\n\t}\n\n\turl := common.SpiderRestUrl + \"/myimage/\" + url.QueryEscape(myImageId)\n\n\t// Create Req body\n\ttempReq := common.SpiderConnectionName{}\n\ttempReq.ConnectionName = connConfig\n\n\tclient := resty.New().SetCloseConnection(true)\n\tclient.SetAllowGetMethodPayload(true)\n\n\tresp, err := client.R().\n\t\tSetHeader(\"Content-Type\", \"application/json\").\n\t\tSetBody(tempReq).\n\t\tSetResult(&SpiderMyImageInfo{}). // or SetResult(AuthSuccess{}).\n\t\t//SetError(&AuthError{}). 
// or SetError(AuthError{}).\n\t\tGet(url)\n\n\tif err != nil {\n\t\tcommon.CBLog.Error(err)\n\t\terr := fmt.Errorf(\"an error occurred while requesting to CB-Spider\")\n\t\treturn SpiderMyImageInfo{}, err\n\t}\n\n\tfmt.Println(string(resp.Body()))\n\n\tfmt.Println(\"HTTP Status code: \" + strconv.Itoa(resp.StatusCode()))\n\tswitch {\n\tcase resp.StatusCode() >= 400 || resp.StatusCode() < 200:\n\t\terr := fmt.Errorf(string(resp.Body()))\n\t\tcommon.CBLog.Error(err)\n\t\treturn SpiderMyImageInfo{}, err\n\t}\n\n\ttemp := resp.Result().(*SpiderMyImageInfo)\n\treturn *temp, nil\n\n}", "func TestRemote(t *testing.T) {\n\trnd, err := random.Image(1024, 3)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ts, err := registry.TLS(\"gcr.io\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ttr := s.Client().Transport\n\n\tsrc := \"gcr.io/test/compressed\"\n\tref, err := name.ParseReference(src)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := remote.Write(ref, rnd, remote.WithTransport(tr)); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\timg, err := remote.Image(ref, remote.WithTransport(tr))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := validate.Image(img); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcf, err := img.ConfigFile()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tm, err := img.Manifest()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tlayer, err := img.LayerByDiffID(cf.RootFS.DiffIDs[0])\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\td, err := layer.Digest()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif diff := cmp.Diff(d, m.Layers[0].Digest); diff != \"\" {\n\t\tt.Errorf(\"mismatched digest: %v\", diff)\n\t}\n}", "func testDownloadImages(ctx context.Context, t *testing.T, downloadCh chan<- downloadRequest, addr, ccvmDir string) {\n\twkld := &workload{\n\t\tspec: workloadSpec{\n\t\t\tBaseImageURL: \"http://\" + addr + \"/download/image\",\n\t\t\tBIOS: \"http://\" + addr + \"/download/bios\",\n\t\t},\n\t}\n\n\tresultCh := make(chan interface{})\n\tgo func() {\n\t\timg, bios, err := downloadImages(ctx, wkld, http.DefaultTransport.(*http.Transport),\n\t\t\tresultCh, downloadCh)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Failed to download images: %v\", err)\n\t\t}\n\t\tif len(img) == 0 || len(bios) == 0 {\n\t\t\tt.Errorf(\"One the paths is empty img=%s bios=%s\", img, bios)\n\t\t}\n\t\tclose(resultCh)\n\t}()\n\n\tfor range resultCh {\n\t}\n\n\twkld.spec.BIOS = \"ftp://\" + addr + \"/download/bios\"\n\tresultCh = make(chan interface{})\n\tgo func() {\n\t\t_, _, err := downloadImages(ctx, wkld, http.DefaultTransport.(*http.Transport),\n\t\t\tresultCh, downloadCh)\n\t\tif err == nil {\n\t\t\tt.Errorf(\"Expected downloadImages with bad BIOS URL to fail\")\n\t\t}\n\t\tclose(resultCh)\n\t}()\n\n\tfor range resultCh {\n\t}\n}", "func TestFetcher_Disk(t *testing.T) {\n\tfetcher := (&diskFetcher{\"../fixtures\"}).fetch\n\n\tfakeSites := []struct {\n\t\turl string\n\t\texpected string\n\t}{\n\t\t{\"https://www.404s.com/nope.txt\", \"\"},\n\t\t{\"https://www.404s.com/hello.txt\", \"Hello, world!\"},\n\t}\n\tfor _, site := range fakeSites {\n\t\tt.Run(site.url, func(t *testing.T) {\n\t\t\tactual := fetchAsString(t, fetcher, site.url)\n\t\t\trequire.Equal(t, site.expected, actual)\n\t\t})\n\t}\n}", "func (j *DSRocketchat) FetchRaw(ctx *Ctx) (err error) {\n\tPrintf(\"%s should use generic FetchRaw()\\n\", j.DS)\n\treturn\n}", "func (a *API) ImportImage(format, bucket, object, image_size, device, name, description, architecture string, force bool) (string, error) {\n\timages, err := a.GetImages(name)\n\tif err != 
nil {\n\t\treturn \"\", fmt.Errorf(\"getting images: %v\", err)\n\t}\n\n\tfor _, image := range images.Images.Image {\n\t\tif force {\n\t\t\tplog.Infof(\"deleting pre-existing image %v\", image.ImageId)\n\t\t\terr = a.DeleteImage(image.ImageId, force)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", fmt.Errorf(\"deleting image %v: %v\", image.ImageId, err)\n\t\t\t}\n\t\t} else {\n\t\t\t// save time & re-use the existing image but inform the user\n\t\t\tplog.Infof(\"reusing existing image %v\", image.ImageId)\n\t\t\treturn image.ImageId, nil\n\t\t}\n\t}\n\n\trequest := ecs.CreateImportImageRequest()\n\trequest.Scheme = \"https\"\n\trequest.DiskDeviceMapping = &[]ecs.ImportImageDiskDeviceMapping{\n\t\t{\n\t\t\tFormat: format,\n\t\t\tOSSBucket: bucket,\n\t\t\tOSSObject: object,\n\t\t\tDiskImageSize: image_size,\n\t\t\tDevice: device,\n\t\t},\n\t}\n\trequest.ImageName = name\n\trequest.Description = description\n\trequest.Architecture = architecture\n\n\tplog.Infof(\"importing image\")\n\tresponse, err := a.ecs.ImportImage(request)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"importing image: %v\", err)\n\t}\n\n\treturn a.finishImportImageTask(response)\n}", "func (f *finder) findImage(img string, asc string, discover bool) (*types.Hash, error) {\n\t// check if it is a valid hash, if so let it pass through\n\th, err := types.NewHash(img)\n\tif err == nil {\n\t\tfullKey, err := f.s.ResolveKey(img)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not resolve key: %v\", err)\n\t\t}\n\t\th, err = types.NewHash(fullKey)\n\t\tif err != nil {\n\t\t\t// should never happen\n\t\t\tpanic(err)\n\t\t}\n\t\treturn h, nil\n\t}\n\n\t// try fetching the image, potentially remotely\n\tft := &fetcher{\n\t\timageActionData: f.imageActionData,\n\t\tlocal: f.local,\n\t\twithDeps: f.withDeps,\n\t}\n\tkey, err := ft.fetchImage(img, asc, discover)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\th, err = types.NewHash(key)\n\tif err != nil {\n\t\t// should never happen\n\t\tpanic(err)\n\t}\n\n\treturn h, nil\n}", "func (c *Client) DetectImpurityImage(r io.Reader) (ImageData, error) {\n\tdata := ImageData{}\n\tvar b bytes.Buffer\n\tw := multipart.NewWriter(&b)\n\tdefer w.Close()\n\n\tfw, err := w.CreateFormFile(\"file_image\", \"picpurify\")\n\tif err != nil {\n\t\treturn data, err\n\t}\n\tif _, err = io.Copy(fw, r); err != nil {\n\t\treturn data, err\n\t}\n\n\tfw, err = w.CreateFormField(\"API_KEY\")\n\tif err != nil {\n\t\treturn data, err\n\t}\n\tif _, err = fw.Write([]byte(c.Key)); err != nil {\n\t\treturn data, err\n\t}\n\n\tif fw, err = w.CreateFormField(\"task\"); err != nil {\n\t\treturn data, err\n\t}\n\tif _, err = fw.Write([]byte(c.Tasks)); err != nil {\n\t\treturn data, err\n\t}\n\n\treq, err := http.NewRequest(http.MethodPost, c.ImageURL, &b)\n\tif err != nil {\n\t\treturn data, errors.Wrap(err, \"picpurify client: creating request\")\n\t}\n\treq.ContentLength = int64(len(b.Bytes()))\n\treq.Header.Set(\"Content-Type\", w.FormDataContentType())\n\n\tvar bytes []byte\n\tresp, err := c.Client.Do(req)\n\tif err != nil {\n\t\treturn data, errors.Wrap(err, \"picpurify client: executing request\")\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn data, errors.Errorf(\"got unexpected http code: %d\", resp.StatusCode)\n\t}\n\n\tbytes, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn data, errors.Wrap(err, \"picpurify client: reading response body\")\n\t}\n\n\tif err != nil {\n\t\treturn data, errors.New(\"retrieving data from picpurify\")\n\t}\n\terr = 
json.Unmarshal(bytes, &data)\n\treturn data, err\n}", "func DownloadLayersFromLocalDocker(digest string) (io.ReadCloser, error) {\n\tctx := context.Background()\n\tcli, err := client.NewClientWithOpts(client.FromEnv)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcli.NegotiateAPIVersion(ctx)\n\thttpClient := cli.HTTPClient()\n\n\turl := \"http://v\" + cli.ClientVersion() + \"/images/\" + digest + \"/get\"\n\treq, err := http.NewRequest(http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tres, err := httpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn res.Body, nil\n}", "func (f FetchStruct) windowsFetch() *model.FetchResult {\n\tvar args []string\n\n\tremoteName := f.RemoteName\n\trepoPath := f.RepoPath\n\tbranch := f.RemoteBranch\n\n\tif remoteName == \"\" && branch == \"\" {\n\t\targs = []string{\"fetch\"}\n\t} else {\n\t\tbranchReference := branch + \":\" + branch\n\t\targs = []string{\"fetch\", remoteName, branchReference}\n\t}\n\tcmd := utils.GetGitClient(repoPath, args)\n\tcmdStr, cmdErr := cmd.Output()\n\n\tif cmdErr != nil {\n\t\tlogger.Log(fmt.Sprintf(\"Fetch failed -> %s\", cmdErr.Error()), global.StatusError)\n\n\t\treturn &model.FetchResult{\n\t\t\tStatus: global.FetchFromRemoteError,\n\t\t\tFetchedItems: nil,\n\t\t}\n\t} else {\n\t\tlogger.Log(fmt.Sprintf(\"Changes fetched from remote - %s\\n%s\", remoteName, cmdStr), global.StatusInfo)\n\n\t\tmsg := fmt.Sprintf(\"Changes fetched from remote %v\", remoteName)\n\t\treturn &model.FetchResult{\n\t\t\tStatus: \"CHANGES FETCHED FROM REMOTE\",\n\t\t\tFetchedItems: []*string{&msg},\n\t\t}\n\t}\n}", "func fetchFile(filename string, url string, token string) (err error) {\n\tfmt.Printf(\"fetching file name=%s, url=%s\\n\", filename, url)\n\tlocalfile, err := os.Create(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer localfile.Close()\n\n\tvar user *httpclient.Auth\n\tif token != \"\" {\n\t\tuser = httpclient.GetUserByTokenAuth(token)\n\t}\n\n\t//download file from Shock\n\tres, err := httpclient.Get(url, httpclient.Header{}, nil, user)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer res.Body.Close()\n\n\tif res.StatusCode != 200 { //err in fetching data\n\t\tresbody, _ := ioutil.ReadAll(res.Body)\n\t\tmsg := fmt.Sprintf(\"op=fetchFile, url=%s, res=%s\", url, resbody)\n\t\treturn errors.New(msg)\n\t}\n\n\t_, err = io.Copy(localfile, res.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn\n}", "func (s *Client) Image(fileID string, page int) (file []byte, err error) {\n\tif page <= 0 {\n\t\tpage = 1\n\t}\n\tqueryParam := fmt.Sprintf(\"?page=%d\", page)\n\turl := strings.Join([]string{s.config.apiBaseURL, \"/result/image/\", fileID, queryParam}, \"\")\n\n\tlog.Printf(\"get image url %s\", url)\n\treq, err := http.NewRequest(\"GET\", url, strings.NewReader(\"\"))\n\tif err != nil {\n\t\treturn\n\t}\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\treq.Header.Add(\"Authorization\", strings.Join([]string{\"Bearer \", s.getToken()}, \"\"))\n\n\tres, err := s.httpClient.Do(req)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer res.Body.Close()\n\tfile, err = ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn\n}", "func (s *SharemeService) ImageURL(c *gae.Context, key string) string {\n\tobj := s.Get(c, key)\n\tif bb, ok := obj.(*BlobBinary); ok && strings.HasPrefix(bb.MimeType(), \"image\") {\n\t\tif url, _, err := imgurl.UrlifyR(bb, bb.MimeType(), 0, 0); err == nil {\n\t\t\treturn url\n\t\t} else {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\treturn \"\"\n}", "func 
(re *registryV1Endpoint) FetchLayers(img ImageRef, dest string) ([]string, error) {\n\temptySet := []string{}\n\tif _, ok := re.tokens[img.Name()]; !ok {\n\t\tif _, err := re.Token(img); err != nil {\n\t\t\treturn emptySet, err\n\t\t}\n\t}\n\tif img.ID() == \"\" {\n\t\tif _, err := re.ImageID(img); err != nil {\n\t\t\treturn emptySet, err\n\t\t}\n\t}\n\tif len(img.Ancestry()) == 0 {\n\t\tif _, err := re.Ancestry(img); err != nil {\n\t\t\treturn emptySet, err\n\t\t}\n\t}\n\n\tendpoint := re.host\n\tif len(re.endpoints) > 0 {\n\t\tendpoint = re.endpoints[0]\n\t}\n\tfor _, id := range img.Ancestry() {\n\t\tlogrus.Debugf(\"Fetching layer %s\", id)\n\t\tif err := os.MkdirAll(path.Join(dest, id), 0755); err != nil {\n\t\t\treturn emptySet, err\n\t\t}\n\t\t// get the json file first\n\t\terr := func() error {\n\t\t\turl := fmt.Sprintf(\"https://%s/v1/images/%s/json\", endpoint, id)\n\t\t\treq, err := http.NewRequest(\"GET\", url, nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treq.Header.Add(\"Authorization\", fmt.Sprintf(\"Token %s\", re.tokens[img.Name()]))\n\n\t\t\tresp, err := http.DefaultClient.Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\n\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\treturn fmt.Errorf(\"Get(%q) returned %q\", url, resp.Status)\n\t\t\t}\n\n\t\t\t//logrus.Debugf(\"%#v\", resp)\n\t\t\tfh, err := os.Create(path.Join(dest, id, \"json\"))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer fh.Close()\n\t\t\tif _, err := io.Copy(fh, resp.Body); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t}()\n\t\tif err != nil {\n\t\t\treturn emptySet, err\n\t\t}\n\n\t\t// get the layer file next\n\t\terr = func() error {\n\t\t\turl := fmt.Sprintf(\"https://%s/v1/images/%s/layer\", endpoint, id)\n\t\t\treq, err := http.NewRequest(\"GET\", url, nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tlogrus.Debugf(\"%q\", fmt.Sprintf(\"Token %s\", re.tokens[img.Name()]))\n\t\t\treq.Header.Add(\"Authorization\", fmt.Sprintf(\"Token %s\", re.tokens[img.Name()]))\n\n\t\t\tresp, err := http.DefaultClient.Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\n\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\treturn fmt.Errorf(\"Get(%q) returned %q\", url, resp.Status)\n\t\t\t}\n\n\t\t\tlogrus.Debugf(\"[FetchLayers] ended up at %q\", resp.Request.URL.String())\n\t\t\tlogrus.Debugf(\"[FetchLayers] response %#v\", resp)\n\t\t\tfh, err := os.Create(path.Join(dest, id, \"layer.tar\"))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer fh.Close()\n\t\t\tif _, err := io.Copy(fh, resp.Body); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t}()\n\t\tif err != nil {\n\t\t\treturn emptySet, err\n\t\t}\n\t}\n\n\treturn img.Ancestry(), nil\n}", "func (c *TestClient) GetImage(project, name string) (*compute.Image, error) {\n\tif c.GetImageFn != nil {\n\t\treturn c.GetImageFn(project, name)\n\t}\n\treturn c.client.GetImage(project, name)\n}", "func (s *Sync) fetchLocalIdx() (*index.Index, error) {\n\treturn index.NewIndexFiles(filepath.Join(s.opts.WorkDir, \"data\"))\n}", "func testImage(tmpl *detect.FeatTmpl, annot inria.Annot, dir string, opts DetectOpts, mininter float64) (*detect.ResultSet, error) {\n\tim, err := loadImage(path.Join(dir, annot.Image))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Get detections.\n\tdets := detectImage(tmpl, im, opts.Margin, opts.PyrStep, opts.HOGBin, opts.LocalMax, opts.MaxIOU)\n\tval := 
detect.ValidateMatch(dets, annot.Rects, mininter)\n\treturn val, nil\n}", "func (r *RepoRef) IsLocal() bool {\n\treturn r.Path != \"\"\n}", "func _detectRemoteURL_LocalGit(path string) (string, error) {\n\tcmd := exec.Command(\"git\", \"config\", \"--get\", \"remote.origin.url\")\n\toutput, err := cmd.Output()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(bytes.TrimSpace(output)), nil\n}", "func checkImage(image liferay.Image) {\n\texists := docker.CheckDockerImageExists(image.GetFullyQualifiedName())\n\n\tif exists == false {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"image\": image.GetFullyQualifiedName(),\n\t\t}).Warn(\"Image has NOT been pulled from Docker Hub\")\n\t\treturn\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"image\": image.GetFullyQualifiedName(),\n\t}).Info(\"Image has been pulled from Docker Hub\")\n}", "func (o *PcloudImagesGetallNotFound) IsSuccess() bool {\n\treturn false\n}", "func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (_ Image, retErr error) {\n\tpullCtx := defaultRemoteContext()\n\tfor _, o := range opts {\n\t\tif err := o(c, pullCtx); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif pullCtx.PlatformMatcher == nil {\n\t\tif len(pullCtx.Platforms) > 1 {\n\t\t\treturn nil, errors.New(\"cannot pull multiplatform image locally, try Fetch\")\n\t\t} else if len(pullCtx.Platforms) == 0 {\n\t\t\tpullCtx.PlatformMatcher = c.platform\n\t\t} else {\n\t\t\tp, err := platforms.Parse(pullCtx.Platforms[0])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid platform %s: %w\", pullCtx.Platforms[0], err)\n\t\t\t}\n\n\t\t\tpullCtx.PlatformMatcher = platforms.Only(p)\n\t\t}\n\t}\n\n\tctx, done, err := c.WithLease(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer done(ctx)\n\n\tvar unpacks int32\n\tvar unpackEg *errgroup.Group\n\tvar unpackWrapper func(f images.Handler) images.Handler\n\n\tif pullCtx.Unpack {\n\t\t// unpacker only supports schema 2 image, for schema 1 this is noop.\n\t\tu, err := c.newUnpacker(ctx, pullCtx)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"create unpacker: %w\", err)\n\t\t}\n\t\tunpackWrapper, unpackEg = u.handlerWrapper(ctx, pullCtx, &unpacks)\n\t\tdefer func() {\n\t\t\tif err := unpackEg.Wait(); err != nil {\n\t\t\t\tif retErr == nil {\n\t\t\t\t\tretErr = fmt.Errorf(\"unpack: %w\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\twrapper := pullCtx.HandlerWrapper\n\t\tpullCtx.HandlerWrapper = func(h images.Handler) images.Handler {\n\t\t\tif wrapper == nil {\n\t\t\t\treturn unpackWrapper(h)\n\t\t\t}\n\t\t\treturn unpackWrapper(wrapper(h))\n\t\t}\n\t}\n\n\timg, err := c.fetch(ctx, pullCtx, ref, 1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// NOTE(fuweid): unpacker defers blobs download. 
before create image\n\t// record in ImageService, should wait for unpacking(including blobs\n\t// download).\n\tif pullCtx.Unpack {\n\t\tif unpackEg != nil {\n\t\t\tif err := unpackEg.Wait(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\timg, err = c.createNewImage(ctx, img)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ti := NewImageWithPlatform(c, img, pullCtx.PlatformMatcher)\n\n\tif pullCtx.Unpack {\n\t\tif unpacks == 0 {\n\t\t\t// Try to unpack is none is done previously.\n\t\t\t// This is at least required for schema 1 image.\n\t\t\tif err := i.Unpack(ctx, pullCtx.Snapshotter, pullCtx.UnpackOpts...); err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to unpack image on snapshotter %s: %w\", pullCtx.Snapshotter, err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn i, nil\n}", "func (imageService Service) ImageStatus(Id string) (Status string, err error) {\n\n\n\turl := strings.TrimSuffix(imageService.URL, \"/\") + \"/images/\" + Id\n\tvar headers http.Header\n\theaders, err = misc.GetHeader(url, imageService.TokenID, imageService.Client)\n\tif err != nil {\n\t\treturn \"\", err\n\t} else {\n\t\tfor header, value := range headers {\n\t\t\t//log.Printf (\"header '%s'='%s'\", header, value[0])\n\t\t\tif strings.ToLower(header) == \"x-image-meta-status\" {\n\t\t\t\tStatus = value[0]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\treturn Status, nil\n}", "func (o *LocalDatabaseProvider) GetBlockLocalDnsRequestsOk() (*bool, bool) {\n\tif o == nil || o.BlockLocalDnsRequests == nil {\n\t\treturn nil, false\n\t}\n\treturn o.BlockLocalDnsRequests, true\n}", "func (r *ReconcileCapability) fetch(err error) (reconcile.Result, error) {\n\tif errors.IsNotFound(err) {\n\t\t// Return and don't create\n\t\tr.reqLogger.Info(\"component resource not found. Ignoring since object must be deleted\")\n\t\treturn reconcile.Result{}, nil\n\t}\n\t// Error reading the object - create the request.\n\tr.reqLogger.Error(err, \"Failed to get Component\")\n\treturn reconcile.Result{}, err\n}", "func (j *DSGitHub) CustomFetchRaw() bool {\n\treturn false\n}", "func (o *ProdutoVM) GetRefImagemOk() (*int64, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.RefImagem.Get(), o.RefImagem.IsSet()\n}", "func findImage(n *hetznerNodeGroup, serverType *hcloud.ServerType) (*hcloud.Image, error) {\n\t// Select correct image based on server type architecture\n\timage, _, err := n.manager.client.Image.GetForArchitecture(context.TODO(), n.manager.image, serverType.Architecture)\n\tif err != nil {\n\t\t// Keep looking for label if image was not found by id or name\n\t\tif !strings.HasPrefix(err.Error(), \"image not found\") {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif image != nil {\n\t\treturn image, nil\n\t}\n\n\t// Look for snapshot with label\n\timages, err := n.manager.client.Image.AllWithOpts(context.TODO(), hcloud.ImageListOpts{\n\t\tType: []hcloud.ImageType{hcloud.ImageTypeSnapshot},\n\t\tStatus: []hcloud.ImageStatus{hcloud.ImageStatusAvailable},\n\t\tSort: []string{\"created:desc\"},\n\t\tArchitecture: []hcloud.Architecture{serverType.Architecture},\n\t\tListOpts: hcloud.ListOpts{\n\t\t\tLabelSelector: n.manager.image,\n\t\t},\n\t})\n\n\tif err != nil || len(images) == 0 {\n\t\treturn nil, fmt.Errorf(\"unable to find image %s with architecture %s: %v\", n.manager.image, serverType.Architecture, err)\n\t}\n\n\treturn images[0], nil\n}", "func PullImage(image, cacheDir string) (v1.Image, error) {\n var options []crane.Option\n\n // options = append(options, crane.Insecure)\n\n // Use current built OS and 
architecture\n options = append(options, crane.WithPlatform(&v1.Platform{\n OS: runtime.GOOS,\n Architecture: runtime.GOARCH,\n }))\n\n // Grab the remote manifest\n manifest, err := crane.Manifest(image, options...)\n if err != nil {\n return nil, fmt.Errorf(\"failed fetching manifest for %s: %v\", image, err)\n }\n\n if !gjson.Valid(string(manifest)) {\n return nil, fmt.Errorf(\"Cannot parse manifest: %s\", string(manifest))\n }\n\n value := gjson.Get(string(manifest), \"config.digest\").Value().(string)\n if value == \"\" {\n return nil, fmt.Errorf(\"Malformed manifest: %s\", string(manifest))\n }\n \n digest := strings.Split(value, \":\")[1]\n tarball := fmt.Sprintf(\"%s/%s.tar.gz\", cacheDir, digest)\n\n // Download the tarball of the image if not available in the cache\n if _, err := os.Stat(tarball); os.IsNotExist(err) {\n // Create the cacheDir if it does not already exist\n if cacheDir != \"\" {\n if _, err := os.Stat(cacheDir); os.IsNotExist(err) {\n os.MkdirAll(cacheDir, os.ModePerm)\n }\n }\n \n // Pull the image\n img, err := crane.Pull(image, options...)\n if err != nil {\n return nil, fmt.Errorf(\"Could not pull image: %s\", err)\n }\n \n f, err := os.Create(tarball)\n if err != nil {\n return nil, fmt.Errorf(\"Failed to open %s: %v\", tarball, err)\n }\n \n defer f.Close()\n \n err = crane.Save(img, image, tarball)\n if err != nil {\n return nil, fmt.Errorf(\"Could not save image: %s\", err)\n }\n }\n\n img, err := crane.Load(tarball)\n if err != nil {\n return nil, fmt.Errorf(\"Could not load image: %s\", err)\n }\n\n return img, nil\n}", "func (c *APODClient) FetchImageURLs(count int) ([]string, error) {\n\tvar urls []string\n\tdate := time.Now()\n\n\t// make the request\n\tfor i := 0; len(urls) < count; i++ {\n\t\tdate = date.AddDate(0, 0, -i)\n\t\tresp, err := http.Get(c.buildURL(date))\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"error fetching data from APOD API\")\n\t\t}\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\treturn nil, fmt.Errorf(\"Received non-200 status code %d\", resp.StatusCode)\n\t\t}\n\n\t\t// parse the response\n\t\tvar imageMeta APODImageMeta\n\t\tif err := json.NewDecoder(resp.Body).Decode(&imageMeta); err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"error parsing API response\")\n\t\t}\n\n\t\tif imageMeta.MediaType != APODTypeImage {\n\t\t\t// we only want images\n\t\t\tcontinue\n\t\t}\n\t\turls = append(urls, imageMeta.URL)\n\t}\n\n\treturn urls, nil\n}", "func (j *DSRocketchat) CustomFetchRaw() bool {\n\treturn false\n}", "func (o *ContainerSpec) GetImageOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Image, true\n}", "func smartFetch(cl *client.Client, targ string, br *blobref.BlobRef) error {\n\tif *flagVerbose {\n\t\tlog.Printf(\"Fetching %v into %q\", br, targ)\n\t}\n\n\trc, err := fetch(cl, br)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer rc.Close()\n\n\tsniffer := new(index.BlobSniffer)\n\t_, err = io.CopyN(sniffer, rc, sniffSize)\n\tif err != nil && err != io.EOF {\n\t\treturn err\n\t}\n\n\tsniffer.Parse()\n\tsc, ok := sniffer.Superset()\n\n\tif !ok {\n\t\t// opaque data - put it in a file\n\t\tf, err := os.Create(targ)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"opaque: %v\", err)\n\t\t}\n\t\tdefer f.Close()\n\t\tbody, _ := sniffer.Body()\n\t\tr := io.MultiReader(bytes.NewBuffer(body), rc)\n\t\t_, err = io.Copy(f, r)\n\t\treturn err\n\t}\n\n\tsc.BlobRef = br\n\n\tswitch sc.Type {\n\tcase \"directory\":\n\t\tdir := filepath.Join(targ, sc.FileName)\n\t\tif err := os.MkdirAll(dir, 
sc.FileMode()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := setFileMeta(dir, sc); err != nil {\n\t\t\tlog.Print(err)\n\t\t}\n\t\tentries := blobref.Parse(sc.Entries)\n\t\tif entries == nil {\n\t\t\treturn fmt.Errorf(\"bad entries blobref: %v\", sc.Entries)\n\t\t}\n\t\treturn smartFetch(cl, dir, entries)\n\tcase \"static-set\":\n\t\t// directory entries\n\t\tfor _, m := range sc.Members {\n\t\t\tdref := blobref.Parse(m)\n\t\t\tif dref == nil {\n\t\t\t\treturn fmt.Errorf(\"bad member blobref: %v\", m)\n\t\t\t}\n\t\t\tif err := smartFetch(cl, targ, dref); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\tcase \"file\":\n\t\tname := filepath.Join(targ, sc.FileName)\n\t\tf, err := os.Create(name)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"file type: %v\", err)\n\t\t}\n\t\tdefer f.Close()\n\t\tseekFetcher := blobref.SeekerFromStreamingFetcher(cl)\n\t\tfr, err := schema.NewFileReader(seekFetcher, br)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"NewFileReader: %v\", err)\n\t\t}\n\t\tdefer fr.Close()\n\n\t\tif err := setFileMeta(name, sc); err != nil {\n\t\t\tlog.Print(err)\n\t\t}\n\t\treturn nil\n\tdefault:\n\t\treturn errors.New(\"unknown blob type: \" + sc.Type)\n\t}\n\tpanic(\"unreachable\")\n}", "func smartFetch(cl *client.Client, targ string, br *blobref.BlobRef) error {\n\tif *flagVerbose {\n\t\tlog.Printf(\"Fetching %v into %q\", br, targ)\n\t}\n\n\trc, err := fetch(cl, br)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer rc.Close()\n\n\tsniffer := new(index.BlobSniffer)\n\t_, err = io.CopyN(sniffer, rc, sniffSize)\n\tif err != nil && err != io.EOF {\n\t\treturn err\n\t}\n\n\tsniffer.Parse()\n\tsc, ok := sniffer.Superset()\n\n\tif !ok {\n\t\t// opaque data - put it in a file\n\t\tf, err := os.Create(targ)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"opaque: %v\", err)\n\t\t}\n\t\tdefer f.Close()\n\t\tbody, _ := sniffer.Body()\n\t\tr := io.MultiReader(bytes.NewBuffer(body), rc)\n\t\t_, err = io.Copy(f, r)\n\t\treturn err\n\t}\n\n\tsc.BlobRef = br\n\n\tswitch sc.Type {\n\tcase \"directory\":\n\t\tdir := filepath.Join(targ, sc.FileName)\n\t\tif err := os.MkdirAll(dir, sc.FileMode()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := setFileMeta(dir, sc); err != nil {\n\t\t\tlog.Print(err)\n\t\t}\n\t\tentries := blobref.Parse(sc.Entries)\n\t\tif entries == nil {\n\t\t\treturn fmt.Errorf(\"bad entries blobref: %v\", sc.Entries)\n\t\t}\n\t\treturn smartFetch(cl, dir, entries)\n\tcase \"static-set\":\n\t\t// directory entries\n\t\tfor _, m := range sc.Members {\n\t\t\tdref := blobref.Parse(m)\n\t\t\tif dref == nil {\n\t\t\t\treturn fmt.Errorf(\"bad member blobref: %v\", m)\n\t\t\t}\n\t\t\tif err := smartFetch(cl, targ, dref); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\tcase \"file\":\n\t\tname := filepath.Join(targ, sc.FileName)\n\t\tf, err := os.Create(name)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"file type: %v\", err)\n\t\t}\n\t\tdefer f.Close()\n\t\tfor _, p := range sc.Parts {\n\t\t\tif p.BytesRef != nil {\n\t\t\t\tpanic(\"don't know how to handle BytesRef\")\n\t\t\t}\n\t\t\trc, err := fetch(cl, p.BlobRef)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t_, err = io.Copy(f, rc)\n\t\t\trc.Close()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif err := setFileMeta(name, sc); err != nil {\n\t\t\tlog.Print(err)\n\t\t}\n\t\treturn nil\n\tdefault:\n\t\treturn errors.New(\"unknown blob type: \" + sc.Type)\n\t}\n\tpanic(\"unreachable\")\n}", "func (i *interactor) Fetch(arg ...string) error {\n\tremote, 
err := i.remote()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not resolve remote for fetching: %w\", err)\n\t}\n\targ = append([]string{\"fetch\", remote}, arg...)\n\ti.logger.Infof(\"Fetching from %s\", remote)\n\tif out, err := i.executor.Run(arg...); err != nil {\n\t\treturn fmt.Errorf(\"error fetching: %w %v\", err, string(out))\n\t}\n\treturn nil\n}", "func Fetch(FS billy.Filesystem, remote, owner, repo, tag string, private bool) error {\n\tsrcRepo := path.Join(owner, repo)\n\tgco := &gogit.CloneOptions{\n\t\tURL: fmt.Sprintf(\"https://%s/%s\", remote, srcRepo),\n\t\tDepth: 1,\n\t}\n\n\tif tag != \"v0.0.0\" {\n\t\tgco.ReferenceName = plumbing.NewTagReferenceName(tag)\n\t\tgco.SingleBranch = true\n\t}\n\n\tif private {\n\t\tif netrc, err := yagu.NetrcCredentials(remote); err == nil {\n\t\t\tgco.Auth = &http.BasicAuth{\n\t\t\t\tUsername: netrc.Login,\n\t\t\t\tPassword: netrc.Password,\n\t\t\t}\n\t\t} else if ssh, err := yagu.SSHCredentials(remote); err == nil {\n\t\t\tgco.Auth = ssh.Keys\n\t\t\tgco.URL = fmt.Sprintf(\"%s@%s:%s\", ssh.User, remote, srcRepo)\n\t\t} else {\n\t\t\tgco.URL = fmt.Sprintf(\"%s@%s:%s\", \"git\", remote, srcRepo)\n\t\t}\n\t}\n\n\t_, err := gogit.Clone(memory.NewStorage(), FS, gco)\n\n\treturn err\n}" ]
[ "0.7089123", "0.64089054", "0.64003843", "0.6323396", "0.6199627", "0.6125763", "0.6094584", "0.60674024", "0.602652", "0.5980229", "0.59721696", "0.58975357", "0.5887362", "0.57745016", "0.5712181", "0.56382984", "0.558818", "0.5585266", "0.550621", "0.54520273", "0.54474455", "0.54394317", "0.54382557", "0.5313265", "0.5286928", "0.5203312", "0.5154441", "0.5146828", "0.51185244", "0.5116811", "0.50926286", "0.50919956", "0.50912285", "0.50710213", "0.5045024", "0.503971", "0.50330293", "0.5005387", "0.50018615", "0.49938852", "0.49904042", "0.49397087", "0.49134502", "0.49050346", "0.489568", "0.48900023", "0.4889449", "0.4866684", "0.48653093", "0.48523796", "0.48415455", "0.48258117", "0.48192674", "0.48178482", "0.4804687", "0.47988665", "0.47945318", "0.47890183", "0.47743583", "0.47672516", "0.4765899", "0.47565556", "0.47540784", "0.47437695", "0.4742359", "0.47392884", "0.47201508", "0.4706801", "0.47025597", "0.47024268", "0.46991894", "0.4692577", "0.46860442", "0.46666116", "0.4660846", "0.46603003", "0.46579507", "0.46573016", "0.4649596", "0.46400008", "0.46383557", "0.4636398", "0.4627746", "0.46195924", "0.46162453", "0.46137974", "0.4610479", "0.46089593", "0.4603661", "0.46033764", "0.46017167", "0.4594462", "0.4592701", "0.458814", "0.45870602", "0.45830265", "0.45826733", "0.45826733", "0.45790595", "0.45780078" ]
0.67842525
1
FetchRemoteImage mocks base method
func (m *MockFetcher) FetchRemoteImage(arg0 string) (image.Image, error) { ret := m.ctrl.Call(m, "FetchRemoteImage", arg0) ret0, _ := ret[0].(image.Image) ret1, _ := ret[1].(error) return ret0, ret1 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *MockFetcher) FetchLocalImage(arg0 string) (image.Image, error) {\n\tret := m.ctrl.Call(m, \"FetchLocalImage\", arg0)\n\tret0, _ := ret[0].(image.Image)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockFetcher) FetchUpdatedLocalImage(arg0 context.Context, arg1 string, arg2 io.Writer) (image.Image, error) {\n\tret := m.ctrl.Call(m, \"FetchUpdatedLocalImage\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(image.Image)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (f *Frontend) fetchImage(i *img.Image) (*img.Image, error) {\n\tvar err error\n\n\t// go through image proxy to resize and cache the image\n\tkey := hmacKey(i.ID)\n\tu := fmt.Sprintf(\"%v/image/225x,s%v/%v\", f.Host, key, i.ID)\n\tfmt.Println(u)\n\n\tresp, err := f.Images.Client.Get(u)\n\tif err != nil {\n\t\treturn i, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbdy, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn i, err\n\t}\n\n\ti.Base64 = base64.StdEncoding.EncodeToString(bdy)\n\treturn i, err\n}", "func TestBaseImage(t *testing.T) {\n\tctx, err := controllerPrepare()\n\tif err != nil {\n\t\tt.Fatal(\"Fail in controller prepare: \", err)\n\t}\n\teveBaseRef := os.Getenv(\"EVE_BASE_REF\")\n\tif len(eveBaseRef) == 0 {\n\t\teveBaseRef = \"4.10.0\"\n\t}\n\tzArch := os.Getenv(\"ZARCH\")\n\tif len(eveBaseRef) == 0 {\n\t\tzArch = \"amd64\"\n\t}\n\tHV := os.Getenv(\"HV\")\n\tif HV == \"xen\" {\n\t\tHV = \"\"\n\t}\n\tvar baseImageTests = []struct {\n\t\tdataStoreID string\n\t\timageID string\n\t\tbaseID string\n\t\timageRelativePath string\n\t\timageFormat config.Format\n\t\teveBaseRef string\n\t\tzArch string\n\t\tHV string\n\t}{\n\t\t{eServerDataStoreID,\n\n\t\t\t\"1ab8761b-5f89-4e0b-b757-4b87a9fa93ec\",\n\n\t\t\t\"22b8761b-5f89-4e0b-b757-4b87a9fa93ec\",\n\n\t\t\t\"baseos.qcow2\",\n\t\t\tconfig.Format_QCOW2,\n\t\t\teveBaseRef,\n\t\t\tzArch,\n\t\t\tHV,\n\t\t},\n\t}\n\tfor _, tt := range baseImageTests {\n\t\tbaseOSVersion := fmt.Sprintf(\"%s-%s\", tt.eveBaseRef, tt.zArch)\n\t\tif tt.HV != \"\" {\n\t\t\tbaseOSVersion = fmt.Sprintf(\"%s-%s-%s\", tt.eveBaseRef, tt.zArch, tt.HV)\n\t\t}\n\t\tt.Run(baseOSVersion, func(t *testing.T) {\n\n\t\t\terr = prepareBaseImageLocal(ctx, tt.dataStoreID, tt.imageID, tt.baseID, tt.imageRelativePath, tt.imageFormat, baseOSVersion)\n\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"Fail in prepare base image from local file: \", err)\n\t\t\t}\n\t\t\tdeviceCtx, err := ctx.GetDeviceFirst()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"Fail in get first device: \", err)\n\t\t\t}\n\t\t\tdeviceCtx.SetBaseOSConfig([]string{tt.baseID})\n\t\t\tdevUUID := deviceCtx.GetID()\n\t\t\terr = ctx.ConfigSync(deviceCtx)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"Fail in sync config with controller: \", err)\n\t\t\t}\n\t\t\tt.Run(\"Started\", func(t *testing.T) {\n\t\t\t\terr := ctx.InfoChecker(devUUID, map[string]string{\"devId\": devUUID.String(), \"shortVersion\": baseOSVersion}, einfo.ZInfoDevSW, 300)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(\"Fail in waiting for base image update init: \", err)\n\t\t\t\t}\n\t\t\t})\n\t\t\tt.Run(\"Downloaded\", func(t *testing.T) {\n\t\t\t\terr := ctx.InfoChecker(devUUID, map[string]string{\"devId\": devUUID.String(), \"shortVersion\": baseOSVersion, \"downloadProgress\": \"100\"}, einfo.ZInfoDevSW, 1500)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(\"Fail in waiting for base image download progress: \", err)\n\t\t\t\t}\n\t\t\t})\n\t\t\tt.Run(\"Logs\", func(t *testing.T) {\n\t\t\t\tif !checkLogs {\n\t\t\t\t\tt.Skip(\"no LOGS flag set - 
skipped\")\n\t\t\t\t}\n\t\t\t\terr = ctx.LogChecker(devUUID, map[string]string{\"devId\": devUUID.String(), \"eveVersion\": baseOSVersion}, 1200)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(\"Fail in waiting for base image logs: \", err)\n\t\t\t\t}\n\t\t\t})\n\t\t\ttimeout := time.Duration(1200)\n\n\t\t\tif !checkLogs {\n\t\t\t\ttimeout = 2400\n\t\t\t}\n\t\t\tt.Run(\"Active\", func(t *testing.T) {\n\t\t\t\terr = ctx.InfoChecker(devUUID, map[string]string{\"devId\": devUUID.String(), \"shortVersion\": baseOSVersion, \"status\": \"INSTALLED\", \"partitionState\": \"(inprogress|active)\"}, einfo.ZInfoDevSW, timeout)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(\"Fail in waiting for base image installed status: \", err)\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\t}\n\n}", "func TestFetch(t *testing.T) {\n\timage := \"rkt-inspect-fetch.aci\"\n\n\tctx := newRktRunCtx()\n\tdefer ctx.cleanup()\n\n\t// Fetch the image for the first time, this should write the image to the\n\t// on-disk store.\n\toldHash := patchImportAndFetchHash(image, []string{\"--exec=/inspect --read-file\"}, t, ctx)\n\n\t// Fetch the image with the same name but different content, the expecting\n\t// result is that we should get a different hash since we are not fetching\n\t// from the on-disk store.\n\tnewHash := patchImportAndFetchHash(image, []string{\"--exec=/inspect --read-file --write-file\"}, t, ctx)\n\n\tif oldHash == newHash {\n\t\tt.Fatalf(\"ACI hash should be different as the image has changed\")\n\t}\n}", "func (m *MockCEImpl) ImagePull(image string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ImagePull\", image)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func ttrPullImageAsserting(ctx context.Context, t *testing.T, client apiclient.APIClient, image string) {\n\terr := ttrPullImage(ctx, client, image)\n\tassert.NilError(t, err)\n}", "func getRemoteImage(url string) ([]byte, error) {\n\tvar image []byte\n\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn image, err\n\t}\n\tdefer resp.Body.Close()\n\n\timage, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn image, err\n\t}\n\n\treturn image, nil\n}", "func (m *MockRequester) Fetch(url string) (io.ReadCloser, error) {\n\tret := m.ctrl.Call(m, \"Fetch\", url)\n\tret0, _ := ret[0].(io.ReadCloser)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (c *dockerClientMock) DownloadImage(imageSource, filePath string) (v1.Image, error) {\n\treturn &fake.FakeImage{}, nil // fmt.Errorf(\"%s\", filePath)\n}", "func TestImplicitFetch(t *testing.T) {\n\tfoundMsg := \"found image in local store\"\n\n\tctx := newRktRunCtx()\n\tdefer ctx.cleanup()\n\n\t// 1. Fetch the image.\n\t// TODO(yifan): Add other ACI with different schemes.\n\timportImageAndFetchHash(t, ctx, \"docker://busybox:ubuntu-12.04\")\n\timportImageAndFetchHash(t, ctx, \"docker://busybox:latest\")\n\n\t// 2. 
Try run/prepare with/without tag ':latest', should not get $foundMsg.\n\tcmds := []string{\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --mds-register=false docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --mds-register=false docker://busybox:latest\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare docker://busybox:latest\", ctx.cmd()),\n\t}\n\n\tfor _, cmd := range cmds {\n\t\tt.Logf(\"Running test %v\", cmd)\n\n\t\tchild, err := gexpect.Spawn(cmd)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Cannot exec rkt: %v\", err)\n\t\t}\n\t\tif err := expectWithOutput(child, foundMsg); err == nil {\n\t\t\tt.Fatalf(\"%q should not be found\", foundMsg)\n\t\t}\n\t\tif err := child.Wait(); err != nil {\n\t\t\tt.Fatalf(\"rkt didn't terminate correctly: %v\", err)\n\t\t}\n\t}\n}", "func TestRemote(t *testing.T) {\n\trnd, err := random.Image(1024, 3)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ts, err := registry.TLS(\"gcr.io\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ttr := s.Client().Transport\n\n\tsrc := \"gcr.io/test/compressed\"\n\tref, err := name.ParseReference(src)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := remote.Write(ref, rnd, remote.WithTransport(tr)); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\timg, err := remote.Image(ref, remote.WithTransport(tr))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := validate.Image(img); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcf, err := img.ConfigFile()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tm, err := img.Manifest()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tlayer, err := img.LayerByDiffID(cf.RootFS.DiffIDs[0])\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\td, err := layer.Digest()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif diff := cmp.Diff(d, m.Layers[0].Digest); diff != \"\" {\n\t\tt.Errorf(\"mismatched digest: %v\", diff)\n\t}\n}", "func (c *dockerClientMock) GetRemoteImageInfo(imageSoure string) (v1.Image, error) {\n\treturn &fake.FakeImage{}, nil\n}", "func (f *fetcher) fetchImage(img string, asc string, discover bool) (string, error) {\n\tif f.withDeps && !discover {\n\t\treturn \"\", fmt.Errorf(\"cannot fetch image's dependencies with discovery disabled\")\n\t}\n\thash, err := f.fetchSingleImage(img, asc, discover)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif f.withDeps {\n\t\terr = f.fetchImageDeps(hash)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn hash, nil\n}", "func (f OriginalFetcher) Fetch(namespace string, sourceURL string, imageHash string) (info *info.ImageProperties, downloaded bool, err error) {\n\tif sourceURL == \"\" && imageHash == \"\" {\n\t\treturn nil, false, fmt.Errorf(\"Missing Hash & URL\")\n\t}\n\n\tif imageHash != \"\" {\n\t\tinfo, err = f.fetchFromStore(namespace, imageHash)\n\t}\n\n\tif sourceURL != \"\" && (err != nil || imageHash == \"\") {\n\t\tinfo, downloaded, err = f.fetchFromSource(namespace, sourceURL)\n\t}\n\n\treturn info, downloaded, err\n}", "func (f *fetcher) fetchImageFrom(appName string, aciURL, ascURL, scheme string, ascFile *os.File, latest bool) (string, error) {\n\tvar rem *store.Remote\n\n\tif f.insecureSkipVerify {\n\t\tif f.ks != nil {\n\t\t\tstderr(\"rkt: warning: TLS verification and signature verification has been disabled\")\n\t\t}\n\t} else if scheme == \"docker\" {\n\t\treturn \"\", fmt.Errorf(\"signature verification for docker images is not supported (try --insecure-skip-verify)\")\n\t}\n\n\tif (f.local && 
scheme != \"file\") || (scheme != \"file\" && !latest) {\n\t\tvar err error\n\t\tok := false\n\t\trem, ok, err = f.s.GetRemote(aciURL)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif ok {\n\t\t\tif f.local {\n\t\t\t\tstderr(\"rkt: using image in local store for app %s\", appName)\n\t\t\t\treturn rem.BlobKey, nil\n\t\t\t}\n\t\t\tif useCached(rem.DownloadTime, rem.CacheMaxAge) {\n\t\t\t\tstderr(\"rkt: found image in local store, skipping fetching from %s\", aciURL)\n\t\t\t\treturn rem.BlobKey, nil\n\t\t\t}\n\t\t}\n\t\tif f.local {\n\t\t\treturn \"\", fmt.Errorf(\"url %s not available in local store\", aciURL)\n\t\t}\n\t}\n\n\tif scheme != \"file\" && f.debug {\n\t\tstderr(\"rkt: fetching image from %s\", aciURL)\n\t}\n\n\tvar etag string\n\tif rem != nil {\n\t\tetag = rem.ETag\n\t}\n\tentity, aciFile, cd, err := f.fetch(appName, aciURL, ascURL, ascFile, etag)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif cd != nil && cd.useCached {\n\t\tif rem != nil {\n\t\t\treturn rem.BlobKey, nil\n\t\t} else {\n\t\t\t// should never happen\n\t\t\tpanic(\"asked to use cached image but remote is nil\")\n\t\t}\n\t}\n\tif scheme != \"file\" {\n\t\tdefer os.Remove(aciFile.Name())\n\t}\n\n\tif entity != nil && !f.insecureSkipVerify {\n\t\tstderr(\"rkt: signature verified:\")\n\t\tfor _, v := range entity.Identities {\n\t\t\tstderr(\" %s\", v.Name)\n\t\t}\n\t}\n\tkey, err := f.s.WriteACI(aciFile, latest)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif scheme != \"file\" {\n\t\trem := store.NewRemote(aciURL, ascURL)\n\t\trem.BlobKey = key\n\t\trem.DownloadTime = time.Now()\n\t\tif cd != nil {\n\t\t\trem.ETag = cd.etag\n\t\t\trem.CacheMaxAge = cd.maxAge\n\t\t}\n\t\terr = f.s.WriteRemote(rem)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn key, nil\n}", "func (m *MockHandler) GetLatestOsImage(arg0 string) (*models.OsImage, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetLatestOsImage\", arg0)\n\tret0, _ := ret[0].(*models.OsImage)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockModuleService) GetLatestModuleImage(arg0 string) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetLatestModuleImage\", arg0)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func PullImage(image, cacheDir string) (v1.Image, error) {\n var options []crane.Option\n\n // options = append(options, crane.Insecure)\n\n // Use current built OS and architecture\n options = append(options, crane.WithPlatform(&v1.Platform{\n OS: runtime.GOOS,\n Architecture: runtime.GOARCH,\n }))\n\n // Grab the remote manifest\n manifest, err := crane.Manifest(image, options...)\n if err != nil {\n return nil, fmt.Errorf(\"failed fetching manifest for %s: %v\", image, err)\n }\n\n if !gjson.Valid(string(manifest)) {\n return nil, fmt.Errorf(\"Cannot parse manifest: %s\", string(manifest))\n }\n\n value := gjson.Get(string(manifest), \"config.digest\").Value().(string)\n if value == \"\" {\n return nil, fmt.Errorf(\"Malformed manifest: %s\", string(manifest))\n }\n \n digest := strings.Split(value, \":\")[1]\n tarball := fmt.Sprintf(\"%s/%s.tar.gz\", cacheDir, digest)\n\n // Download the tarball of the image if not available in the cache\n if _, err := os.Stat(tarball); os.IsNotExist(err) {\n // Create the cacheDir if it does not already exist\n if cacheDir != \"\" {\n if _, err := os.Stat(cacheDir); os.IsNotExist(err) {\n os.MkdirAll(cacheDir, os.ModePerm)\n }\n }\n \n // Pull the image\n img, err := crane.Pull(image, options...)\n if 
err != nil {\n return nil, fmt.Errorf(\"Could not pull image: %s\", err)\n }\n \n f, err := os.Create(tarball)\n if err != nil {\n return nil, fmt.Errorf(\"Failed to open %s: %v\", tarball, err)\n }\n \n defer f.Close()\n \n err = crane.Save(img, image, tarball)\n if err != nil {\n return nil, fmt.Errorf(\"Could not save image: %s\", err)\n }\n }\n\n img, err := crane.Load(tarball)\n if err != nil {\n return nil, fmt.Errorf(\"Could not load image: %s\", err)\n }\n\n return img, nil\n}", "func (m *MockHandler) GetOsImage(arg0, arg1 string) (*models.OsImage, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetOsImage\", arg0, arg1)\n\tret0, _ := ret[0].(*models.OsImage)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (b *ecrBase) runGetImage(ctx context.Context, batchGetImageInput ecr.BatchGetImageInput) (*ecr.Image, error) {\n\t// Allow only a single image to be fetched at a time.\n\tif len(batchGetImageInput.ImageIds) != 1 {\n\t\treturn nil, errGetImageUnhandled\n\t}\n\n\tbatchGetImageInput.RegistryId = aws.String(b.ecrSpec.Registry())\n\tbatchGetImageInput.RepositoryName = aws.String(b.ecrSpec.Repository)\n\n\tlog.G(ctx).WithField(\"batchGetImageInput\", batchGetImageInput).Trace(\"ecr.base.image: requesting images\")\n\n\tbatchGetImageOutput, err := b.client.BatchGetImageWithContext(ctx, &batchGetImageInput)\n\tif err != nil {\n\t\tlog.G(ctx).WithError(err).Error(\"ecr.base.image: failed to get image\")\n\t\treturn nil, err\n\t}\n\tlog.G(ctx).WithField(\"batchGetImageOutput\", batchGetImageOutput).Trace(\"ecr.base.image: api response\")\n\n\t// Summarize image request failures for handled errors. Only the first\n\t// failure is checked as only a single ImageIdentifier is allowed to be\n\t// queried for.\n\tif len(batchGetImageOutput.Failures) > 0 {\n\t\tfailure := batchGetImageOutput.Failures[0]\n\t\tswitch aws.StringValue(failure.FailureCode) {\n\t\t// Requested image with a corresponding tag and digest does not exist.\n\t\t// This failure will generally occur when pushing an updated (or new)\n\t\t// image with a tag.\n\t\tcase ecr.ImageFailureCodeImageTagDoesNotMatchDigest:\n\t\t\tlog.G(ctx).WithField(\"failure\", failure).Debug(\"ecr.base.image: no matching image with specified digest\")\n\t\t\treturn nil, errImageNotFound\n\t\t// Requested image doesn't resolve to a known image. 
A new image will\n\t\t// result in an ImageNotFound error when checked before push.\n\t\tcase ecr.ImageFailureCodeImageNotFound:\n\t\t\tlog.G(ctx).WithField(\"failure\", failure).Debug(\"ecr.base.image: no image found\")\n\t\t\treturn nil, errImageNotFound\n\t\t// Requested image identifiers are invalid.\n\t\tcase ecr.ImageFailureCodeInvalidImageDigest, ecr.ImageFailureCodeInvalidImageTag:\n\t\t\tlog.G(ctx).WithField(\"failure\", failure).Error(\"ecr.base.image: invalid image identifier\")\n\t\t\treturn nil, reference.ErrInvalid\n\t\t// Unhandled failure reported for image request made.\n\t\tdefault:\n\t\t\tlog.G(ctx).WithField(\"failure\", failure).Warn(\"ecr.base.image: unhandled image request failure\")\n\t\t\treturn nil, errGetImageUnhandled\n\t\t}\n\t}\n\n\treturn batchGetImageOutput.Images[0], nil\n}", "func TestExpectedImgRef(t *testing.T) {\n\n\tv, isSet := os.LookupEnv(\"DOCKERHUB_PROXY\")\n\tif isSet {\n\t\tdefer os.Setenv(\"DOCKERHUB_PROXY\", v)\n\t}\n\n\tos.Unsetenv(\"DOCKERHUB_PROXY\")\n\tassert.Equal(t,\n\t\t\"index.docker.io/library/hello-world@sha256:ebf526c198a14fa138634b9746c50ec38077ec9b3986227e79eb837d26f59dc6\",\n\t\tCompleteImageRef(\"library/hello-world@sha256:ebf526c198a14fa138634b9746c50ec38077ec9b3986227e79eb837d26f59dc6\"))\n\n\tos.Setenv(\"DOCKERHUB_PROXY\", \"my-dockerhub-proxy.tld/dockerhub-proxy\")\n\tassert.Equal(t,\n\t\t\"my-dockerhub-proxy.tld/dockerhub-proxy/library/hello-world@sha256:ebf526c198a14fa138634b9746c50ec38077ec9b3986227e79eb837d26f59dc6\",\n\t\tCompleteImageRef(\"library/hello-world@sha256:ebf526c198a14fa138634b9746c50ec38077ec9b3986227e79eb837d26f59dc6\"))\n\tos.Unsetenv(\"DOCKERHUB_PROXY\")\n}", "func (mc *MockContainer) ImagePull() error {\n\treturn mc.MockImagePull()\n}", "func testDownloadImages(ctx context.Context, t *testing.T, downloadCh chan<- downloadRequest, addr, ccvmDir string) {\n\twkld := &workload{\n\t\tspec: workloadSpec{\n\t\t\tBaseImageURL: \"http://\" + addr + \"/download/image\",\n\t\t\tBIOS: \"http://\" + addr + \"/download/bios\",\n\t\t},\n\t}\n\n\tresultCh := make(chan interface{})\n\tgo func() {\n\t\timg, bios, err := downloadImages(ctx, wkld, http.DefaultTransport.(*http.Transport),\n\t\t\tresultCh, downloadCh)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Failed to download images: %v\", err)\n\t\t}\n\t\tif len(img) == 0 || len(bios) == 0 {\n\t\t\tt.Errorf(\"One the paths is empty img=%s bios=%s\", img, bios)\n\t\t}\n\t\tclose(resultCh)\n\t}()\n\n\tfor range resultCh {\n\t}\n\n\twkld.spec.BIOS = \"ftp://\" + addr + \"/download/bios\"\n\tresultCh = make(chan interface{})\n\tgo func() {\n\t\t_, _, err := downloadImages(ctx, wkld, http.DefaultTransport.(*http.Transport),\n\t\t\tresultCh, downloadCh)\n\t\tif err == nil {\n\t\t\tt.Errorf(\"Expected downloadImages with bad BIOS URL to fail\")\n\t\t}\n\t\tclose(resultCh)\n\t}()\n\n\tfor range resultCh {\n\t}\n}", "func (c MockDockerClient) ImagePull(ctx context.Context, imageName string) error {\n\tif c.ImagePullFn != nil {\n\t\tfmt.Println(\"[MockDockerClient] In \", utils.CurrentFunctionName())\n\t\tfmt.Println(\"[MockDockerClient] - ctx: \", ctx)\n\t\tfmt.Println(\"[MockDockerClient] - imageName: \", imageName)\n\t\treturn c.ImagePullFn(ctx, imageName)\n\t}\n\tpanic(fmt.Sprintf(\"No function defined for: %s\", utils.CurrentFunctionName()))\n}", "func TestIsImageInLocalRegistry(t *testing.T) {\n\ttype testDef struct {\n\t\timageName string\n\t\tdocker test.FakeDockerClient\n\t\texpectedResult bool\n\t\texpectedError string\n\t}\n\ttests := 
map[string]testDef{\n\t\t\"ImageFound\": {\"a_test_image\", test.FakeDockerClient{}, true, \"\"},\n\t\t\"ImageNotFound\": {\"a_test_image:sometag\", test.FakeDockerClient{}, false, \"unable to get metadata for a_test_image:sometag\"},\n\t}\n\n\tfor test, def := range tests {\n\t\tdh := getDocker(&def.docker)\n\t\tfake := dh.kubeDockerClient.(*dockertools.FakeDockerClient)\n\t\tif def.expectedResult {\n\t\t\tfake.Image = &dockertypes.ImageInspect{ID: def.imageName}\n\t\t}\n\n\t\tresult, err := dh.IsImageInLocalRegistry(def.imageName)\n\n\t\tif e := fake.AssertCalls([]string{\"inspect_image\"}); e != nil {\n\t\t\tt.Errorf(\"%+v\", e)\n\t\t}\n\n\t\tif result != def.expectedResult {\n\t\t\tt.Errorf(\"Test - %s: Expected result: %v. Got: %v\", test, def.expectedResult, result)\n\t\t}\n\t\tif err != nil && len(def.expectedError) > 0 && !strings.Contains(err.Error(), def.expectedError) {\n\t\t\tt.Errorf(\"Test - %s: Expected error: Got: %+v\", test, err)\n\t\t}\n\t}\n}", "func (m *MockImageTransferer) Download(arg0 string, arg1 core.Digest) (base.FileReader, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Download\", arg0, arg1)\n\tret0, _ := ret[0].(base.FileReader)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (c *TestClient) GetImage(project, name string) (*compute.Image, error) {\n\tif c.GetImageFn != nil {\n\t\treturn c.GetImageFn(project, name)\n\t}\n\treturn c.client.GetImage(project, name)\n}", "func (m *MockMoby) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {\n\tret := m.ctrl.Call(m, \"ImageBuild\", ctx, buildContext, options)\n\tret0, _ := ret[0].(types.ImageBuildResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestBaseImage(t *testing.T) {\n\t// test with 'original.png'\n\timgs := map[string][]byte{\n\t\t\"original.png\": []byte(\"image\"),\n\t}\n\t_, err := backend.baseImage(imgs)\n\tif err != nil {\n\t\tt.Errorf(\"Got error %s\", err)\n\t}\n\n\t// test with 'original.jpg'\n\timgs = map[string][]byte{\n\t\t\"original.jpg\": []byte(\"image\"),\n\t}\n\t_, err = backend.baseImage(imgs)\n\tif err != nil {\n\t\tt.Errorf(\"Got error %s\", err)\n\t}\n\n\t// without 'original.*' should get an error\n\timgs = map[string][]byte{\n\t\t\"127x127.png\": []byte(\"image\"),\n\t}\n\t_, err = backend.baseImage(imgs)\n\tif err == nil {\n\t\tt.Errorf(\"Should get an error, didn't pass original image.\")\n\t}\n}", "func (a *AgentServer) PullImage(req PullImageRequest, image *string) error {\n\n\tlogger := plog.WithFields(logrus.Fields{\n\t\t\"image\": req.Image,\n\t\t\"registry\": req.Registry})\n\n\t// set up the connections\n\tdocker, err := docker.NewDockerClient()\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"Could not connect to docker client\")\n\t\treturn err\n\t}\n\tconn, err := zzk.GetLocalConnection(\"/\")\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"Could not acquire coordinator connection\")\n\t\treturn err\n\t}\n\n\t// pull the image from the registry\n\treg := registry.NewRegistryListener(docker, req.Registry, \"\")\n\treg.SetConnection(conn)\n\ttimer := time.NewTimer(req.Timeout)\n\tdefer timer.Stop()\n\tif err := reg.PullImage(timer.C, req.Image); err != nil {\n\t\tlogger.WithError(err).Error(\"Could not pull image from registry\")\n\t\treturn err\n\t}\n\n\t// get the tag of the image pulled\n\t*image, err = reg.ImagePath(req.Image)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"Could not get image id for image from registry\")\n\t\treturn 
err\n\t}\n\n\treturn nil\n}", "func (m *MockPackClient) InspectImage(arg0 string, arg1 bool) (*client.ImageInfo, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"InspectImage\", arg0, arg1)\n\tret0, _ := ret[0].(*client.ImageInfo)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockSystem) FetchURL(ctx context.Context, url string) (semver.Tags, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"FetchURL\", ctx, url)\n\tret0, _ := ret[0].(semver.Tags)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockDockerClient) ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ImagePull\", ctx, refStr, options)\n\tret0, _ := ret[0].(io.ReadCloser)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func FetchRemoteFile() {\n\n}", "func (m *MockManager) LoadImage(arg0 context.Context, arg1 *config.Config, arg2 dockerapi.DockerClient) (*types.ImageInspect, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"LoadImage\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(*types.ImageInspect)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func GetImage(ctx context.Context, sharedDownload map[string]*DownloadState, params *Params) (io.Reader, error) {\n\tlogger := logging.FromContext(ctx)\n\ttimeout := params.Timeout\n\tURL := params.URL\n\tvar imageReader io.Reader\n\n\tif dnState, ok := sharedDownload[URL]; ok {\n\t\tlogger.WithField(\"url\", URL).Trace(\"is fetching by another client\")\n\t\terrCh := make(chan error, 1)\n\t\tdnState.Subs = append(dnState.Subs, errCh)\n\t\tif err := <-errCh; err != nil {\n\t\t\tlogger.WithError(err).WithField(\"url\", URL).Trace(\"fetch failed\")\n\t\t\tdelete(sharedDownload, URL)\n\t\t\treturn nil, err\n\t\t}\n\t\timageReader = bytes.NewReader(dnState.Data)\n\t\tlogger.WithField(\"url\", URL).Trace(\"fetched shared\")\n\t} else {\n\t\tsubscribers := make([]chan error, 0, 1)\n\t\tdownloadState := &DownloadState{\n\t\t\tData: nil,\n\t\t\tSubs: subscribers,\n\t\t}\n\t\tsharedDownload[URL] = downloadState\n\t\tdefer func(sd map[string]*DownloadState, url string) {\n\t\t\tdelete(sd, url)\n\t\t}(sharedDownload, URL)\n\t\thttpClient := httpclient.NewHTTPClient(timeout)\n\t\tresponse, err := httpClient.Get(ctx, URL)\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).WithField(\"url\", URL).Error(\"fetch image failed\")\n\t\t\tfor _, subs := range downloadState.Subs {\n\t\t\t\tsubs <- err\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\n\t\tdownloadState.Data = response.RawBody\n\t\tfor _, subs := range downloadState.Subs {\n\t\t\tsubs <- nil\n\t\t}\n\t\timageReader = bytes.NewReader(response.RawBody)\n\t}\n\n\treturn imageReader, nil\n}", "func Fetch(imageURI string, labels map[types.ACIdentifier]string, insecure bool) (tempfile.ReadSeekCloser, error) {\n\tu, err := url.Parse(imageURI)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar puller remote.Puller\n\n\tswitch u.Scheme {\n\tcase \"file\":\n\t\tfilename := u.Path\n\t\tif u.Host != \"\" {\n\t\t\tfilename = filepath.Join(u.Host, u.Path)\n\t\t}\n\t\tf, err := os.Open(filename)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn tempfile.New(f)\n\tcase \"http\", \"https\":\n\t\tpuller = http.New()\n\tcase \"docker\":\n\t\tpuller = docker.New(insecure)\n\tcase \"aci\", \"\":\n\t\tpuller = aci.New(insecure, labels)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"%q scheme not supported\", u.Scheme)\n\t}\n\n\tr, err := puller.Pull(imageURI)\n\tif err != nil {\n\t\treturn 
nil, err\n\t}\n\treturn tempfile.New(r)\n}", "func dirImageMock(t *testing.T, dir, dockerReference string) private.UnparsedImage {\n\tref, err := reference.ParseNormalizedNamed(dockerReference)\n\trequire.NoError(t, err)\n\treturn dirImageMockWithRef(t, dir, refImageReferenceMock{ref: ref})\n}", "func (m *MockUpstreamIntf) CachedRemoteDigestOfLocalHeight() blockdigest.Digest {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CachedRemoteDigestOfLocalHeight\")\n\tret0, _ := ret[0].(blockdigest.Digest)\n\treturn ret0\n}", "func LookupMyImage(connConfig string, myImageId string) (SpiderMyImageInfo, error) {\n\n\tif connConfig == \"\" {\n\t\terr := fmt.Errorf(\"LookupMyImage() called with empty connConfig.\")\n\t\tcommon.CBLog.Error(err)\n\t\treturn SpiderMyImageInfo{}, err\n\t} else if myImageId == \"\" {\n\t\terr := fmt.Errorf(\"LookupMyImage() called with empty myImageId.\")\n\t\tcommon.CBLog.Error(err)\n\t\treturn SpiderMyImageInfo{}, err\n\t}\n\n\turl := common.SpiderRestUrl + \"/myimage/\" + url.QueryEscape(myImageId)\n\n\t// Create Req body\n\ttempReq := common.SpiderConnectionName{}\n\ttempReq.ConnectionName = connConfig\n\n\tclient := resty.New().SetCloseConnection(true)\n\tclient.SetAllowGetMethodPayload(true)\n\n\tresp, err := client.R().\n\t\tSetHeader(\"Content-Type\", \"application/json\").\n\t\tSetBody(tempReq).\n\t\tSetResult(&SpiderMyImageInfo{}). // or SetResult(AuthSuccess{}).\n\t\t//SetError(&AuthError{}). // or SetError(AuthError{}).\n\t\tGet(url)\n\n\tif err != nil {\n\t\tcommon.CBLog.Error(err)\n\t\terr := fmt.Errorf(\"an error occurred while requesting to CB-Spider\")\n\t\treturn SpiderMyImageInfo{}, err\n\t}\n\n\tfmt.Println(string(resp.Body()))\n\n\tfmt.Println(\"HTTP Status code: \" + strconv.Itoa(resp.StatusCode()))\n\tswitch {\n\tcase resp.StatusCode() >= 400 || resp.StatusCode() < 200:\n\t\terr := fmt.Errorf(string(resp.Body()))\n\t\tcommon.CBLog.Error(err)\n\t\treturn SpiderMyImageInfo{}, err\n\t}\n\n\ttemp := resp.Result().(*SpiderMyImageInfo)\n\treturn *temp, nil\n\n}", "func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (_ Image, retErr error) {\n\tpullCtx := defaultRemoteContext()\n\tfor _, o := range opts {\n\t\tif err := o(c, pullCtx); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif pullCtx.PlatformMatcher == nil {\n\t\tif len(pullCtx.Platforms) > 1 {\n\t\t\treturn nil, errors.New(\"cannot pull multiplatform image locally, try Fetch\")\n\t\t} else if len(pullCtx.Platforms) == 0 {\n\t\t\tpullCtx.PlatformMatcher = c.platform\n\t\t} else {\n\t\t\tp, err := platforms.Parse(pullCtx.Platforms[0])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid platform %s: %w\", pullCtx.Platforms[0], err)\n\t\t\t}\n\n\t\t\tpullCtx.PlatformMatcher = platforms.Only(p)\n\t\t}\n\t}\n\n\tctx, done, err := c.WithLease(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer done(ctx)\n\n\tvar unpacks int32\n\tvar unpackEg *errgroup.Group\n\tvar unpackWrapper func(f images.Handler) images.Handler\n\n\tif pullCtx.Unpack {\n\t\t// unpacker only supports schema 2 image, for schema 1 this is noop.\n\t\tu, err := c.newUnpacker(ctx, pullCtx)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"create unpacker: %w\", err)\n\t\t}\n\t\tunpackWrapper, unpackEg = u.handlerWrapper(ctx, pullCtx, &unpacks)\n\t\tdefer func() {\n\t\t\tif err := unpackEg.Wait(); err != nil {\n\t\t\t\tif retErr == nil {\n\t\t\t\t\tretErr = fmt.Errorf(\"unpack: %w\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\twrapper := pullCtx.HandlerWrapper\n\t\tpullCtx.HandlerWrapper = 
func(h images.Handler) images.Handler {\n\t\t\tif wrapper == nil {\n\t\t\t\treturn unpackWrapper(h)\n\t\t\t}\n\t\t\treturn unpackWrapper(wrapper(h))\n\t\t}\n\t}\n\n\timg, err := c.fetch(ctx, pullCtx, ref, 1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// NOTE(fuweid): unpacker defers blobs download. before create image\n\t// record in ImageService, should wait for unpacking(including blobs\n\t// download).\n\tif pullCtx.Unpack {\n\t\tif unpackEg != nil {\n\t\t\tif err := unpackEg.Wait(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\timg, err = c.createNewImage(ctx, img)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ti := NewImageWithPlatform(c, img, pullCtx.PlatformMatcher)\n\n\tif pullCtx.Unpack {\n\t\tif unpacks == 0 {\n\t\t\t// Try to unpack is none is done previously.\n\t\t\t// This is at least required for schema 1 image.\n\t\t\tif err := i.Unpack(ctx, pullCtx.Snapshotter, pullCtx.UnpackOpts...); err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to unpack image on snapshotter %s: %w\", pullCtx.Snapshotter, err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn i, nil\n}", "func (m *MockHandler) GetReleaseImage(arg0, arg1 string) (*models.ReleaseImage, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetReleaseImage\", arg0, arg1)\n\tret0, _ := ret[0].(*models.ReleaseImage)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockHandler) GetMustGatherImages(arg0, arg1, arg2 string) (MustGatherVersion, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetMustGatherImages\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(MustGatherVersion)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockImageTransferer) Stat(arg0 string, arg1 core.Digest) (*core.BlobInfo, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Stat\", arg0, arg1)\n\tret0, _ := ret[0].(*core.BlobInfo)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockEnvironment) Fetch() map[string]interface{} {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Fetch\")\n\tret0, _ := ret[0].(map[string]interface{})\n\treturn ret0\n}", "func (p *Pvr) LoadRemoteImage(app *AppData) error {\n\n\tvar dockerManifest *schema2.Manifest\n\n\tapp.RemoteImage = DockerImage{\n\t\tExists: false,\n\t}\n\timage, err := registry.ParseImage(app.From)\n\tif err != nil {\n\t\treturn err\n\t}\n\tauth, err := p.AuthConfig(app.Username, app.Password, image.Domain)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdockerRegistry, err := p.GetDockerRegistry(image, auth)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar repoDigest string\n\tvar dockerPlatform string\n\tvar platforms []interface{}\n\n\tif app.Platform == \"\" {\n\t\tdockerJsonI, ok := p.PristineJsonMap[\"_hostconfig/pvr/docker.json\"]\n\n\t\tif ok {\n\t\t\tdockerJson := dockerJsonI.(map[string]interface{})\n\t\t\tplatformsI, ok := dockerJson[\"platforms\"]\n\t\t\tif ok {\n\t\t\t\tplatforms = platformsI.([]interface{})\n\t\t\t}\n\t\t}\n\t} else {\n\t\tplatforms = append(platforms, app.Platform)\n\t}\n\n\t// we go down the multiarch path if we have seen a platform\n\t// restriction in pvr-docker.json\n\tif platforms != nil {\n\t\tmanifestList, err := dockerRegistry.ManifestList(context.Background(),\n\t\t\timage.Path, image.Reference())\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, v := range manifestList.Manifests {\n\t\t\tfor _, v1 := range platforms {\n\t\t\t\tv1S := v1.(string)\n\t\t\t\tp := strings.SplitN(v1S, \"/\", 3)\n\t\t\t\tif v.Platform.Architecture == p[1] &&\n\t\t\t\t\t(len(p) < 3 || p[2] == 
v.Platform.Variant) {\n\t\t\t\t\trepoDigest = v.Digest.String()\n\t\t\t\t\tdockerPlatform = v1S\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif repoDigest != \"\" {\n\t\t\t\tdm, err := dockerRegistry.ManifestV2(context.Background(), image.Path, repoDigest)\n\t\t\t\tdockerManifest = &dm\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Found Manifest for platform %s\\n\",\n\t\t\t\t\tdockerPlatform)\n\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tdockerPlatform = \"\"\n\t\t\t}\n\t\t}\n\t}\n\n\tif dockerManifest == nil {\n\t\tdockerManifest, err = p.GetDockerManifest(image, auth)\n\t\tif err != nil {\n\t\t\tmanifestErr := ReportDockerManifestError(err, app.From)\n\t\t\tif err.Error() == \"image not found or you do not have access\" {\n\t\t\t\tfmt.Fprintf(os.Stderr, manifestErr.Error()+\"\\n\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn manifestErr\n\t\t}\n\t\tfmt.Fprintf(os.Stderr, \"Found Manifest for default platform.\\n\")\n\t}\n\n\tdockerConfig, err := p.GetDockerConfig(dockerManifest, image, auth)\n\tif err != nil {\n\t\terr = ReportDockerManifestError(err, app.From)\n\t\treturn err\n\t}\n\n\t// if we cannot find our arch we go the old direct way of retrieving repo\n\tif repoDigest == \"\" && app.RemoteImage.DockerPlatform != \"\" {\n\t\treturn errors.New(\"no docker image found for platform \" + app.RemoteImage.DockerPlatform)\n\t} else if repoDigest == \"\" {\n\t\trepoDigest, err = p.GetDockerImageRepoDigest(image, auth)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tli := strings.LastIndex(app.From, \":\")\n\tvar imageName string\n\tif li < 0 {\n\t\timageName = app.From\n\t} else {\n\t\tsplits := []string{app.From[:li], app.From[li+1:]}\n\t\timageName = splits[0]\n\t}\n\n\t//Extract image name from repo digest. 
eg: Extract \"busybox\" from \"busybox@sha256:afe605d272837ce1732f390966166c2afff5391208ddd57de10942748694049d\"\n\tif strings.Contains(imageName, \"@sha256\") {\n\t\tsplits := strings.Split(imageName, \"@\")\n\t\timageName = splits[0]\n\t}\n\n\tif !strings.Contains(repoDigest, \"@\") {\n\t\trepoDigest = imageName + \"@\" + repoDigest\n\t}\n\n\tapp.Username = auth.Username\n\tapp.Password = auth.Password\n\n\tapp.RemoteImage.Exists = true\n\tapp.RemoteImage.DockerDigest = repoDigest\n\tapp.RemoteImage.DockerConfig = dockerConfig\n\tapp.RemoteImage.DockerManifest = dockerManifest\n\tapp.RemoteImage.DockerRegistry = dockerRegistry\n\tapp.RemoteImage.DockerPlatform = dockerPlatform\n\tapp.RemoteImage.ImagePath = image.Path\n\n\treturn nil\n}", "func (suite *APIImageInspectSuite) TestImageInspectOk(c *check.C) {\n\tvar (\n\t\trepo = environment.BusyboxRepo\n\t\ttag = \"1.24\"\n\n\t\tid = \"sha256:ca3d7d608b8a8bbaaac2c350bd0f9588cce0509ada74108d5c4b2afb24c46125\"\n\t\tdig = \"sha256:840f2b98a2540ff1d265782c42543dbec7218d3ab0e73b296d7dac846f146e27\"\n\t)\n\n\trepoTag := fmt.Sprintf(\"%s:%s\", repo, tag)\n\trepoDigest := fmt.Sprintf(\"%s@%s\", repo, dig)\n\n\tfor _, image := range []string{\n\t\tid,\n\t\trepoTag,\n\t\trepoDigest,\n\t\tfmt.Sprintf(\"%s:whatever@%s\", repo, dig),\n\t} {\n\t\tresp, err := request.Get(\"/images/\" + image + \"/json\")\n\t\tc.Assert(err, check.IsNil)\n\t\tCheckRespStatus(c, resp, 200)\n\n\t\tgot := types.ImageInfo{}\n\t\terr = request.DecodeBody(&got, resp.Body)\n\t\tc.Assert(err, check.IsNil)\n\n\t\t// TODO: More specific check is needed\n\t\tc.Assert(got.Config, check.NotNil)\n\t\tc.Assert(got.ID, check.Equals, id)\n\t\tc.Assert(got.CreatedAt, check.NotNil)\n\t\tc.Assert(got.Size, check.NotNil)\n\t\tc.Assert(reflect.DeepEqual(got.RepoTags, []string{repoTag}), check.Equals, true)\n\t\tc.Assert(reflect.DeepEqual(got.RepoDigests, []string{repoDigest}), check.Equals, true)\n\t}\n}", "func MockOnGetManagerVirtualMedia(ctx context.Context, mockAPI *redfishMocks.RedfishAPI,\n\tmanagerID string, virtualMediaID string, virtualMedia redfishClient.VirtualMedia,\n\thttpResponse *http.Response, err error) {\n\tmediaRequest := redfishClient.ApiGetManagerVirtualMediaRequest{}\n\tmockAPI.On(\"GetManagerVirtualMedia\", ctx, managerID, virtualMediaID).Return(mediaRequest).Times(1)\n\tmockAPI.On(\"GetManagerVirtualMediaExecute\", mock.Anything).Return(virtualMedia, httpResponse, err).Times(1)\n}", "func (m *MockUserServer) GetRoleImage(arg0 context.Context, arg1 *pb.RoleRequest) (*pb.TextReply, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetRoleImage\", arg0, arg1)\n\tret0, _ := ret[0].(*pb.TextReply)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockUserClient) GetRoleImage(ctx context.Context, in *pb.RoleRequest, opts ...grpc.CallOption) (*pb.TextReply, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx, in}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"GetRoleImage\", varargs...)\n\tret0, _ := ret[0].(*pb.TextReply)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestCannotExecuteStatusImage(t *testing.T) {\n\tbuf := setLogBuffer()\n\tdefer func() {\n\t\tif t.Failed() {\n\t\t\tt.Log(buf.String())\n\t\t}\n\t}()\n\n\tif StatusImage == \"\" {\n\t\tt.Skip(\"no status image defined\")\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\n\tapp := &models.App{Name: id.New().String()}\n\tapp = ensureApp(t, app)\n\n\tfn := 
&models.Fn{\n\t\tAppID: app.ID,\n\t\tName: id.New().String(),\n\t\tImage: StatusImage,\n\t\tResourceConfig: models.ResourceConfig{\n\t\t\tMemory: memory,\n\t\t},\n\t}\n\tfn = ensureFn(t, fn)\n\n\tlb, err := LB()\n\tif err != nil {\n\t\tt.Fatalf(\"Got unexpected error: %v\", err)\n\t}\n\tu := url.URL{\n\t\tScheme: \"http\",\n\t\tHost: lb,\n\t}\n\tu.Path = path.Join(u.Path, \"invoke\", fn.ID)\n\n\tcontent := bytes.NewBuffer([]byte(`status`))\n\toutput := &bytes.Buffer{}\n\n\tresp, err := callFN(ctx, u.String(), content, output, models.TypeSync)\n\tif err != nil {\n\t\tt.Fatalf(\"Got unexpected error: %v\", err)\n\t}\n\n\tif resp.StatusCode != http.StatusBadRequest {\n\t\tt.Fatalf(\"StatusCode check failed on %v\", resp.StatusCode)\n\t}\n}", "func (imp *Importer) fetchImages() {\n err := downloadImages(\n imp.idPath,\n func(id string, bodyRdr io.Reader) error {\n img, err := jpeg.Decode(bodyRdr)\n if err == nil {\n imp.send(&imagedata.ImageData{Id: id, Data: &img})\n } else {\n log.Printf(\"Error decoding image %s to jpeg\\n\", id)\n }\n return nil\n },\n )\n\n if err != nil { imp.sendErr(err) }\n}", "func (f *SourceFetcher) Fetch(url string, namespace string) (*info.ImageProperties, bool, error) {\n\tc := make(chan FetchResult)\n\tgo f.uniqueFetchSource(c, url, namespace)\n\tr := <-c\n\treturn r.ImageDetails, r.Downloaded, r.Error\n}", "func (c *dockerClientMock) DownloadImageContent(imageSource, filePath string) (v1.Image, error) {\n\treturn &fake.FakeImage{}, nil // fmt.Errorf(\"%s\", filePath)\n}", "func (m *MockPexeler) GetRandomImage(arg0 string) (int, []byte, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetRandomImage\", arg0)\n\tret0, _ := ret[0].(int)\n\tret1, _ := ret[1].([]byte)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}", "func picturesMock(w http.ResponseWriter, r *http.Request) {\n\tjson := `{\"copyright\":\"Amir H. 
Abolfath\",\"date\":\"2019-12-06\",\"explanation\":\"This frame.\",\"hdurl\":\"https://apod.nasa.gov/apod/image/1912/TaurusAbolfath.jpg\",\"media_type\":\"image\",\"service_version\":\"v1\",\"title\":\"Pleiades to Hyades\",\"url\":\"https://apod.nasa.gov/apod/image/1912/TaurusAbolfath1024.jpg\"}`\n\tw.WriteHeader(200)\n\t_, _ = w.Write([]byte(json))\n}", "func ttrPullImage(ctx context.Context, client apiclient.APIClient, image string) error {\n\trc, err := client.ImagePull(ctx, ttrImageName(image), types.ImagePullOptions{RegistryAuth: \"{}\"})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif rc != nil {\n\t\tbody, err := readAllAndClose(rc)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !strings.Contains(body, \"Status: Downloaded newer image\") {\n\t\t\treturn errors.New(\"image pull not successful\")\n\t\t}\n\t}\n\treturn nil\n}", "func (m *MockManager) GetLoadedImageName() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetLoadedImageName\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func pull(ctx context.Context, imgCache *cache.Handle, directTo, pullFrom string, noHTTPS bool) (imagePath string, err error) {\n\tshubURI, err := ParseReference(pullFrom)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to parse shub uri: %s\", err)\n\t}\n\n\t// Get the image manifest\n\tmanifest, err := GetManifest(shubURI, noHTTPS)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to get manifest for: %s: %s\", pullFrom, err)\n\t}\n\n\tif directTo != \"\" {\n\t\tsylog.Infof(\"Downloading shub image\")\n\t\tif err := DownloadImage(ctx, manifest, directTo, pullFrom, true, noHTTPS); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\timagePath = directTo\n\t} else {\n\t\tcacheEntry, err := imgCache.GetEntry(cache.ShubCacheType, manifest.Commit)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"unable to check if %v exists in cache: %v\", manifest.Commit, err)\n\t\t}\n\t\tdefer cacheEntry.CleanTmp()\n\t\tif !cacheEntry.Exists {\n\t\t\tsylog.Infof(\"Downloading shub image\")\n\n\t\t\terr := DownloadImage(ctx, manifest, cacheEntry.TmpPath, pullFrom, true, noHTTPS)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\n\t\t\terr = cacheEntry.Finalize()\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\timagePath = cacheEntry.Path\n\t\t} else {\n\t\t\tsylog.Infof(\"Use cached image\")\n\t\t\timagePath = cacheEntry.Path\n\t\t}\n\n\t}\n\n\treturn imagePath, nil\n}", "func (m *MockGetRandomPexeler) GetRandomImage(arg0 string) ([]byte, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetRandomImage\", arg0)\n\tret0, _ := ret[0].([]byte)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func PrepareBaseImage(ctx context.Context, ref string, output io.Writer) (err error) {\n\n\tcli, err := client.NewEnvClient()\n\tif err != nil {\n\t\treturn\n\t}\n\n\treader, err := cli.ImagePull(ctx, ref, types.ImagePullOptions{})\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer reader.Close()\n\n\tvar log pullLog\n\tstatus := make(map[string]string)\n\tscanner := bufio.NewScanner(reader)\n\tfor scanner.Scan() {\n\t\tjson.Unmarshal(scanner.Bytes(), &log)\n\t\tif log.ID != \"\" {\n\t\t\tcur := status[log.ID]\n\t\t\tif cur != log.Status {\n\t\t\t\tstatus[log.ID] = log.Status\n\t\t\t\tfmt.Fprintln(output, log.Status, log.ID)\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Fprintln(output, log.Status)\n\t\t}\n\t}\n\n\treturn\n\n}", "func (v *Virt) ImageRemoteDigest(ctx context.Context, image string) (digest string, err error) {\n\treturn\n}", "func (m *MockInterface) AROOperatorImage() 
string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"AROOperatorImage\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func (m *MockEcrClient) ImageListable(arg0, arg1, arg2, arg3 string) (bool, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ImageListable\", arg0, arg1, arg2, arg3)\n\tret0, _ := ret[0].(bool)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func ttrPushImageAsserting(ctx context.Context, t *testing.T, client apiclient.APIClient, image string) {\n\trc, err := client.ImagePush(ctx, ttrImageName(image), types.ImagePushOptions{RegistryAuth: \"{}\"})\n\tassert.Assert(t, err)\n\tif rc != nil {\n\t\tbody, err := readAllAndClose(rc)\n\t\tassert.Assert(t, err)\n\t\tassert.Assert(t, strings.Contains(body, `\"status\":\"latest: digest: `))\n\t}\n}", "func (v *Virt) ImagePull(ctx context.Context, ref string, all bool) (rc io.ReadCloser, err error) {\n\treturn\n}", "func _getMock(url string) (content []byte, err error) {\n\tvar idnum = crc32.ChecksumIEEE([]byte(url))%uint32(5) + 1\n\tvar response = fmt.Sprintf(mockResponseTemplate, idnum, idnum, \"no message\", 200)\n\treturn []byte(response), nil\n}", "func (m *MockSession) Remote() *iface.RemoteInfo {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Remote\")\n\tret0, _ := ret[0].(*iface.RemoteInfo)\n\treturn ret0\n}", "func (f FetchStruct) FetchFromRemote() *model.FetchResult {\n\trepo := f.Repo\n\tremoteURL := f.RemoteURL\n\tremoteBranch := f.RemoteBranch\n\trepoPath := f.RepoPath\n\n\tvar remoteDataObject RemoteDataInterface\n\tremoteDataObject = RemoteDataStruct{\n\t\tRepo: repo,\n\t\tRemoteURL: remoteURL,\n\t}\n\n\tremoteName := remoteDataObject.GetRemoteName()\n\tlogger := global.Logger{}\n\n\ttargetRefPsec := \"refs/heads/\" + remoteBranch + \":refs/remotes/\" + remoteBranch\n\tb := new(bytes.Buffer)\n\tvar fetchErr error\n\tgitSSHAuth, sshErr := ssh.NewSSHAgentAuth(\"git\")\n\tw, _ := repo.Worktree()\n\n\t// Check if repo path is empty and fetch path from worktree\n\tif repoPath == \"\" {\n\t\trepoPath = w.Filesystem.Root()\n\t}\n\n\tif sshErr != nil {\n\t\tlogger.Log(\"Authentication method failed -> \"+sshErr.Error(), global.StatusError)\n\t\tif w == nil {\n\t\t\treturn &model.FetchResult{\n\t\t\t\tStatus: global.FetchFromRemoteError,\n\t\t\t\tFetchedItems: nil,\n\t\t\t}\n\t\t}\n\t\tlogger.Log(\"Retrying fetch with fallback module using git client\", global.StatusWarning)\n\t\treturn f.windowsFetch()\n\t}\n\n\tlogger.Log(fmt.Sprintf(\"Fetching changes from -> %s : %s\", remoteURL, targetRefPsec), global.StatusInfo)\n\n\tif remoteURL != \"\" && remoteBranch != \"\" {\n\t\tif remoteName == \"\" {\n\t\t\treturn &model.FetchResult{\n\t\t\t\tStatus: global.FetchFromRemoteError,\n\t\t\t\tFetchedItems: nil,\n\t\t\t}\n\t\t}\n\n\t\tfetchErr = repo.Fetch(&git.FetchOptions{\n\t\t\tRemoteName: remoteName,\n\t\t\tAuth: gitSSHAuth,\n\t\t\tRefSpecs: []config.RefSpec{config.RefSpec(targetRefPsec)},\n\t\t\tProgress: sideband.Progress(func(f io.Writer) io.Writer {\n\t\t\t\treturn f\n\t\t\t}(b)),\n\t\t})\n\t} else {\n\t\tfetchErr = repo.Fetch(&git.FetchOptions{\n\t\t\tRemoteName: git.DefaultRemoteName,\n\t\t\tAuth: gitSSHAuth,\n\t\t\tProgress: sideband.Progress(func(f io.Writer) io.Writer {\n\t\t\t\treturn f\n\t\t\t}(b)),\n\t\t})\n\t}\n\n\tif fetchErr != nil {\n\t\tif fetchErr.Error() == \"already up-to-date\" {\n\t\t\tlogger.Log(fetchErr.Error(), global.StatusWarning)\n\t\t\treturn &model.FetchResult{\n\t\t\t\tStatus: global.FetchNoNewChanges,\n\t\t\t\tFetchedItems: nil,\n\t\t\t}\n\t\t} else 
{\n\t\t\tlogger.Log(fetchErr.Error(), global.StatusError)\n\t\t\tlogger.Log(\"Fetch failed. Retrying fetch with git client\", global.StatusWarning)\n\t\t\treturn f.windowsFetch()\n\t\t}\n\n\t} else {\n\t\tlogger.Log(b.String(), global.StatusInfo)\n\t\tlogger.Log(\"Changes fetched from remote\", global.StatusInfo)\n\n\t\tmsg := fmt.Sprintf(\"Changes fetched from remote %v\", remoteName)\n\t\treturn &model.FetchResult{\n\t\t\tStatus: global.FetchFromRemoteSuccess,\n\t\t\tFetchedItems: []*string{&msg},\n\t\t}\n\t}\n\n}", "func (m *MockRepository) Fetch(bucketName, name string, model db.Model) (bool, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Fetch\", bucketName, name, model)\n\tret0, _ := ret[0].(bool)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *MockECRAPI) ListImages(_param0 *ecr.ListImagesInput) (*ecr.ListImagesOutput, error) {\n\tret := _m.ctrl.Call(_m, \"ListImages\", _param0)\n\tret0, _ := ret[0].(*ecr.ListImagesOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockFetcher) Fetch(req utils.Request) (responseBody []byte, err error) {\n\targs := m.Called(req)\n\n\tif args.Get(0) != nil {\n\t\tresponseBody = args.Get(0).([]byte)\n\t}\n\n\terr = args.Error(1)\n\n\treturn responseBody, err\n}", "func (_m *MockECRAPI) BatchGetImage(_param0 *ecr.BatchGetImageInput) (*ecr.BatchGetImageOutput, error) {\n\tret := _m.ctrl.Call(_m, \"BatchGetImage\", _param0)\n\tret0, _ := ret[0].(*ecr.BatchGetImageOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockHandler) GetProfileAvatar(arg0 http.ResponseWriter, arg1 *http.Request) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"GetProfileAvatar\", arg0, arg1)\n}", "func (a *UserServiceApiService) GetAvatarsExecute(r ApiGetAvatarsRequest) (*os.File, *_nethttp.Response, GenericOpenAPIError) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\texecutionError GenericOpenAPIError\n\t\tlocalVarReturnValue *os.File\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"UserServiceApiService.GetAvatars\")\n\tif err != nil {\n\t\texecutionError.error = err.Error()\n\t\treturn localVarReturnValue, nil, executionError\n\t}\n\n\tlocalVarPath := localBasePath + \"/users/{username}/avatars\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"username\"+\"}\", _neturl.PathEscape(parameterToString(r.username, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\texecutionError.error = 
err.Error()\n\t\treturn localVarReturnValue, nil, executionError\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\texecutionError.error = err.Error()\n\t\treturn localVarReturnValue, localVarHTTPResponse, executionError\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\texecutionError.error = err.Error()\n\t\treturn localVarReturnValue, localVarHTTPResponse, executionError\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, executionError\n}", "func (m *MockLoggingClient) Fetch(arg0 context.Context, arg1 *logging.QueryRequest, arg2 ...grpc.CallOption) (*logging.QueryResponse, error) {\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Fetch\", varargs...)\n\tret0, _ := ret[0].(*logging.QueryResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockClient) Fetch(keyword string) (*[]linebot.Response, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Fetch\", keyword)\n\tret0, _ := ret[0].(*[]linebot.Response)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (f *FakeImagesClient) Get(ctx context.Context, getOpts *images.GetRequest, opts ...grpc.CallOption) (*images.GetResponse, error) {\n\tf.Lock()\n\tdefer f.Unlock()\n\tf.appendCalled(\"get\", getOpts)\n\tif err := f.getError(\"get\"); err != nil {\n\t\treturn nil, err\n\t}\n\timage, ok := f.ImageList[getOpts.Name]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"image does not exist\")\n\t}\n\treturn &images.GetResponse{\n\t\tImage: &image,\n\t}, nil\n}", "func PullImage(c *check.C, image string) {\n\tresp, err := request.Get(\"/images/\" + image + \"/json\")\n\tc.Assert(err, check.IsNil)\n\n\tif resp.StatusCode == http.StatusOK {\n\t\tresp.Body.Close()\n\t\treturn\n\t}\n\n\tq := url.Values{}\n\tq.Add(\"fromImage\", image)\n\tresp, err = request.Post(\"/images/create\", request.WithQuery(q))\n\tc.Assert(err, check.IsNil)\n\tc.Assert(resp.StatusCode, check.Equals, 200)\n\n\tdefer resp.Body.Close()\n\tc.Assert(fetchPullStatus(resp.Body), check.IsNil)\n}", "func (a *ImagesApiService) GetImageExecute(r ApiGetImageRequest) ([]AnchoreImage, *http.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = http.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tformFiles []formFile\n\t\tlocalVarReturnValue []AnchoreImage\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"ImagesApiService.GetImage\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/images/{imageDigest}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"imageDigest\"+\"}\", url.PathEscape(parameterToString(r.imageDigest, \"\")), -1)\n\n\tlocalVarHeaderParams := 
make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.xAnchoreAccount != nil {\n\t\tlocalVarHeaderParams[\"x-anchore-account\"] = parameterToString(*r.xAnchoreAccount, \"\")\n\t}\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := &GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 500 {\n\t\t\tvar v ApiErrorResponse\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := &GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (m *MockUpstreamIntf) CachedRemoteHeight() uint64 {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CachedRemoteHeight\")\n\tret0, _ := ret[0].(uint64)\n\treturn ret0\n}", "func GetImg(fileName string) (*image.Image, error) {\n localFile := fmt.Sprintf(\"/data/edgebox/local/%s\", fileName)\n existingImageFile, err := os.Open(localFile)\n if err == nil {\n defer existingImageFile.Close()\n imageData, _, err := image.Decode(existingImageFile)\n if err != nil {\n return nil, err\n }\n return &imageData, nil\n }\n\n remoteFile := fmt.Sprintf(\"/data/edgebox/remote/%s\", fileName)\n existingImageFile, err = os.Open(remoteFile)\n if err == nil {\n defer existingImageFile.Close()\n imageData, _, err := image.Decode(existingImageFile)\n if err != nil {\n return nil, err\n }\n return &imageData, nil\n }\n return nil, err\n}", "func (a *ImageApiService) GetPersonImage(ctx _context.Context, name string, imageType ImageType, imageIndex int32, localVarOptionals *GetPersonImageOpts) (*os.File, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = 
_nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue *os.File\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/Persons/{name}/Images/{imageType}/{imageIndex}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"name\"+\"}\", _neturl.QueryEscape(parameterToString(name, \"\")) , -1)\n\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"imageType\"+\"}\", _neturl.QueryEscape(parameterToString(imageType, \"\")) , -1)\n\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"imageIndex\"+\"}\", _neturl.QueryEscape(parameterToString(imageIndex, \"\")) , -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif localVarOptionals != nil && localVarOptionals.Tag.IsSet() {\n\t\tlocalVarQueryParams.Add(\"tag\", parameterToString(localVarOptionals.Tag.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Format.IsSet() {\n\t\tlocalVarQueryParams.Add(\"format\", parameterToString(localVarOptionals.Format.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.MaxWidth.IsSet() {\n\t\tlocalVarQueryParams.Add(\"maxWidth\", parameterToString(localVarOptionals.MaxWidth.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.MaxHeight.IsSet() {\n\t\tlocalVarQueryParams.Add(\"maxHeight\", parameterToString(localVarOptionals.MaxHeight.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.PercentPlayed.IsSet() {\n\t\tlocalVarQueryParams.Add(\"percentPlayed\", parameterToString(localVarOptionals.PercentPlayed.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.UnplayedCount.IsSet() {\n\t\tlocalVarQueryParams.Add(\"unplayedCount\", parameterToString(localVarOptionals.UnplayedCount.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Width.IsSet() {\n\t\tlocalVarQueryParams.Add(\"width\", parameterToString(localVarOptionals.Width.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Height.IsSet() {\n\t\tlocalVarQueryParams.Add(\"height\", parameterToString(localVarOptionals.Height.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Quality.IsSet() {\n\t\tlocalVarQueryParams.Add(\"quality\", parameterToString(localVarOptionals.Quality.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.CropWhitespace.IsSet() {\n\t\tlocalVarQueryParams.Add(\"cropWhitespace\", parameterToString(localVarOptionals.CropWhitespace.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.AddPlayedIndicator.IsSet() {\n\t\tlocalVarQueryParams.Add(\"addPlayedIndicator\", parameterToString(localVarOptionals.AddPlayedIndicator.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Blur.IsSet() {\n\t\tlocalVarQueryParams.Add(\"blur\", parameterToString(localVarOptionals.Blur.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.BackgroundColor.IsSet() {\n\t\tlocalVarQueryParams.Add(\"backgroundColor\", parameterToString(localVarOptionals.BackgroundColor.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.ForegroundLayer.IsSet() {\n\t\tlocalVarQueryParams.Add(\"foregroundLayer\", parameterToString(localVarOptionals.ForegroundLayer.Value(), \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set 
Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"image/_*\", \"application/json\", \"application/json; profile=CamelCase\", \"application/json; profile=PascalCase\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"X-Emby-Authorization\"] = key\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v ProblemDetails\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func TestRemoteFactory_CreateForDockerHub(t *testing.T) {\n\t// No credentials required for public Image\n\tfact := NewRemoteClientFactory(Credentials{}, log.NewNopLogger(), nil, time.Second)\n\timg, err := flux.ParseImage(\"alpine:latest\", nil)\n\ttestRepository = RepositoryFromImage(img)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tr, err := fact.CreateFor(testRepository.Host())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tres, err := r.Manifest(testRepository, img.Tag)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\texpected := \"index.docker.io/library/alpine:latest\"\n\tif res.FullID() != expected {\n\t\tt.Fatal(\"Expected %q. 
Got %q\", expected, res.FullID())\n\t}\n}", "func (c MockDockerClient) ImageInspect(ctx context.Context, imageName string) (dockertypes.ImageInspect, error) {\n\tif c.ImageInspectFn != nil {\n\t\tfmt.Println(\"[MockDockerClient] In \", utils.CurrentFunctionName())\n\t\tfmt.Println(\"[MockDockerClient] - ctx: \", ctx)\n\t\tfmt.Println(\"[MockDockerClient] - imageName: \", imageName)\n\t\treturn c.ImageInspectFn(ctx, imageName)\n\t}\n\tpanic(fmt.Sprintf(\"No function defined for: %s\", utils.CurrentFunctionName()))\n}", "func (cli *FakeDatabaseClient) FetchServiceImageMetaData(ctx context.Context, in *dbdpb.FetchServiceImageMetaDataRequest, opts ...grpc.CallOption) (*dbdpb.FetchServiceImageMetaDataResponse, error) {\n\tatomic.AddInt32(&cli.fetchServiceImageMetaDataCnt, 1)\n\tif cli.methodToResp == nil {\n\t\treturn nil, nil\n\t}\n\tmethod := \"FetchServiceImageMetaData\"\n\tif resp, ok := cli.methodToResp[method]; ok {\n\t\treturn resp.(*dbdpb.FetchServiceImageMetaDataResponse), nil\n\t}\n\treturn nil, nil\n\n}", "func getImages(hostBase string, organization string, application string) (*http.Response, []*server.Image, error) {\n\n\turl := getImagesURL(hostBase, organization, application)\n\n\tkiln.LogInfo.Printf(\"Invoking get at URL %s\", url)\n\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\treq.Header.Add(\"Accept\", \"application/json\")\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", \"e30K.e30K.e30K\"))\n\tclient := &http.Client{}\n\tresponse, err := client.Do(req)\n\n\timages := []*server.Image{}\n\n\tbytes, err := ioutil.ReadAll(response.Body)\n\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tbody := string(bytes)\n\n\tkiln.LogInfo.Printf(\"Response is %s\", body)\n\n\tjson.Unmarshal(bytes, &images)\n\n\treturn response, images, err\n\n}", "func pullMissingImage(ctx context.Context, apiClient client.CommonAPIClient, image string, force bool) error {\n\tif !force {\n\t\t_, inspectError := apiClient.ImageInspect(ctx, image)\n\t\tif inspectError == nil {\n\t\t\treturn nil\n\t\t}\n\t\tif err, ok := inspectError.(client.RespError); !ok {\n\t\t\treturn inspectError\n\t\t} else if err.Code() != http.StatusNotFound {\n\t\t\treturn inspectError\n\t\t}\n\t}\n\n\tnamedRef, err := reference.Parse(image)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnamedRef = reference.TrimTagForDigest(reference.WithDefaultTagIfMissing(namedRef))\n\n\tvar name, tag string\n\tif reference.IsNameTagged(namedRef) {\n\t\tname, tag = namedRef.Name(), namedRef.(reference.Tagged).Tag()\n\t} else {\n\t\tname = namedRef.String()\n\t}\n\n\tresponseBody, err := apiClient.ImagePull(ctx, name, tag, fetchRegistryAuth(namedRef.Name()))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to pull image: %v\", err)\n\t}\n\tdefer responseBody.Close()\n\n\treturn showProgress(responseBody)\n}", "func (s *Client) Image(fileID string, page int) (file []byte, err error) {\n\tif page <= 0 {\n\t\tpage = 1\n\t}\n\tqueryParam := fmt.Sprintf(\"?page=%d\", page)\n\turl := strings.Join([]string{s.config.apiBaseURL, \"/result/image/\", fileID, queryParam}, \"\")\n\n\tlog.Printf(\"get image url %s\", url)\n\treq, err := http.NewRequest(\"GET\", url, strings.NewReader(\"\"))\n\tif err != nil {\n\t\treturn\n\t}\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\treq.Header.Add(\"Authorization\", strings.Join([]string{\"Bearer \", s.getToken()}, \"\"))\n\n\tres, err := s.httpClient.Do(req)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer res.Body.Close()\n\tfile, err = ioutil.ReadAll(res.Body)\n\tif err != nil 
{\n\t\treturn\n\t}\n\treturn\n}", "func (m *MockUpstreamIntf) RemoteDigestOfHeight(arg0 uint64) (blockdigest.Digest, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"RemoteDigestOfHeight\", arg0)\n\tret0, _ := ret[0].(blockdigest.Digest)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (mr *MockFetcherMockRecorder) FetchRemoteImage(arg0 interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"FetchRemoteImage\", reflect.TypeOf((*MockFetcher)(nil).FetchRemoteImage), arg0)\n}", "func dirImageMockWithRef(t *testing.T, dir string, ref types.ImageReference) private.UnparsedImage {\n\tsrcRef, err := directory.NewReference(dir)\n\trequire.NoError(t, err)\n\tsrc, err := srcRef.NewImageSource(context.Background(), nil)\n\trequire.NoError(t, err)\n\tt.Cleanup(func() {\n\t\terr := src.Close()\n\t\trequire.NoError(t, err)\n\t})\n\treturn image.UnparsedInstance(&dirImageSourceMock{\n\t\tImageSource: imagesource.FromPublic(src),\n\t\tref: ref,\n\t}, nil)\n}", "func (o *Options) fetchBlob(ctx context.Context, restctx *registryclient.Context, resource, layerDigest string, dstPaths []string) error {\n\n\trefStr := path.Join(o.ToMirror, resource)\n\tref, err := reference.Parse(refStr)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"parse ref %s: %v\", refStr, err)\n\t}\n\n\tlogrus.Debugf(\"copying blob %s from %s\", layerDigest, ref.Exact())\n\n\trepo, err := restctx.RepositoryForRef(ctx, ref, o.SkipTLS)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"create repo for %s: %v\", ref, err)\n\t}\n\tdgst, err := digest.Parse(layerDigest)\n\tif err != nil {\n\t\treturn err\n\t}\n\trc, err := repo.Blobs(ctx).Open(ctx, dgst)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"open blob: %v\", err)\n\t}\n\tdefer rc.Close()\n\tfor _, dstPath := range dstPaths {\n\t\tif err := copyBlobFile(rc, dstPath); err != nil {\n\t\t\treturn fmt.Errorf(\"copy blob: %v\", err)\n\t\t}\n\t\tif _, err := rc.Seek(0, 0); err != nil {\n\t\t\treturn fmt.Errorf(\"seek to start of blob: %v\", err)\n\t\t}\n\t}\n\n\treturn nil\n}", "func createImageMirrorForInternalImages(prefix string, ref reference.DockerImageReference, mirrored bool) ([]string, error) {\n\tsource := ref.Exact()\n\n\tinitialDefaults := k8simage.GetOriginalImageConfigs()\n\texceptions := image.Exceptions.List()\n\tdefaults := map[k8simage.ImageID]k8simage.Config{}\n\nimageLoop:\n\tfor i, config := range initialDefaults {\n\t\tfor _, exception := range exceptions {\n\t\t\tif strings.Contains(config.GetE2EImage(), exception) {\n\t\t\t\tcontinue imageLoop\n\t\t\t}\n\t\t}\n\t\tdefaults[i] = config\n\t}\n\n\tupdated := k8simage.GetMappedImageConfigs(defaults, ref.Exact())\n\topenshiftDefaults := image.OriginalImages()\n\topenshiftUpdated := image.GetMappedImages(openshiftDefaults, imagesetup.DefaultTestImageMirrorLocation)\n\n\t// if we've mirrored, then the source is going to be our repo, not upstream's\n\tif mirrored {\n\t\tbaseRef, err := reference.Parse(imagesetup.DefaultTestImageMirrorLocation)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid default mirror location: %v\", err)\n\t\t}\n\n\t\t// calculate the mapping of upstream images by setting defaults to baseRef\n\t\tcovered := sets.NewString()\n\t\tfor i, config := range updated {\n\t\t\tdefaultConfig := defaults[i]\n\t\t\tpullSpec := config.GetE2EImage()\n\t\t\tif pullSpec == defaultConfig.GetE2EImage() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif covered.Has(pullSpec) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcovered.Insert(pullSpec)\n\t\t\te2eRef, err := 
reference.Parse(pullSpec)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid test image: %s: %v\", pullSpec, err)\n\t\t\t}\n\t\t\tif len(e2eRef.Tag) == 0 {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid test image: %s: no tag\", pullSpec)\n\t\t\t}\n\t\t\tconfig.SetRegistry(baseRef.Registry)\n\t\t\tconfig.SetName(baseRef.RepositoryName())\n\t\t\tconfig.SetVersion(e2eRef.Tag)\n\t\t\tdefaults[i] = config\n\t\t}\n\n\t\t// calculate the mapping for openshift images by populating openshiftUpdated\n\t\topenshiftUpdated = make(map[string]string)\n\t\tsourceMappings := image.GetMappedImages(openshiftDefaults, imagesetup.DefaultTestImageMirrorLocation)\n\t\ttargetMappings := image.GetMappedImages(openshiftDefaults, source)\n\n\t\tfor from, to := range targetMappings {\n\t\t\tif from == to {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif covered.Has(to) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcovered.Insert(to)\n\t\t\tfrom := sourceMappings[from]\n\t\t\topenshiftUpdated[from] = to\n\t\t}\n\t}\n\n\tcovered := sets.NewString()\n\tvar lines []string\n\tfor i := range updated {\n\t\ta, b := defaults[i], updated[i]\n\t\tfrom, to := a.GetE2EImage(), b.GetE2EImage()\n\t\tif from == to {\n\t\t\tcontinue\n\t\t}\n\t\tif covered.Has(from) {\n\t\t\tcontinue\n\t\t}\n\t\tcovered.Insert(from)\n\t\tlines = append(lines, fmt.Sprintf(\"%s %s%s\", from, prefix, to))\n\t}\n\n\tfor from, to := range openshiftUpdated {\n\t\tif from == to {\n\t\t\tcontinue\n\t\t}\n\t\tif covered.Has(from) {\n\t\t\tcontinue\n\t\t}\n\t\tcovered.Insert(from)\n\t\tlines = append(lines, fmt.Sprintf(\"%s %s%s\", from, prefix, to))\n\t}\n\n\tsort.Strings(lines)\n\treturn lines, nil\n}", "func (l *RegistryListener) FindImage(rImg *registry.Image) (*dockerclient.Image, error) {\n\tregaddr := path.Join(l.address, rImg.String())\n\tglog.V(1).Infof(\"Searching for image %s\", regaddr)\n\n\t// check for UUID\n\tif img, err := l.docker.FindImage(rImg.UUID); err == nil {\n\t\treturn img, nil\n\t}\n\n\t// check by repo and tag, and compare hashes\n\tglog.V(1).Infof(\"UUID %s not found locally, searching by registry address for %s\", rImg.UUID, regaddr)\n\tif img, err := l.docker.FindImage(regaddr); err == nil {\n\t\tif localHash, err := l.docker.GetImageHash(img.ID); err != nil {\n\t\t\tglog.Warningf(\"Error building hash of image: %s: %s\", img.ID, err)\n\t\t} else {\n\t\t\tif localHash == rImg.Hash {\n\t\t\t\treturn img, nil\n\t\t\t}\n\t\t\tglog.V(1).Infof(\"Found %s locally, but hashes do not match\", regaddr)\n\t\t}\n\t}\n\n\t// attempt to pull the image, then compare hashes\n\tglog.V(0).Infof(\"Image address %s not found locally, attempting pull\", regaddr)\n\tif err := l.docker.PullImage(regaddr); err == nil {\n\t\tglog.V(1).Infof(\"Successfully pulled image %s from registry, checking for match\", regaddr)\n\t\tif img, err := l.docker.FindImage(regaddr); err == nil {\n\t\t\tif img.ID == rImg.UUID {\n\t\t\t\tglog.V(1).Infof(\"Found image %s in registry with correct UUID\", regaddr)\n\t\t\t\treturn img, nil\n\t\t\t}\n\t\t\tif localHash, err := l.docker.GetImageHash(img.ID); err != nil {\n\t\t\t\tglog.Warningf(\"Error building hash of image: %s: %s\", img.ID, err)\n\t\t\t} else {\n\t\t\t\tif localHash == rImg.Hash {\n\t\t\t\t\tglog.V(1).Infof(\"Found image %s in registry with correct Hash\", regaddr)\n\t\t\t\t\treturn img, nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// search all images for a matching hash\n\t// First just check top-level layers\n\tglog.V(0).Infof(\"Image %s not found in registry, searching local images by hash\", regaddr)\n\tif img, err := 
l.docker.FindImageByHash(rImg.Hash, false); err == nil {\n\t\treturn img, nil\n\t}\n\n\t// Now check all layers\n\tglog.V(0).Infof(\"Hash for Image %s not found in top-level layers, searching all layers\", regaddr)\n\treturn l.docker.FindImageByHash(rImg.Hash, true)\n}", "func downloadImage(data *ImageData) (io.ReadCloser, error) {\n\ttimeout := time.Duration(10 * time.Second)\n\tclient := http.Client{\n\t\tTimeout: timeout,\n\t}\n\tresponse, err := client.Get(data.Url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer client.CloseIdleConnections()\n\treturn response.Body, err\n}", "func (suite *APIImageSaveLoadSuite) TestImageSaveLoadOk(c *check.C) {\n\tbefore, err := request.Get(\"/images/\" + busyboxImage125 + \"/json\")\n\tc.Assert(err, check.IsNil)\n\tCheckRespStatus(c, before, 200)\n\tgotBefore := types.ImageInfo{}\n\terr = request.DecodeBody(&gotBefore, before.Body)\n\tc.Assert(err, check.IsNil)\n\n\tq := url.Values{}\n\tq.Set(\"name\", busyboxImage125)\n\tquery := request.WithQuery(q)\n\tresp, err := request.Get(\"/images/save\", query)\n\tc.Assert(err, check.IsNil)\n\tdefer resp.Body.Close()\n\n\tdir, err := ioutil.TempDir(\"\", \"TestImageSaveLoadOk\")\n\tif err != nil {\n\t\tc.Errorf(\"failed to create a new temporary directory: %v\", err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\ttmpFile := filepath.Join(dir, \"busyboxImage.tar\")\n\tf, err := os.Create(tmpFile)\n\tif err != nil {\n\t\tc.Errorf(\"failed to create file: %v\", err)\n\t}\n\n\tif _, err := io.Copy(f, resp.Body); err != nil {\n\t\tc.Errorf(\"failed to save data to file: %v\", err)\n\t}\n\n\tdata, err := os.Open(tmpFile)\n\tif err != nil {\n\t\tc.Errorf(\"failed to load file's data: %v\", err)\n\t}\n\n\tloadImageName := \"load-busyboxImage\"\n\tq = url.Values{}\n\tq.Set(\"name\", loadImageName)\n\n\tquery = request.WithQuery(q)\n\treader := request.WithRawData(data)\n\theader := request.WithHeader(\"Content-Type\", \"application/x-tar\")\n\n\tresp, err = request.Post(\"/images/load\", query, reader, header)\n\tc.Assert(err, check.IsNil)\n\tCheckRespStatus(c, resp, 200)\n\n\tafter, err := request.Get(\"/images/\" + loadImageName + \":\" + environment.Busybox125Tag + \"/json\")\n\tc.Assert(err, check.IsNil)\n\tCheckRespStatus(c, after, 200)\n\tdefer request.Delete(\"/images/\" + loadImageName + \":\" + environment.Busybox125Tag)\n\n\tgotAfter := types.ImageInfo{}\n\terr = request.DecodeBody(&gotAfter, after.Body)\n\tc.Assert(err, check.IsNil)\n\n\tc.Assert(gotBefore.ID, check.Equals, gotAfter.ID)\n\tc.Assert(gotBefore.CreatedAt, check.Equals, gotAfter.CreatedAt)\n\tc.Assert(gotBefore.Size, check.Equals, gotAfter.Size)\n}", "func (c *Client) Image(ctx context.Context, number int) (io.Reader, string, error) {\n\tcomic, err := c.Get(ctx, number)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\n\treq, err := http.NewRequest(\"GET\", comic.ImageURL, nil)\n\tif err != nil {\n\t\treturn nil, \"\", fmt.Errorf(\"failed to build image request: %s\", err)\n\t}\n\treq = req.WithContext(ctx)\n\n\trsp, err := c.HTTPClient.Do(req)\n\tif err != nil {\n\t\treturn nil, \"\", fmt.Errorf(\"failed to do image request: %s\", err)\n\t}\n\tdefer drainAndClose(rsp.Body)\n\n\tif rsp.StatusCode != 200 {\n\t\treturn nil, \"\", StatusError{Code: rsp.StatusCode}\n\t}\n\n\tvar buf bytes.Buffer\n\tif _, err := io.Copy(&buf, rsp.Body); err != nil {\n\t\treturn nil, \"\", fmt.Errorf(\"failed to do copy image: %s\", err)\n\t}\n\n\treturn &buf, rsp.Header.Get(\"Content-Type\"), nil\n}", "func (f *fetcher) fetchSingleImage(img string, asc string, 
discover bool) (string, error) {\n\tvar (\n\t\tascFile *os.File\n\t\terr error\n\t\tlatest bool\n\t)\n\tif asc != \"\" && f.ks != nil {\n\t\tascFile, err = os.Open(asc)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"unable to open signature file: %v\", err)\n\t\t}\n\t\tdefer ascFile.Close()\n\t}\n\n\tu, err := url.Parse(img)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"not a valid image reference (%s)\", img)\n\t}\n\n\t// if img refers to a local file, ensure the scheme is file:// and make the url path absolute\n\t_, err = os.Stat(u.Path)\n\tif err == nil {\n\t\tu.Path, err = filepath.Abs(u.Path)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"unable to get abs path: %v\", err)\n\t\t}\n\t\tu.Scheme = \"file\"\n\t} else if !os.IsNotExist(err) {\n\t\treturn \"\", fmt.Errorf(\"unable to access %q: %v\", img, err)\n\t}\n\n\tif discover && u.Scheme == \"\" {\n\t\tif app := newDiscoveryApp(img); app != nil {\n\t\t\tvar discoveryError error\n\t\t\tif !f.local {\n\t\t\t\tstderr(\"rkt: searching for app image %s\", img)\n\t\t\t\tep, err := discoverApp(app, true)\n\t\t\t\tif err != nil {\n\t\t\t\t\tdiscoveryError = err\n\t\t\t\t} else {\n\t\t\t\t\t// No specified version label, mark it as latest\n\t\t\t\t\tif _, ok := app.Labels[\"version\"]; !ok {\n\t\t\t\t\t\tlatest = true\n\t\t\t\t\t}\n\t\t\t\t\treturn f.fetchImageFromEndpoints(app.Name.String(), ep, ascFile, latest)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif discoveryError != nil {\n\t\t\t\tstderr(\"discovery failed for %q: %v. Trying to find image in the store.\", img, discoveryError)\n\t\t\t}\n\t\t\tif f.local || discoveryError != nil {\n\t\t\t\treturn f.fetchImageFromStore(img)\n\t\t\t}\n\t\t}\n\t}\n\n\tswitch u.Scheme {\n\tcase \"http\", \"https\", \"file\":\n\tcase \"docker\":\n\t\tdockerURL := common.ParseDockerURL(path.Join(u.Host, u.Path))\n\t\tif dockerURL.Tag == \"latest\" {\n\t\t\tlatest = true\n\t\t}\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"rkt only supports http, https, docker or file URLs (%s)\", img)\n\t}\n\treturn f.fetchImageFromURL(u.String(), u.Scheme, ascFile, latest)\n}" ]
[ "0.6551211", "0.63521427", "0.6168074", "0.5930461", "0.59249926", "0.59170794", "0.5911446", "0.5879098", "0.58760864", "0.5863561", "0.5850105", "0.5846192", "0.5763224", "0.5747897", "0.57216996", "0.57001495", "0.56998503", "0.56664616", "0.5652397", "0.56486833", "0.5608164", "0.55508506", "0.55447334", "0.5544553", "0.554286", "0.55327886", "0.5531382", "0.55122125", "0.55034727", "0.547106", "0.5470628", "0.5446105", "0.54407465", "0.54257965", "0.5415677", "0.54127294", "0.5409883", "0.53736585", "0.5358621", "0.53525597", "0.53125477", "0.53016263", "0.52929175", "0.52440464", "0.5233386", "0.5233321", "0.5219589", "0.52150685", "0.5210427", "0.5205058", "0.51979446", "0.51978713", "0.51880497", "0.51875573", "0.5180496", "0.51732975", "0.5169877", "0.5166297", "0.5163226", "0.51618034", "0.5153659", "0.5137862", "0.51335543", "0.5122026", "0.51157886", "0.5112688", "0.51124614", "0.51122326", "0.5111514", "0.5106319", "0.5104134", "0.51031154", "0.51028633", "0.5098946", "0.509732", "0.5093791", "0.50923604", "0.5087019", "0.5081302", "0.50709915", "0.506233", "0.5055836", "0.50467163", "0.5044104", "0.5039415", "0.5038196", "0.50360453", "0.5033169", "0.5026312", "0.5019462", "0.50011015", "0.49955928", "0.49870035", "0.49788576", "0.49710044", "0.49621484", "0.49602234", "0.49601215", "0.4958254", "0.49581626" ]
0.75892
0
FetchRemoteImage indicates an expected call of FetchRemoteImage
func (mr *MockFetcherMockRecorder) FetchRemoteImage(arg0 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchRemoteImage", reflect.TypeOf((*MockFetcher)(nil).FetchRemoteImage), arg0) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *MockFetcher) FetchRemoteImage(arg0 string) (image.Image, error) {\n\tret := m.ctrl.Call(m, \"FetchRemoteImage\", arg0)\n\tret0, _ := ret[0].(image.Image)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestFetch(t *testing.T) {\n\timage := \"rkt-inspect-fetch.aci\"\n\n\tctx := newRktRunCtx()\n\tdefer ctx.cleanup()\n\n\t// Fetch the image for the first time, this should write the image to the\n\t// on-disk store.\n\toldHash := patchImportAndFetchHash(image, []string{\"--exec=/inspect --read-file\"}, t, ctx)\n\n\t// Fetch the image with the same name but different content, the expecting\n\t// result is that we should get a different hash since we are not fetching\n\t// from the on-disk store.\n\tnewHash := patchImportAndFetchHash(image, []string{\"--exec=/inspect --read-file --write-file\"}, t, ctx)\n\n\tif oldHash == newHash {\n\t\tt.Fatalf(\"ACI hash should be different as the image has changed\")\n\t}\n}", "func TestImplicitFetch(t *testing.T) {\n\tfoundMsg := \"found image in local store\"\n\n\tctx := newRktRunCtx()\n\tdefer ctx.cleanup()\n\n\t// 1. Fetch the image.\n\t// TODO(yifan): Add other ACI with different schemes.\n\timportImageAndFetchHash(t, ctx, \"docker://busybox:ubuntu-12.04\")\n\timportImageAndFetchHash(t, ctx, \"docker://busybox:latest\")\n\n\t// 2. Try run/prepare with/without tag ':latest', should not get $foundMsg.\n\tcmds := []string{\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --mds-register=false docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --mds-register=false docker://busybox:latest\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare docker://busybox:latest\", ctx.cmd()),\n\t}\n\n\tfor _, cmd := range cmds {\n\t\tt.Logf(\"Running test %v\", cmd)\n\n\t\tchild, err := gexpect.Spawn(cmd)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Cannot exec rkt: %v\", err)\n\t\t}\n\t\tif err := expectWithOutput(child, foundMsg); err == nil {\n\t\t\tt.Fatalf(\"%q should not be found\", foundMsg)\n\t\t}\n\t\tif err := child.Wait(); err != nil {\n\t\t\tt.Fatalf(\"rkt didn't terminate correctly: %v\", err)\n\t\t}\n\t}\n}", "func (f *Frontend) fetchImage(i *img.Image) (*img.Image, error) {\n\tvar err error\n\n\t// go through image proxy to resize and cache the image\n\tkey := hmacKey(i.ID)\n\tu := fmt.Sprintf(\"%v/image/225x,s%v/%v\", f.Host, key, i.ID)\n\tfmt.Println(u)\n\n\tresp, err := f.Images.Client.Get(u)\n\tif err != nil {\n\t\treturn i, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbdy, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn i, err\n\t}\n\n\ti.Base64 = base64.StdEncoding.EncodeToString(bdy)\n\treturn i, err\n}", "func (f *fetcher) fetchImage(img string, asc string, discover bool) (string, error) {\n\tif f.withDeps && !discover {\n\t\treturn \"\", fmt.Errorf(\"cannot fetch image's dependencies with discovery disabled\")\n\t}\n\thash, err := f.fetchSingleImage(img, asc, discover)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif f.withDeps {\n\t\terr = f.fetchImageDeps(hash)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn hash, nil\n}", "func pullMissingImage(ctx context.Context, apiClient client.CommonAPIClient, image string, force bool) error {\n\tif !force {\n\t\t_, inspectError := apiClient.ImageInspect(ctx, image)\n\t\tif inspectError == nil {\n\t\t\treturn nil\n\t\t}\n\t\tif err, ok := inspectError.(client.RespError); !ok {\n\t\t\treturn inspectError\n\t\t} else if 
err.Code() != http.StatusNotFound {\n\t\t\treturn inspectError\n\t\t}\n\t}\n\n\tnamedRef, err := reference.Parse(image)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnamedRef = reference.TrimTagForDigest(reference.WithDefaultTagIfMissing(namedRef))\n\n\tvar name, tag string\n\tif reference.IsNameTagged(namedRef) {\n\t\tname, tag = namedRef.Name(), namedRef.(reference.Tagged).Tag()\n\t} else {\n\t\tname = namedRef.String()\n\t}\n\n\tresponseBody, err := apiClient.ImagePull(ctx, name, tag, fetchRegistryAuth(namedRef.Name()))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to pull image: %v\", err)\n\t}\n\tdefer responseBody.Close()\n\n\treturn showProgress(responseBody)\n}", "func getRemoteImage(url string) ([]byte, error) {\n\tvar image []byte\n\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn image, err\n\t}\n\tdefer resp.Body.Close()\n\n\timage, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn image, err\n\t}\n\n\treturn image, nil\n}", "func (f OriginalFetcher) Fetch(namespace string, sourceURL string, imageHash string) (info *info.ImageProperties, downloaded bool, err error) {\n\tif sourceURL == \"\" && imageHash == \"\" {\n\t\treturn nil, false, fmt.Errorf(\"Missing Hash & URL\")\n\t}\n\n\tif imageHash != \"\" {\n\t\tinfo, err = f.fetchFromStore(namespace, imageHash)\n\t}\n\n\tif sourceURL != \"\" && (err != nil || imageHash == \"\") {\n\t\tinfo, downloaded, err = f.fetchFromSource(namespace, sourceURL)\n\t}\n\n\treturn info, downloaded, err\n}", "func (suite *APIImageInspectSuite) TestImageInspectOk(c *check.C) {\n\tvar (\n\t\trepo = environment.BusyboxRepo\n\t\ttag = \"1.24\"\n\n\t\tid = \"sha256:ca3d7d608b8a8bbaaac2c350bd0f9588cce0509ada74108d5c4b2afb24c46125\"\n\t\tdig = \"sha256:840f2b98a2540ff1d265782c42543dbec7218d3ab0e73b296d7dac846f146e27\"\n\t)\n\n\trepoTag := fmt.Sprintf(\"%s:%s\", repo, tag)\n\trepoDigest := fmt.Sprintf(\"%s@%s\", repo, dig)\n\n\tfor _, image := range []string{\n\t\tid,\n\t\trepoTag,\n\t\trepoDigest,\n\t\tfmt.Sprintf(\"%s:whatever@%s\", repo, dig),\n\t} {\n\t\tresp, err := request.Get(\"/images/\" + image + \"/json\")\n\t\tc.Assert(err, check.IsNil)\n\t\tCheckRespStatus(c, resp, 200)\n\n\t\tgot := types.ImageInfo{}\n\t\terr = request.DecodeBody(&got, resp.Body)\n\t\tc.Assert(err, check.IsNil)\n\n\t\t// TODO: More specific check is needed\n\t\tc.Assert(got.Config, check.NotNil)\n\t\tc.Assert(got.ID, check.Equals, id)\n\t\tc.Assert(got.CreatedAt, check.NotNil)\n\t\tc.Assert(got.Size, check.NotNil)\n\t\tc.Assert(reflect.DeepEqual(got.RepoTags, []string{repoTag}), check.Equals, true)\n\t\tc.Assert(reflect.DeepEqual(got.RepoDigests, []string{repoDigest}), check.Equals, true)\n\t}\n}", "func (f *fetcher) fetchImageFrom(appName string, aciURL, ascURL, scheme string, ascFile *os.File, latest bool) (string, error) {\n\tvar rem *store.Remote\n\n\tif f.insecureSkipVerify {\n\t\tif f.ks != nil {\n\t\t\tstderr(\"rkt: warning: TLS verification and signature verification has been disabled\")\n\t\t}\n\t} else if scheme == \"docker\" {\n\t\treturn \"\", fmt.Errorf(\"signature verification for docker images is not supported (try --insecure-skip-verify)\")\n\t}\n\n\tif (f.local && scheme != \"file\") || (scheme != \"file\" && !latest) {\n\t\tvar err error\n\t\tok := false\n\t\trem, ok, err = f.s.GetRemote(aciURL)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif ok {\n\t\t\tif f.local {\n\t\t\t\tstderr(\"rkt: using image in local store for app %s\", appName)\n\t\t\t\treturn rem.BlobKey, nil\n\t\t\t}\n\t\t\tif useCached(rem.DownloadTime, 
rem.CacheMaxAge) {\n\t\t\t\tstderr(\"rkt: found image in local store, skipping fetching from %s\", aciURL)\n\t\t\t\treturn rem.BlobKey, nil\n\t\t\t}\n\t\t}\n\t\tif f.local {\n\t\t\treturn \"\", fmt.Errorf(\"url %s not available in local store\", aciURL)\n\t\t}\n\t}\n\n\tif scheme != \"file\" && f.debug {\n\t\tstderr(\"rkt: fetching image from %s\", aciURL)\n\t}\n\n\tvar etag string\n\tif rem != nil {\n\t\tetag = rem.ETag\n\t}\n\tentity, aciFile, cd, err := f.fetch(appName, aciURL, ascURL, ascFile, etag)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif cd != nil && cd.useCached {\n\t\tif rem != nil {\n\t\t\treturn rem.BlobKey, nil\n\t\t} else {\n\t\t\t// should never happen\n\t\t\tpanic(\"asked to use cached image but remote is nil\")\n\t\t}\n\t}\n\tif scheme != \"file\" {\n\t\tdefer os.Remove(aciFile.Name())\n\t}\n\n\tif entity != nil && !f.insecureSkipVerify {\n\t\tstderr(\"rkt: signature verified:\")\n\t\tfor _, v := range entity.Identities {\n\t\t\tstderr(\" %s\", v.Name)\n\t\t}\n\t}\n\tkey, err := f.s.WriteACI(aciFile, latest)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif scheme != \"file\" {\n\t\trem := store.NewRemote(aciURL, ascURL)\n\t\trem.BlobKey = key\n\t\trem.DownloadTime = time.Now()\n\t\tif cd != nil {\n\t\t\trem.ETag = cd.etag\n\t\t\trem.CacheMaxAge = cd.maxAge\n\t\t}\n\t\terr = f.s.WriteRemote(rem)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn key, nil\n}", "func (v *Virt) ImageRemoteDigest(ctx context.Context, image string) (digest string, err error) {\n\treturn\n}", "func TestCannotExecuteStatusImage(t *testing.T) {\n\tbuf := setLogBuffer()\n\tdefer func() {\n\t\tif t.Failed() {\n\t\t\tt.Log(buf.String())\n\t\t}\n\t}()\n\n\tif StatusImage == \"\" {\n\t\tt.Skip(\"no status image defined\")\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\n\tapp := &models.App{Name: id.New().String()}\n\tapp = ensureApp(t, app)\n\n\tfn := &models.Fn{\n\t\tAppID: app.ID,\n\t\tName: id.New().String(),\n\t\tImage: StatusImage,\n\t\tResourceConfig: models.ResourceConfig{\n\t\t\tMemory: memory,\n\t\t},\n\t}\n\tfn = ensureFn(t, fn)\n\n\tlb, err := LB()\n\tif err != nil {\n\t\tt.Fatalf(\"Got unexpected error: %v\", err)\n\t}\n\tu := url.URL{\n\t\tScheme: \"http\",\n\t\tHost: lb,\n\t}\n\tu.Path = path.Join(u.Path, \"invoke\", fn.ID)\n\n\tcontent := bytes.NewBuffer([]byte(`status`))\n\toutput := &bytes.Buffer{}\n\n\tresp, err := callFN(ctx, u.String(), content, output, models.TypeSync)\n\tif err != nil {\n\t\tt.Fatalf(\"Got unexpected error: %v\", err)\n\t}\n\n\tif resp.StatusCode != http.StatusBadRequest {\n\t\tt.Fatalf(\"StatusCode check failed on %v\", resp.StatusCode)\n\t}\n}", "func (c *dockerClientMock) GetRemoteImageInfo(imageSoure string) (v1.Image, error) {\n\treturn &fake.FakeImage{}, nil\n}", "func (f *SourceFetcher) Fetch(url string, namespace string) (*info.ImageProperties, bool, error) {\n\tc := make(chan FetchResult)\n\tgo f.uniqueFetchSource(c, url, namespace)\n\tr := <-c\n\treturn r.ImageDetails, r.Downloaded, r.Error\n}", "func Test_GetImageFromUrl_badUrl(t *testing.T) {\n\tb, err := GetImageFromUrl(\"some-bad-url\")\n\n\tassert.Equal(t, `Error getting image: Get some-bad-url: unsupported protocol scheme \"\"`, err.Error())\n\tassert.Equal(t, []byte(nil), b)\n}", "func ttrPullImageAsserting(ctx context.Context, t *testing.T, client apiclient.APIClient, image string) {\n\terr := ttrPullImage(ctx, client, image)\n\tassert.NilError(t, err)\n}", "func PullImage(image, cacheDir string) (v1.Image, 
error) {\n var options []crane.Option\n\n // options = append(options, crane.Insecure)\n\n // Use current built OS and architecture\n options = append(options, crane.WithPlatform(&v1.Platform{\n OS: runtime.GOOS,\n Architecture: runtime.GOARCH,\n }))\n\n // Grab the remote manifest\n manifest, err := crane.Manifest(image, options...)\n if err != nil {\n return nil, fmt.Errorf(\"failed fetching manifest for %s: %v\", image, err)\n }\n\n if !gjson.Valid(string(manifest)) {\n return nil, fmt.Errorf(\"Cannot parse manifest: %s\", string(manifest))\n }\n\n value := gjson.Get(string(manifest), \"config.digest\").Value().(string)\n if value == \"\" {\n return nil, fmt.Errorf(\"Malformed manifest: %s\", string(manifest))\n }\n \n digest := strings.Split(value, \":\")[1]\n tarball := fmt.Sprintf(\"%s/%s.tar.gz\", cacheDir, digest)\n\n // Download the tarball of the image if not available in the cache\n if _, err := os.Stat(tarball); os.IsNotExist(err) {\n // Create the cacheDir if it does not already exist\n if cacheDir != \"\" {\n if _, err := os.Stat(cacheDir); os.IsNotExist(err) {\n os.MkdirAll(cacheDir, os.ModePerm)\n }\n }\n \n // Pull the image\n img, err := crane.Pull(image, options...)\n if err != nil {\n return nil, fmt.Errorf(\"Could not pull image: %s\", err)\n }\n \n f, err := os.Create(tarball)\n if err != nil {\n return nil, fmt.Errorf(\"Failed to open %s: %v\", tarball, err)\n }\n \n defer f.Close()\n \n err = crane.Save(img, image, tarball)\n if err != nil {\n return nil, fmt.Errorf(\"Could not save image: %s\", err)\n }\n }\n\n img, err := crane.Load(tarball)\n if err != nil {\n return nil, fmt.Errorf(\"Could not load image: %s\", err)\n }\n\n return img, nil\n}", "func Fetch(imageURI string, labels map[types.ACIdentifier]string, insecure bool) (tempfile.ReadSeekCloser, error) {\n\tu, err := url.Parse(imageURI)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar puller remote.Puller\n\n\tswitch u.Scheme {\n\tcase \"file\":\n\t\tfilename := u.Path\n\t\tif u.Host != \"\" {\n\t\t\tfilename = filepath.Join(u.Host, u.Path)\n\t\t}\n\t\tf, err := os.Open(filename)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn tempfile.New(f)\n\tcase \"http\", \"https\":\n\t\tpuller = http.New()\n\tcase \"docker\":\n\t\tpuller = docker.New(insecure)\n\tcase \"aci\", \"\":\n\t\tpuller = aci.New(insecure, labels)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"%q scheme not supported\", u.Scheme)\n\t}\n\n\tr, err := puller.Pull(imageURI)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn tempfile.New(r)\n}", "func checkImage(url string) bool {\n\treq, err := http.NewRequest(\"HEAD\", url, nil)\n\tif err != nil {\n\t\treturn false\n\t}\n\tvar client http.Client\n\tresp, err := client.Do(req)\n\tif err != nil || len(resp.Header[\"Content-Length\"]) == 0 || len(resp.Header[\"Content-Type\"]) == 0 {\n\t\treturn false\n\t}\n\tdefer resp.Body.Close()\n\tmb, _ := strconv.Atoi(resp.Header[\"Content-Length\"][0])\n\tif mb > 10*1024*1024 {\n\t\treturn false\n\t}\n\tif !strings.HasPrefix(resp.Header[\"Content-Type\"][0], \"image\") {\n\t\treturn false\n\t}\n\treturn true\n}", "func (m *MockFetcher) FetchLocalImage(arg0 string) (image.Image, error) {\n\tret := m.ctrl.Call(m, \"FetchLocalImage\", arg0)\n\tret0, _ := ret[0].(image.Image)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (f *fetcher) fetchSingleImage(img string, asc string, discover bool) (string, error) {\n\tvar (\n\t\tascFile *os.File\n\t\terr error\n\t\tlatest bool\n\t)\n\tif asc != \"\" && f.ks != nil {\n\t\tascFile, err = 
os.Open(asc)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"unable to open signature file: %v\", err)\n\t\t}\n\t\tdefer ascFile.Close()\n\t}\n\n\tu, err := url.Parse(img)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"not a valid image reference (%s)\", img)\n\t}\n\n\t// if img refers to a local file, ensure the scheme is file:// and make the url path absolute\n\t_, err = os.Stat(u.Path)\n\tif err == nil {\n\t\tu.Path, err = filepath.Abs(u.Path)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"unable to get abs path: %v\", err)\n\t\t}\n\t\tu.Scheme = \"file\"\n\t} else if !os.IsNotExist(err) {\n\t\treturn \"\", fmt.Errorf(\"unable to access %q: %v\", img, err)\n\t}\n\n\tif discover && u.Scheme == \"\" {\n\t\tif app := newDiscoveryApp(img); app != nil {\n\t\t\tvar discoveryError error\n\t\t\tif !f.local {\n\t\t\t\tstderr(\"rkt: searching for app image %s\", img)\n\t\t\t\tep, err := discoverApp(app, true)\n\t\t\t\tif err != nil {\n\t\t\t\t\tdiscoveryError = err\n\t\t\t\t} else {\n\t\t\t\t\t// No specified version label, mark it as latest\n\t\t\t\t\tif _, ok := app.Labels[\"version\"]; !ok {\n\t\t\t\t\t\tlatest = true\n\t\t\t\t\t}\n\t\t\t\t\treturn f.fetchImageFromEndpoints(app.Name.String(), ep, ascFile, latest)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif discoveryError != nil {\n\t\t\t\tstderr(\"discovery failed for %q: %v. Trying to find image in the store.\", img, discoveryError)\n\t\t\t}\n\t\t\tif f.local || discoveryError != nil {\n\t\t\t\treturn f.fetchImageFromStore(img)\n\t\t\t}\n\t\t}\n\t}\n\n\tswitch u.Scheme {\n\tcase \"http\", \"https\", \"file\":\n\tcase \"docker\":\n\t\tdockerURL := common.ParseDockerURL(path.Join(u.Host, u.Path))\n\t\tif dockerURL.Tag == \"latest\" {\n\t\t\tlatest = true\n\t\t}\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"rkt only supports http, https, docker or file URLs (%s)\", img)\n\t}\n\treturn f.fetchImageFromURL(u.String(), u.Scheme, ascFile, latest)\n}", "func (b *ecrBase) runGetImage(ctx context.Context, batchGetImageInput ecr.BatchGetImageInput) (*ecr.Image, error) {\n\t// Allow only a single image to be fetched at a time.\n\tif len(batchGetImageInput.ImageIds) != 1 {\n\t\treturn nil, errGetImageUnhandled\n\t}\n\n\tbatchGetImageInput.RegistryId = aws.String(b.ecrSpec.Registry())\n\tbatchGetImageInput.RepositoryName = aws.String(b.ecrSpec.Repository)\n\n\tlog.G(ctx).WithField(\"batchGetImageInput\", batchGetImageInput).Trace(\"ecr.base.image: requesting images\")\n\n\tbatchGetImageOutput, err := b.client.BatchGetImageWithContext(ctx, &batchGetImageInput)\n\tif err != nil {\n\t\tlog.G(ctx).WithError(err).Error(\"ecr.base.image: failed to get image\")\n\t\treturn nil, err\n\t}\n\tlog.G(ctx).WithField(\"batchGetImageOutput\", batchGetImageOutput).Trace(\"ecr.base.image: api response\")\n\n\t// Summarize image request failures for handled errors. Only the first\n\t// failure is checked as only a single ImageIdentifier is allowed to be\n\t// queried for.\n\tif len(batchGetImageOutput.Failures) > 0 {\n\t\tfailure := batchGetImageOutput.Failures[0]\n\t\tswitch aws.StringValue(failure.FailureCode) {\n\t\t// Requested image with a corresponding tag and digest does not exist.\n\t\t// This failure will generally occur when pushing an updated (or new)\n\t\t// image with a tag.\n\t\tcase ecr.ImageFailureCodeImageTagDoesNotMatchDigest:\n\t\t\tlog.G(ctx).WithField(\"failure\", failure).Debug(\"ecr.base.image: no matching image with specified digest\")\n\t\t\treturn nil, errImageNotFound\n\t\t// Requested image doesn't resolve to a known image. 
A new image will\n\t\t// result in an ImageNotFound error when checked before push.\n\t\tcase ecr.ImageFailureCodeImageNotFound:\n\t\t\tlog.G(ctx).WithField(\"failure\", failure).Debug(\"ecr.base.image: no image found\")\n\t\t\treturn nil, errImageNotFound\n\t\t// Requested image identifiers are invalid.\n\t\tcase ecr.ImageFailureCodeInvalidImageDigest, ecr.ImageFailureCodeInvalidImageTag:\n\t\t\tlog.G(ctx).WithField(\"failure\", failure).Error(\"ecr.base.image: invalid image identifier\")\n\t\t\treturn nil, reference.ErrInvalid\n\t\t// Unhandled failure reported for image request made.\n\t\tdefault:\n\t\t\tlog.G(ctx).WithField(\"failure\", failure).Warn(\"ecr.base.image: unhandled image request failure\")\n\t\t\treturn nil, errGetImageUnhandled\n\t\t}\n\t}\n\n\treturn batchGetImageOutput.Images[0], nil\n}", "func (suite *APIImageInspectSuite) TestImageInspectNotFound(c *check.C) {\n\tresp, err := request.Get(\"/images/\" + \"TestImageInspectNotFound\" + \"/json\")\n\tc.Assert(err, check.IsNil)\n\tCheckRespStatus(c, resp, 404)\n}", "func (mr *MockFetcherMockRecorder) FetchLocalImage(arg0 interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"FetchLocalImage\", reflect.TypeOf((*MockFetcher)(nil).FetchLocalImage), arg0)\n}", "func (mr *MockFetcherMockRecorder) FetchUpdatedLocalImage(arg0, arg1, arg2 interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"FetchUpdatedLocalImage\", reflect.TypeOf((*MockFetcher)(nil).FetchUpdatedLocalImage), arg0, arg1, arg2)\n}", "func LookupMyImage(connConfig string, myImageId string) (SpiderMyImageInfo, error) {\n\n\tif connConfig == \"\" {\n\t\terr := fmt.Errorf(\"LookupMyImage() called with empty connConfig.\")\n\t\tcommon.CBLog.Error(err)\n\t\treturn SpiderMyImageInfo{}, err\n\t} else if myImageId == \"\" {\n\t\terr := fmt.Errorf(\"LookupMyImage() called with empty myImageId.\")\n\t\tcommon.CBLog.Error(err)\n\t\treturn SpiderMyImageInfo{}, err\n\t}\n\n\turl := common.SpiderRestUrl + \"/myimage/\" + url.QueryEscape(myImageId)\n\n\t// Create Req body\n\ttempReq := common.SpiderConnectionName{}\n\ttempReq.ConnectionName = connConfig\n\n\tclient := resty.New().SetCloseConnection(true)\n\tclient.SetAllowGetMethodPayload(true)\n\n\tresp, err := client.R().\n\t\tSetHeader(\"Content-Type\", \"application/json\").\n\t\tSetBody(tempReq).\n\t\tSetResult(&SpiderMyImageInfo{}). // or SetResult(AuthSuccess{}).\n\t\t//SetError(&AuthError{}). 
// or SetError(AuthError{}).\n\t\tGet(url)\n\n\tif err != nil {\n\t\tcommon.CBLog.Error(err)\n\t\terr := fmt.Errorf(\"an error occurred while requesting to CB-Spider\")\n\t\treturn SpiderMyImageInfo{}, err\n\t}\n\n\tfmt.Println(string(resp.Body()))\n\n\tfmt.Println(\"HTTP Status code: \" + strconv.Itoa(resp.StatusCode()))\n\tswitch {\n\tcase resp.StatusCode() >= 400 || resp.StatusCode() < 200:\n\t\terr := fmt.Errorf(string(resp.Body()))\n\t\tcommon.CBLog.Error(err)\n\t\treturn SpiderMyImageInfo{}, err\n\t}\n\n\ttemp := resp.Result().(*SpiderMyImageInfo)\n\treturn *temp, nil\n\n}", "func inspectImage(appCfg config.App) {\n\timage, err := serviceRuntime.InspectImage(appCfg.Version())\n\tif err != nil {\n\t\tlog.Println(\"error inspecting image\", appCfg.Version())\n\t\treturn\n\t}\n\n\tif utils.StripSHA(image.ID) != appCfg.VersionID() {\n\t\tlog.Printf(\"warning: %s image ID does not match config\", appCfg.Name())\n\t}\n}", "func inspectImage(appCfg config.App) {\n\timage, err := serviceRuntime.InspectImage(appCfg.Version())\n\tif err != nil {\n\t\tlog.Println(\"error inspecting image\", appCfg.Version())\n\t\treturn\n\t}\n\n\tif utils.StripSHA(image.ID) != appCfg.VersionID() {\n\t\tlog.Printf(\"warning: %s image ID does not match config\", appCfg.Name())\n\t}\n}", "func TestRemote(t *testing.T) {\n\trnd, err := random.Image(1024, 3)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ts, err := registry.TLS(\"gcr.io\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ttr := s.Client().Transport\n\n\tsrc := \"gcr.io/test/compressed\"\n\tref, err := name.ParseReference(src)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := remote.Write(ref, rnd, remote.WithTransport(tr)); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\timg, err := remote.Image(ref, remote.WithTransport(tr))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := validate.Image(img); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcf, err := img.ConfigFile()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tm, err := img.Manifest()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tlayer, err := img.LayerByDiffID(cf.RootFS.DiffIDs[0])\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\td, err := layer.Digest()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif diff := cmp.Diff(d, m.Layers[0].Digest); diff != \"\" {\n\t\tt.Errorf(\"mismatched digest: %v\", diff)\n\t}\n}", "func FetchRemoteFile() {\n\n}", "func (c *dockerClientMock) DownloadImage(imageSource, filePath string) (v1.Image, error) {\n\treturn &fake.FakeImage{}, nil // fmt.Errorf(\"%s\", filePath)\n}", "func (s *DockerHubPullSuite) TestPullNonExistingImage(c *check.C) {\n\ttestRequires(c, DaemonIsLinux)\n\tfor _, e := range []struct {\n\t\tImage string\n\t\tAlias string\n\t}{\n\t\t{\"library/asdfasdf:foobar\", \"asdfasdf:foobar\"},\n\t\t{\"library/asdfasdf:foobar\", \"library/asdfasdf:foobar\"},\n\t\t{\"library/asdfasdf:latest\", \"asdfasdf\"},\n\t\t{\"library/asdfasdf:latest\", \"asdfasdf:latest\"},\n\t\t{\"library/asdfasdf:latest\", \"library/asdfasdf\"},\n\t\t{\"library/asdfasdf:latest\", \"library/asdfasdf:latest\"},\n\t} {\n\t\tout, err := s.CmdWithError(\"pull\", e.Alias)\n\t\tc.Assert(err, checker.NotNil, check.Commentf(\"expected non-zero exit status when pulling non-existing image: %s\", out))\n\t\tc.Assert(out, checker.Contains, fmt.Sprintf(\"Error: image %s not found\", e.Image), check.Commentf(\"expected image not found error messages\"))\n\t}\n}", "func (imp *Importer) fetchImages() {\n err := downloadImages(\n imp.idPath,\n func(id string, bodyRdr io.Reader) error {\n img, err := 
jpeg.Decode(bodyRdr)\n if err == nil {\n imp.send(&imagedata.ImageData{Id: id, Data: &img})\n } else {\n log.Printf(\"Error decoding image %s to jpeg\\n\", id)\n }\n return nil\n },\n )\n\n if err != nil { imp.sendErr(err) }\n}", "func PullImage(c *check.C, image string) {\n\tresp, err := request.Get(\"/images/\" + image + \"/json\")\n\tc.Assert(err, check.IsNil)\n\n\tif resp.StatusCode == http.StatusOK {\n\t\tresp.Body.Close()\n\t\treturn\n\t}\n\n\tq := url.Values{}\n\tq.Add(\"fromImage\", image)\n\tresp, err = request.Post(\"/images/create\", request.WithQuery(q))\n\tc.Assert(err, check.IsNil)\n\tc.Assert(resp.StatusCode, check.Equals, 200)\n\n\tdefer resp.Body.Close()\n\tc.Assert(fetchPullStatus(resp.Body), check.IsNil)\n}", "func FetchComicImg(url, path string) error {\n\tpath, err := expand(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpath, err = filepath.Abs(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tout, err := os.Create(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer out.Close()\n\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\t_, err = io.Copy(out, resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (c *Client) DetectImpurityImage(r io.Reader) (ImageData, error) {\n\tdata := ImageData{}\n\tvar b bytes.Buffer\n\tw := multipart.NewWriter(&b)\n\tdefer w.Close()\n\n\tfw, err := w.CreateFormFile(\"file_image\", \"picpurify\")\n\tif err != nil {\n\t\treturn data, err\n\t}\n\tif _, err = io.Copy(fw, r); err != nil {\n\t\treturn data, err\n\t}\n\n\tfw, err = w.CreateFormField(\"API_KEY\")\n\tif err != nil {\n\t\treturn data, err\n\t}\n\tif _, err = fw.Write([]byte(c.Key)); err != nil {\n\t\treturn data, err\n\t}\n\n\tif fw, err = w.CreateFormField(\"task\"); err != nil {\n\t\treturn data, err\n\t}\n\tif _, err = fw.Write([]byte(c.Tasks)); err != nil {\n\t\treturn data, err\n\t}\n\n\treq, err := http.NewRequest(http.MethodPost, c.ImageURL, &b)\n\tif err != nil {\n\t\treturn data, errors.Wrap(err, \"picpurify client: creating request\")\n\t}\n\treq.ContentLength = int64(len(b.Bytes()))\n\treq.Header.Set(\"Content-Type\", w.FormDataContentType())\n\n\tvar bytes []byte\n\tresp, err := c.Client.Do(req)\n\tif err != nil {\n\t\treturn data, errors.Wrap(err, \"picpurify client: executing request\")\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn data, errors.Errorf(\"got unexpected http code: %d\", resp.StatusCode)\n\t}\n\n\tbytes, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn data, errors.Wrap(err, \"picpurify client: reading response body\")\n\t}\n\n\tif err != nil {\n\t\treturn data, errors.New(\"retrieving data from picpurify\")\n\t}\n\terr = json.Unmarshal(bytes, &data)\n\treturn data, err\n}", "func (c *Client) GetRemoteImageInfo(imageSource string) (v1.Image, error) {\n\tref, err := c.getImageRef(imageSource)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"parsing image reference\")\n\t}\n\n\treturn remote.Image(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain))\n}", "func TestExpectedImgRef(t *testing.T) {\n\n\tv, isSet := os.LookupEnv(\"DOCKERHUB_PROXY\")\n\tif isSet {\n\t\tdefer os.Setenv(\"DOCKERHUB_PROXY\", 
v)\n\t}\n\n\tos.Unsetenv(\"DOCKERHUB_PROXY\")\n\tassert.Equal(t,\n\t\t\"index.docker.io/library/hello-world@sha256:ebf526c198a14fa138634b9746c50ec38077ec9b3986227e79eb837d26f59dc6\",\n\t\tCompleteImageRef(\"library/hello-world@sha256:ebf526c198a14fa138634b9746c50ec38077ec9b3986227e79eb837d26f59dc6\"))\n\n\tos.Setenv(\"DOCKERHUB_PROXY\", \"my-dockerhub-proxy.tld/dockerhub-proxy\")\n\tassert.Equal(t,\n\t\t\"my-dockerhub-proxy.tld/dockerhub-proxy/library/hello-world@sha256:ebf526c198a14fa138634b9746c50ec38077ec9b3986227e79eb837d26f59dc6\",\n\t\tCompleteImageRef(\"library/hello-world@sha256:ebf526c198a14fa138634b9746c50ec38077ec9b3986227e79eb837d26f59dc6\"))\n\tos.Unsetenv(\"DOCKERHUB_PROXY\")\n}", "func testDownloadImages(ctx context.Context, t *testing.T, downloadCh chan<- downloadRequest, addr, ccvmDir string) {\n\twkld := &workload{\n\t\tspec: workloadSpec{\n\t\t\tBaseImageURL: \"http://\" + addr + \"/download/image\",\n\t\t\tBIOS: \"http://\" + addr + \"/download/bios\",\n\t\t},\n\t}\n\n\tresultCh := make(chan interface{})\n\tgo func() {\n\t\timg, bios, err := downloadImages(ctx, wkld, http.DefaultTransport.(*http.Transport),\n\t\t\tresultCh, downloadCh)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Failed to download images: %v\", err)\n\t\t}\n\t\tif len(img) == 0 || len(bios) == 0 {\n\t\t\tt.Errorf(\"One the paths is empty img=%s bios=%s\", img, bios)\n\t\t}\n\t\tclose(resultCh)\n\t}()\n\n\tfor range resultCh {\n\t}\n\n\twkld.spec.BIOS = \"ftp://\" + addr + \"/download/bios\"\n\tresultCh = make(chan interface{})\n\tgo func() {\n\t\t_, _, err := downloadImages(ctx, wkld, http.DefaultTransport.(*http.Transport),\n\t\t\tresultCh, downloadCh)\n\t\tif err == nil {\n\t\t\tt.Errorf(\"Expected downloadImages with bad BIOS URL to fail\")\n\t\t}\n\t\tclose(resultCh)\n\t}()\n\n\tfor range resultCh {\n\t}\n}", "func (m *MockFetcher) FetchUpdatedLocalImage(arg0 context.Context, arg1 string, arg2 io.Writer) (image.Image, error) {\n\tret := m.ctrl.Call(m, \"FetchUpdatedLocalImage\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(image.Image)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestIsImageInLocalRegistry(t *testing.T) {\n\ttype testDef struct {\n\t\timageName string\n\t\tdocker test.FakeDockerClient\n\t\texpectedResult bool\n\t\texpectedError string\n\t}\n\ttests := map[string]testDef{\n\t\t\"ImageFound\": {\"a_test_image\", test.FakeDockerClient{}, true, \"\"},\n\t\t\"ImageNotFound\": {\"a_test_image:sometag\", test.FakeDockerClient{}, false, \"unable to get metadata for a_test_image:sometag\"},\n\t}\n\n\tfor test, def := range tests {\n\t\tdh := getDocker(&def.docker)\n\t\tfake := dh.kubeDockerClient.(*dockertools.FakeDockerClient)\n\t\tif def.expectedResult {\n\t\t\tfake.Image = &dockertypes.ImageInspect{ID: def.imageName}\n\t\t}\n\n\t\tresult, err := dh.IsImageInLocalRegistry(def.imageName)\n\n\t\tif e := fake.AssertCalls([]string{\"inspect_image\"}); e != nil {\n\t\t\tt.Errorf(\"%+v\", e)\n\t\t}\n\n\t\tif result != def.expectedResult {\n\t\t\tt.Errorf(\"Test - %s: Expected result: %v. 
Got: %v\", test, def.expectedResult, result)\n\t\t}\n\t\tif err != nil && len(def.expectedError) > 0 && !strings.Contains(err.Error(), def.expectedError) {\n\t\t\tt.Errorf(\"Test - %s: Expected error: Got: %+v\", test, err)\n\t\t}\n\t}\n}", "func (cli *FakeDatabaseClient) FetchServiceImageMetaData(ctx context.Context, in *dbdpb.FetchServiceImageMetaDataRequest, opts ...grpc.CallOption) (*dbdpb.FetchServiceImageMetaDataResponse, error) {\n\tatomic.AddInt32(&cli.fetchServiceImageMetaDataCnt, 1)\n\tif cli.methodToResp == nil {\n\t\treturn nil, nil\n\t}\n\tmethod := \"FetchServiceImageMetaData\"\n\tif resp, ok := cli.methodToResp[method]; ok {\n\t\treturn resp.(*dbdpb.FetchServiceImageMetaDataResponse), nil\n\t}\n\treturn nil, nil\n\n}", "func (c *APODClient) FetchImageURLs(count int) ([]string, error) {\n\tvar urls []string\n\tdate := time.Now()\n\n\t// make the request\n\tfor i := 0; len(urls) < count; i++ {\n\t\tdate = date.AddDate(0, 0, -i)\n\t\tresp, err := http.Get(c.buildURL(date))\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"error fetching data from APOD API\")\n\t\t}\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\treturn nil, fmt.Errorf(\"Received non-200 status code %d\", resp.StatusCode)\n\t\t}\n\n\t\t// parse the response\n\t\tvar imageMeta APODImageMeta\n\t\tif err := json.NewDecoder(resp.Body).Decode(&imageMeta); err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"error parsing API response\")\n\t\t}\n\n\t\tif imageMeta.MediaType != APODTypeImage {\n\t\t\t// we only want images\n\t\t\tcontinue\n\t\t}\n\t\turls = append(urls, imageMeta.URL)\n\t}\n\n\treturn urls, nil\n}", "func waitForImage(ctx context.Context, cl client.Client, timeout time.Duration, ns, key, val, container, expected string) error {\n\tpods := &corev1.PodList{}\n\terr := wait.PollImmediate(1*time.Second, timeout, func() (bool, error) {\n\t\tif err := cl.List(ctx, pods, client.MatchingLabels{key: val}, client.InNamespace(ns)); err != nil {\n\t\t\treturn false, nil\n\t\t}\n\t\tfor _, p := range pods.Items {\n\t\t\tfor _, c := range p.Spec.Containers {\n\t\t\t\tif c.Name == container && c.Image != expected {\n\t\t\t\t\treturn false, nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (c *IloClient) GetRemoteImageStatusDell() (ImageStatusDell, error) {\n\turl := c.Hostname + \"/redfish/v1/Managers/iDRAC.Embedded.1/VirtualMedia/CD\"\n\n\tresp, _, _, err := queryData(c, \"GET\", url, nil)\n\tif err != nil {\n\t\treturn ImageStatusDell{}, err\n\t}\n\n\tvar x ImageStatusDell\n\n\tjson.Unmarshal(resp, &x)\n\n\treturn x, nil\n}", "func ImagePull(taggedName string, done chan bool, failed chan error) {\n\timg, err := imageExistsByName(taggedName)\n\tif err != nil {\n\t\tfailed <- err\n\t\treturn\n\t}\n\tif !img {\n\t\tif r, err := client.ImagePull(context.Background(), taggedName, types.ImagePullOptions{}); err != nil {\n\t\t\tfailed <- err\n\t\t\treturn\n\t\t} else {\n\t\t\tdefer r.Close()\n\t\t}\n\t\tcommon.CliFeedbackNotifier.Progress(\"cli.docker.download\", \"Downloading installer image\")\n\t\tfor {\n\t\t\tcommon.Logger.Printf(common.LOG_WAITING_DOWNLOAD)\n\t\t\ttime.Sleep(1000 * time.Millisecond)\n\t\t\timg, err := imageExistsByName(taggedName)\n\t\t\tif err != nil {\n\t\t\t\tfailed <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif img {\n\t\t\t\tcommon.Logger.Printf(common.LOG_DOWNLOAD_COMPLETED)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tdone <- true\n}", "func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (_ Image, 
retErr error) {\n\tpullCtx := defaultRemoteContext()\n\tfor _, o := range opts {\n\t\tif err := o(c, pullCtx); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif pullCtx.PlatformMatcher == nil {\n\t\tif len(pullCtx.Platforms) > 1 {\n\t\t\treturn nil, errors.New(\"cannot pull multiplatform image locally, try Fetch\")\n\t\t} else if len(pullCtx.Platforms) == 0 {\n\t\t\tpullCtx.PlatformMatcher = c.platform\n\t\t} else {\n\t\t\tp, err := platforms.Parse(pullCtx.Platforms[0])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid platform %s: %w\", pullCtx.Platforms[0], err)\n\t\t\t}\n\n\t\t\tpullCtx.PlatformMatcher = platforms.Only(p)\n\t\t}\n\t}\n\n\tctx, done, err := c.WithLease(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer done(ctx)\n\n\tvar unpacks int32\n\tvar unpackEg *errgroup.Group\n\tvar unpackWrapper func(f images.Handler) images.Handler\n\n\tif pullCtx.Unpack {\n\t\t// unpacker only supports schema 2 image, for schema 1 this is noop.\n\t\tu, err := c.newUnpacker(ctx, pullCtx)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"create unpacker: %w\", err)\n\t\t}\n\t\tunpackWrapper, unpackEg = u.handlerWrapper(ctx, pullCtx, &unpacks)\n\t\tdefer func() {\n\t\t\tif err := unpackEg.Wait(); err != nil {\n\t\t\t\tif retErr == nil {\n\t\t\t\t\tretErr = fmt.Errorf(\"unpack: %w\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\twrapper := pullCtx.HandlerWrapper\n\t\tpullCtx.HandlerWrapper = func(h images.Handler) images.Handler {\n\t\t\tif wrapper == nil {\n\t\t\t\treturn unpackWrapper(h)\n\t\t\t}\n\t\t\treturn unpackWrapper(wrapper(h))\n\t\t}\n\t}\n\n\timg, err := c.fetch(ctx, pullCtx, ref, 1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// NOTE(fuweid): unpacker defers blobs download. before create image\n\t// record in ImageService, should wait for unpacking(including blobs\n\t// download).\n\tif pullCtx.Unpack {\n\t\tif unpackEg != nil {\n\t\t\tif err := unpackEg.Wait(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\timg, err = c.createNewImage(ctx, img)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ti := NewImageWithPlatform(c, img, pullCtx.PlatformMatcher)\n\n\tif pullCtx.Unpack {\n\t\tif unpacks == 0 {\n\t\t\t// Try to unpack is none is done previously.\n\t\t\t// This is at least required for schema 1 image.\n\t\t\tif err := i.Unpack(ctx, pullCtx.Snapshotter, pullCtx.UnpackOpts...); err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to unpack image on snapshotter %s: %w\", pullCtx.Snapshotter, err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn i, nil\n}", "func (c *repoCacheManager) fetchImages(tags []string) (fetchImagesResult, error) {\n\timages := map[string]image.Info{}\n\n\t// Create a list of images that need updating\n\tvar toUpdate []imageToUpdate\n\n\t// Counters for reporting what happened\n\tvar missing, refresh int\n\tfor _, tag := range tags {\n\t\tif tag == \"\" {\n\t\t\treturn fetchImagesResult{}, fmt.Errorf(\"empty tag in fetched tags\")\n\t\t}\n\n\t\t// See if we have the manifest already cached\n\t\tnewID := c.repoID.ToRef(tag)\n\t\tkey := NewManifestKey(newID.CanonicalRef())\n\t\tbytes, deadline, err := c.cacheClient.GetKey(key)\n\t\t// If err, then we don't have it yet. 
Update.\n\t\tswitch {\n\t\tcase err != nil: // by and large these are cache misses, but any error shall count as \"not found\"\n\t\t\tif err != ErrNotCached {\n\t\t\t\tc.logger.Log(\"warning\", \"error from cache\", \"err\", err, \"ref\", newID)\n\t\t\t}\n\t\t\tmissing++\n\t\t\ttoUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: initialRefresh})\n\t\tcase len(bytes) == 0:\n\t\t\tc.logger.Log(\"warning\", \"empty result from cache\", \"ref\", newID)\n\t\t\tmissing++\n\t\t\ttoUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: initialRefresh})\n\t\tdefault:\n\t\t\tvar entry registry.ImageEntry\n\t\t\tif err := json.Unmarshal(bytes, &entry); err == nil {\n\t\t\t\tif c.trace {\n\t\t\t\t\tc.logger.Log(\"trace\", \"found cached manifest\", \"ref\", newID, \"last_fetched\", entry.LastFetched.Format(time.RFC3339), \"deadline\", deadline.Format(time.RFC3339))\n\t\t\t\t}\n\n\t\t\t\tif entry.ExcludedReason == \"\" {\n\t\t\t\t\timages[tag] = entry.Info\n\t\t\t\t\tif c.now.After(deadline) {\n\t\t\t\t\t\tpreviousRefresh := minRefresh\n\t\t\t\t\t\tlastFetched := entry.Info.LastFetched\n\t\t\t\t\t\tif !lastFetched.IsZero() {\n\t\t\t\t\t\t\tpreviousRefresh = deadline.Sub(lastFetched)\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttoUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: previousRefresh, previousDigest: entry.Info.Digest})\n\t\t\t\t\t\trefresh++\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif c.trace {\n\t\t\t\t\t\tc.logger.Log(\"trace\", \"excluded in cache\", \"ref\", newID, \"reason\", entry.ExcludedReason)\n\t\t\t\t\t}\n\t\t\t\t\tif c.now.After(deadline) {\n\t\t\t\t\t\ttoUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: excludedRefresh})\n\t\t\t\t\t\trefresh++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tresult := fetchImagesResult{\n\t\timagesFound: images,\n\t\timagesToUpdate: toUpdate,\n\t\timagesToUpdateRefreshCount: refresh,\n\t\timagesToUpdateMissingCount: missing,\n\t}\n\n\treturn result, nil\n}", "func ttrPushImageAsserting(ctx context.Context, t *testing.T, client apiclient.APIClient, image string) {\n\trc, err := client.ImagePush(ctx, ttrImageName(image), types.ImagePushOptions{RegistryAuth: \"{}\"})\n\tassert.Assert(t, err)\n\tif rc != nil {\n\t\tbody, err := readAllAndClose(rc)\n\t\tassert.Assert(t, err)\n\t\tassert.Assert(t, strings.Contains(body, `\"status\":\"latest: digest: `))\n\t}\n}", "func (i ImageFetcher) Fetch(path string) (image.Image, error) {\n\tresp, err := http.Get(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\treturn png.Decode(resp.Body)\n}", "func ensureNodeImages(status *cli.Status, image string) {\n\t// prints user friendly message\n\tif strings.Contains(image, \"@sha256:\") {\n\t\timage = strings.Split(image, \"@sha256:\")[0]\n\t}\n\tstatus.Start(fmt.Sprintf(\"Ensuring node image (%s) \", image))\n\n\tfmt.Println(\"pull image ...\")\n\t// attempt to explicitly pull the image if it doesn't exist locally\n\t// we don't care if this errors, we'll still try to run which also pulls\n\t_, _ = docker.PullIfNotPresent(image, 3)\n}", "func (p *Pvr) LoadRemoteImage(app *AppData) error {\n\n\tvar dockerManifest *schema2.Manifest\n\n\tapp.RemoteImage = DockerImage{\n\t\tExists: false,\n\t}\n\timage, err := registry.ParseImage(app.From)\n\tif err != nil {\n\t\treturn err\n\t}\n\tauth, err := p.AuthConfig(app.Username, app.Password, image.Domain)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdockerRegistry, err := p.GetDockerRegistry(image, auth)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\tvar repoDigest string\n\tvar dockerPlatform string\n\tvar platforms []interface{}\n\n\tif app.Platform == \"\" {\n\t\tdockerJsonI, ok := p.PristineJsonMap[\"_hostconfig/pvr/docker.json\"]\n\n\t\tif ok {\n\t\t\tdockerJson := dockerJsonI.(map[string]interface{})\n\t\t\tplatformsI, ok := dockerJson[\"platforms\"]\n\t\t\tif ok {\n\t\t\t\tplatforms = platformsI.([]interface{})\n\t\t\t}\n\t\t}\n\t} else {\n\t\tplatforms = append(platforms, app.Platform)\n\t}\n\n\t// we go down the multiarch path if we have seen a platform\n\t// restriction in pvr-docker.json\n\tif platforms != nil {\n\t\tmanifestList, err := dockerRegistry.ManifestList(context.Background(),\n\t\t\timage.Path, image.Reference())\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, v := range manifestList.Manifests {\n\t\t\tfor _, v1 := range platforms {\n\t\t\t\tv1S := v1.(string)\n\t\t\t\tp := strings.SplitN(v1S, \"/\", 3)\n\t\t\t\tif v.Platform.Architecture == p[1] &&\n\t\t\t\t\t(len(p) < 3 || p[2] == v.Platform.Variant) {\n\t\t\t\t\trepoDigest = v.Digest.String()\n\t\t\t\t\tdockerPlatform = v1S\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif repoDigest != \"\" {\n\t\t\t\tdm, err := dockerRegistry.ManifestV2(context.Background(), image.Path, repoDigest)\n\t\t\t\tdockerManifest = &dm\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Found Manifest for platform %s\\n\",\n\t\t\t\t\tdockerPlatform)\n\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tdockerPlatform = \"\"\n\t\t\t}\n\t\t}\n\t}\n\n\tif dockerManifest == nil {\n\t\tdockerManifest, err = p.GetDockerManifest(image, auth)\n\t\tif err != nil {\n\t\t\tmanifestErr := ReportDockerManifestError(err, app.From)\n\t\t\tif err.Error() == \"image not found or you do not have access\" {\n\t\t\t\tfmt.Fprintf(os.Stderr, manifestErr.Error()+\"\\n\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn manifestErr\n\t\t}\n\t\tfmt.Fprintf(os.Stderr, \"Found Manifest for default platform.\\n\")\n\t}\n\n\tdockerConfig, err := p.GetDockerConfig(dockerManifest, image, auth)\n\tif err != nil {\n\t\terr = ReportDockerManifestError(err, app.From)\n\t\treturn err\n\t}\n\n\t// if we cannot find our arch we go the old direct way of retrieving repo\n\tif repoDigest == \"\" && app.RemoteImage.DockerPlatform != \"\" {\n\t\treturn errors.New(\"no docker image found for platform \" + app.RemoteImage.DockerPlatform)\n\t} else if repoDigest == \"\" {\n\t\trepoDigest, err = p.GetDockerImageRepoDigest(image, auth)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tli := strings.LastIndex(app.From, \":\")\n\tvar imageName string\n\tif li < 0 {\n\t\timageName = app.From\n\t} else {\n\t\tsplits := []string{app.From[:li], app.From[li+1:]}\n\t\timageName = splits[0]\n\t}\n\n\t//Extract image name from repo digest. 
eg: Extract \"busybox\" from \"busybox@sha256:afe605d272837ce1732f390966166c2afff5391208ddd57de10942748694049d\"\n\tif strings.Contains(imageName, \"@sha256\") {\n\t\tsplits := strings.Split(imageName, \"@\")\n\t\timageName = splits[0]\n\t}\n\n\tif !strings.Contains(repoDigest, \"@\") {\n\t\trepoDigest = imageName + \"@\" + repoDigest\n\t}\n\n\tapp.Username = auth.Username\n\tapp.Password = auth.Password\n\n\tapp.RemoteImage.Exists = true\n\tapp.RemoteImage.DockerDigest = repoDigest\n\tapp.RemoteImage.DockerConfig = dockerConfig\n\tapp.RemoteImage.DockerManifest = dockerManifest\n\tapp.RemoteImage.DockerRegistry = dockerRegistry\n\tapp.RemoteImage.DockerPlatform = dockerPlatform\n\tapp.RemoteImage.ImagePath = image.Path\n\n\treturn nil\n}", "func ttrPullImage(ctx context.Context, client apiclient.APIClient, image string) error {\n\trc, err := client.ImagePull(ctx, ttrImageName(image), types.ImagePullOptions{RegistryAuth: \"{}\"})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif rc != nil {\n\t\tbody, err := readAllAndClose(rc)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !strings.Contains(body, \"Status: Downloaded newer image\") {\n\t\t\treturn errors.New(\"image pull not successful\")\n\t\t}\n\t}\n\treturn nil\n}", "func (s stack) InspectImage(ctx context.Context, id string) (_ *abstract.Image, ferr fail.Error) {\n\tif valid.IsNil(s) {\n\t\treturn nil, fail.InvalidInstanceError()\n\t}\n\treturn nil, fail.NotImplementedError(\"implement me\")\n}", "func (f *finder) findImage(img string, asc string, discover bool) (*types.Hash, error) {\n\t// check if it is a valid hash, if so let it pass through\n\th, err := types.NewHash(img)\n\tif err == nil {\n\t\tfullKey, err := f.s.ResolveKey(img)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not resolve key: %v\", err)\n\t\t}\n\t\th, err = types.NewHash(fullKey)\n\t\tif err != nil {\n\t\t\t// should never happen\n\t\t\tpanic(err)\n\t\t}\n\t\treturn h, nil\n\t}\n\n\t// try fetching the image, potentially remotely\n\tft := &fetcher{\n\t\timageActionData: f.imageActionData,\n\t\tlocal: f.local,\n\t\twithDeps: f.withDeps,\n\t}\n\tkey, err := ft.fetchImage(img, asc, discover)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\th, err = types.NewHash(key)\n\tif err != nil {\n\t\t// should never happen\n\t\tpanic(err)\n\t}\n\n\treturn h, nil\n}", "func (o *VulnerabilityResponse) GetImageDigestOk() (*string, bool) {\n\tif o == nil || o.ImageDigest == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ImageDigest, true\n}", "func (f *Fetch) mustFetch(url string) []byte {\n\tb, err := f.fetch(url, true)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error downloading %s: %s\\n\", f.repos[len(f.repos)-1]+url, err)\n\t}\n\treturn b\n}", "func (a *AgentServer) PullImage(req PullImageRequest, image *string) error {\n\n\tlogger := plog.WithFields(logrus.Fields{\n\t\t\"image\": req.Image,\n\t\t\"registry\": req.Registry})\n\n\t// set up the connections\n\tdocker, err := docker.NewDockerClient()\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"Could not connect to docker client\")\n\t\treturn err\n\t}\n\tconn, err := zzk.GetLocalConnection(\"/\")\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"Could not acquire coordinator connection\")\n\t\treturn err\n\t}\n\n\t// pull the image from the registry\n\treg := registry.NewRegistryListener(docker, req.Registry, \"\")\n\treg.SetConnection(conn)\n\ttimer := time.NewTimer(req.Timeout)\n\tdefer timer.Stop()\n\tif err := reg.PullImage(timer.C, req.Image); err != nil {\n\t\tlogger.WithError(err).Error(\"Could not 
pull image from registry\")\n\t\treturn err\n\t}\n\n\t// get the tag of the image pulled\n\t*image, err = reg.ImagePath(req.Image)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"Could not get image id for image from registry\")\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (s *SharemeService) ImageURL(c *gae.Context, key string) string {\n\tobj := s.Get(c, key)\n\tif bb, ok := obj.(*BlobBinary); ok && strings.HasPrefix(bb.MimeType(), \"image\") {\n\t\tif url, _, err := imgurl.UrlifyR(bb, bb.MimeType(), 0, 0); err == nil {\n\t\t\treturn url\n\t\t} else {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\treturn \"\"\n}", "func (sub *Sub) fetchMissingObjects(srpcClient *srpc.Client, image *image.Image,\n\tfreeSpace *uint64, pushComputedFiles bool) (\n\tbool, subStatus) {\n\tif image == nil {\n\t\treturn false, statusImageNotReady\n\t}\n\tlogger := sub.herd.logger\n\tsubObj := lib.Sub{\n\t\tHostname: sub.mdb.Hostname,\n\t\tClient: srpcClient,\n\t\tFileSystem: sub.fileSystem,\n\t\tComputedInodes: sub.computedInodes,\n\t\tObjectCache: sub.objectCache,\n\t\tObjectGetter: sub.herd.objectServer}\n\tobjectsToFetch, objectsToPush := lib.BuildMissingLists(subObj, image,\n\t\tpushComputedFiles, false, logger)\n\tif objectsToPush == nil {\n\t\treturn false, statusMissingComputedFile\n\t}\n\tvar returnAvailable bool = true\n\tvar returnStatus subStatus = statusSynced\n\tif len(objectsToFetch) > 0 {\n\t\tif !sub.checkForEnoughSpace(freeSpace, objectsToFetch) {\n\t\t\treturn false, statusNotEnoughFreeSpace\n\t\t}\n\t\tlogger.Printf(\"Calling %s:Subd.Fetch() for: %d objects\\n\",\n\t\t\tsub, len(objectsToFetch))\n\t\terr := client.Fetch(srpcClient, sub.herd.imageManager.String(),\n\t\t\tobjectcache.ObjectMapToCache(objectsToFetch))\n\t\tif err != nil {\n\t\t\tsrpcClient.Close()\n\t\t\tlogger.Printf(\"Error calling %s:Subd.Fetch(): %s\\n\", sub, err)\n\t\t\tif err == srpc.ErrorAccessToMethodDenied {\n\t\t\t\treturn false, statusFetchDenied\n\t\t\t}\n\t\t\treturn false, statusFailedToFetch\n\t\t}\n\t\treturnAvailable = false\n\t\treturnStatus = statusFetching\n\t}\n\tif len(objectsToPush) > 0 {\n\t\tsub.herd.cpuSharer.GrabSemaphore(sub.herd.pushSemaphore)\n\t\tdefer func() { <-sub.herd.pushSemaphore }()\n\t\tsub.status = statusPushing\n\t\terr := lib.PushObjects(subObj, objectsToPush, logger)\n\t\tif err != nil {\n\t\t\tif err == srpc.ErrorAccessToMethodDenied {\n\t\t\t\treturn false, statusPushDenied\n\t\t\t}\n\t\t\tif err == lib.ErrorFailedToGetObject {\n\t\t\t\treturn false, statusFailedToGetObject\n\t\t\t}\n\t\t\treturn false, statusFailedToPush\n\t\t}\n\t\tif returnAvailable {\n\t\t\t// Update local copy of objectcache, since there will not be\n\t\t\t// another Poll() before the update computation.\n\t\t\tfor hashVal := range objectsToPush {\n\t\t\t\tsub.objectCache = append(sub.objectCache, hashVal)\n\t\t\t}\n\t\t}\n\t}\n\treturn returnAvailable, returnStatus\n}", "func downloadImage(data *ImageData) (io.ReadCloser, error) {\n\ttimeout := time.Duration(10 * time.Second)\n\tclient := http.Client{\n\t\tTimeout: timeout,\n\t}\n\tresponse, err := client.Get(data.Url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer client.CloseIdleConnections()\n\treturn response.Body, err\n}", "func (re *registryV1Endpoint) FetchLayers(img ImageRef, dest string) ([]string, error) {\n\temptySet := []string{}\n\tif _, ok := re.tokens[img.Name()]; !ok {\n\t\tif _, err := re.Token(img); err != nil {\n\t\t\treturn emptySet, err\n\t\t}\n\t}\n\tif img.ID() == \"\" {\n\t\tif _, err := re.ImageID(img); err != nil {\n\t\t\treturn 
emptySet, err\n\t\t}\n\t}\n\tif len(img.Ancestry()) == 0 {\n\t\tif _, err := re.Ancestry(img); err != nil {\n\t\t\treturn emptySet, err\n\t\t}\n\t}\n\n\tendpoint := re.host\n\tif len(re.endpoints) > 0 {\n\t\tendpoint = re.endpoints[0]\n\t}\n\tfor _, id := range img.Ancestry() {\n\t\tlogrus.Debugf(\"Fetching layer %s\", id)\n\t\tif err := os.MkdirAll(path.Join(dest, id), 0755); err != nil {\n\t\t\treturn emptySet, err\n\t\t}\n\t\t// get the json file first\n\t\terr := func() error {\n\t\t\turl := fmt.Sprintf(\"https://%s/v1/images/%s/json\", endpoint, id)\n\t\t\treq, err := http.NewRequest(\"GET\", url, nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treq.Header.Add(\"Authorization\", fmt.Sprintf(\"Token %s\", re.tokens[img.Name()]))\n\n\t\t\tresp, err := http.DefaultClient.Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\n\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\treturn fmt.Errorf(\"Get(%q) returned %q\", url, resp.Status)\n\t\t\t}\n\n\t\t\t//logrus.Debugf(\"%#v\", resp)\n\t\t\tfh, err := os.Create(path.Join(dest, id, \"json\"))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer fh.Close()\n\t\t\tif _, err := io.Copy(fh, resp.Body); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t}()\n\t\tif err != nil {\n\t\t\treturn emptySet, err\n\t\t}\n\n\t\t// get the layer file next\n\t\terr = func() error {\n\t\t\turl := fmt.Sprintf(\"https://%s/v1/images/%s/layer\", endpoint, id)\n\t\t\treq, err := http.NewRequest(\"GET\", url, nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tlogrus.Debugf(\"%q\", fmt.Sprintf(\"Token %s\", re.tokens[img.Name()]))\n\t\t\treq.Header.Add(\"Authorization\", fmt.Sprintf(\"Token %s\", re.tokens[img.Name()]))\n\n\t\t\tresp, err := http.DefaultClient.Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\n\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\treturn fmt.Errorf(\"Get(%q) returned %q\", url, resp.Status)\n\t\t\t}\n\n\t\t\tlogrus.Debugf(\"[FetchLayers] ended up at %q\", resp.Request.URL.String())\n\t\t\tlogrus.Debugf(\"[FetchLayers] response %#v\", resp)\n\t\t\tfh, err := os.Create(path.Join(dest, id, \"layer.tar\"))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer fh.Close()\n\t\t\tif _, err := io.Copy(fh, resp.Body); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t}()\n\t\tif err != nil {\n\t\t\treturn emptySet, err\n\t\t}\n\t}\n\n\treturn img.Ancestry(), nil\n}", "func (f MockFetch) Fetch(targetServer Server) ServerStatus {\n\tif targetServer.ID == 196 {\n\t\treturn ServerStatus{targetServer.ID, false, \"404\", targetServer.URL, time.Now()}\n\t}\n\treturn ServerStatus{targetServer.ID, true, \"\", targetServer.URL, time.Now()}\n}", "func (d *Daemon) ImageDownload(op *operation, server string, protocol string, certificate string, secret string, alias string, forContainer bool, autoUpdate bool) (string, error) {\n\tvar err error\n\tvar ss *shared.SimpleStreams\n\tvar ctxMap log.Ctx\n\n\tif protocol == \"\" {\n\t\tprotocol = \"lxd\"\n\t}\n\n\tfp := alias\n\n\t// Expand aliases\n\tif protocol == \"simplestreams\" {\n\t\timageStreamCacheLock.Lock()\n\t\tentry, _ := imageStreamCache[server]\n\t\tif entry == nil || entry.expiry.Before(time.Now()) {\n\t\t\trefresh := func() (*imageStreamCacheEntry, error) {\n\t\t\t\t// Setup simplestreams client\n\t\t\t\tss, err = shared.SimpleStreamsClient(server, d.proxy)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\t// 
Get all aliases\n\t\t\t\taliases, err := ss.ListAliases()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\t// Get all fingerprints\n\t\t\t\timages, err := ss.ListImages()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\tfingerprints := []string{}\n\t\t\t\tfor _, image := range images {\n\t\t\t\t\tfingerprints = append(fingerprints, image.Fingerprint)\n\t\t\t\t}\n\n\t\t\t\t// Generate cache entry\n\t\t\t\tentry = &imageStreamCacheEntry{ss: ss, Aliases: aliases, Fingerprints: fingerprints, expiry: time.Now().Add(time.Hour)}\n\t\t\t\timageStreamCache[server] = entry\n\t\t\t\timageSaveStreamCache()\n\n\t\t\t\treturn entry, nil\n\t\t\t}\n\n\t\t\tnewEntry, err := refresh()\n\t\t\tif err == nil {\n\t\t\t\t// Cache refreshed\n\t\t\t\tentry = newEntry\n\t\t\t} else if entry != nil {\n\t\t\t\t// Failed to fetch entry but existing cache\n\t\t\t\tshared.LogWarn(\"Unable to refresh cache, using stale entry\", log.Ctx{\"server\": server})\n\t\t\t\tentry.expiry = time.Now().Add(time.Hour)\n\t\t\t} else {\n\t\t\t\t// Failed to fetch entry and nothing in cache\n\t\t\t\timageStreamCacheLock.Unlock()\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t} else {\n\t\t\tshared.LogDebug(\"Using SimpleStreams cache entry\", log.Ctx{\"server\": server, \"expiry\": entry.expiry})\n\t\t\tss = entry.ss\n\t\t}\n\t\timageStreamCacheLock.Unlock()\n\n\t\t// Expand aliases\n\t\tfor _, alias := range entry.Aliases {\n\t\t\tif alias.Name != fp {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfp = alias.Target\n\t\t\tbreak\n\t\t}\n\n\t\t// Expand fingerprint\n\t\tfor _, fingerprint := range entry.Fingerprints {\n\t\t\tif !strings.HasPrefix(fingerprint, fp) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif fp == alias {\n\t\t\t\talias = fingerprint\n\t\t\t}\n\t\t\tfp = fingerprint\n\t\t\tbreak\n\t\t}\n\t} else if protocol == \"lxd\" {\n\t\ttarget, err := remoteGetImageFingerprint(d, server, certificate, fp)\n\t\tif err == nil && target != \"\" {\n\t\t\tfp = target\n\t\t}\n\t}\n\n\tif _, _, err := dbImageGet(d.db, fp, false, false); err == nil {\n\t\tshared.LogDebug(\"Image already exists in the db\", log.Ctx{\"image\": fp})\n\t\t// already have it\n\t\treturn fp, nil\n\t}\n\n\t// Now check if we already downloading the image\n\td.imagesDownloadingLock.RLock()\n\tif waitChannel, ok := d.imagesDownloading[fp]; ok {\n\t\t// We already download the image\n\t\td.imagesDownloadingLock.RUnlock()\n\n\t\tshared.LogDebug(\n\t\t\t\"Already downloading the image, waiting for it to succeed\",\n\t\t\tlog.Ctx{\"image\": fp})\n\n\t\t// Wait until the download finishes (channel closes)\n\t\tif _, ok := <-waitChannel; ok {\n\t\t\tshared.LogWarnf(\"Value transmitted over image lock semaphore?\")\n\t\t}\n\n\t\tif _, _, err := dbImageGet(d.db, fp, false, true); err != nil {\n\t\t\tshared.LogError(\n\t\t\t\t\"Previous download didn't succeed\",\n\t\t\t\tlog.Ctx{\"image\": fp})\n\n\t\t\treturn \"\", fmt.Errorf(\"Previous download didn't succeed\")\n\t\t}\n\n\t\tshared.LogDebug(\n\t\t\t\"Previous download succeeded\",\n\t\t\tlog.Ctx{\"image\": fp})\n\n\t\treturn fp, nil\n\t}\n\n\td.imagesDownloadingLock.RUnlock()\n\n\tif op == nil {\n\t\tctxMap = log.Ctx{\"alias\": alias, \"server\": server}\n\t} else {\n\t\tctxMap = log.Ctx{\"trigger\": op.url, \"image\": fp, \"operation\": op.id, \"alias\": alias, \"server\": server}\n\t}\n\n\tshared.LogInfo(\"Downloading image\", ctxMap)\n\n\t// Add the download to the queue\n\td.imagesDownloadingLock.Lock()\n\td.imagesDownloading[fp] = make(chan bool)\n\td.imagesDownloadingLock.Unlock()\n\n\t// 
Unlock once this func ends.\n\tdefer func() {\n\t\td.imagesDownloadingLock.Lock()\n\t\tif waitChannel, ok := d.imagesDownloading[fp]; ok {\n\t\t\tclose(waitChannel)\n\t\t\tdelete(d.imagesDownloading, fp)\n\t\t}\n\t\td.imagesDownloadingLock.Unlock()\n\t}()\n\n\texporturl := server\n\n\tvar info shared.ImageInfo\n\tinfo.Fingerprint = fp\n\n\tdestDir := shared.VarPath(\"images\")\n\tdestName := filepath.Join(destDir, fp)\n\tif shared.PathExists(destName) {\n\t\td.Storage.ImageDelete(fp)\n\t}\n\n\tprogress := func(progressInt int64, speedInt int64) {\n\t\tif op == nil {\n\t\t\treturn\n\t\t}\n\n\t\tmeta := op.metadata\n\t\tif meta == nil {\n\t\t\tmeta = make(map[string]interface{})\n\t\t}\n\n\t\tprogress := fmt.Sprintf(\"%d%% (%s/s)\", progressInt, shared.GetByteSizeString(speedInt))\n\n\t\tif meta[\"download_progress\"] != progress {\n\t\t\tmeta[\"download_progress\"] = progress\n\t\t\top.UpdateMetadata(meta)\n\t\t}\n\t}\n\n\tif protocol == \"lxd\" {\n\t\t/* grab the metadata from /1.0/images/%s */\n\t\tvar url string\n\t\tif secret != \"\" {\n\t\t\turl = fmt.Sprintf(\n\t\t\t\t\"%s/%s/images/%s?secret=%s\",\n\t\t\t\tserver, shared.APIVersion, fp, secret)\n\t\t} else {\n\t\t\turl = fmt.Sprintf(\"%s/%s/images/%s\", server, shared.APIVersion, fp)\n\t\t}\n\n\t\tresp, err := d.httpGetSync(url, certificate)\n\t\tif err != nil {\n\t\t\tshared.LogError(\n\t\t\t\t\"Failed to download image metadata\",\n\t\t\t\tlog.Ctx{\"image\": fp, \"err\": err})\n\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif err := json.Unmarshal(resp.Metadata, &info); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\t/* now grab the actual file from /1.0/images/%s/export */\n\t\tif secret != \"\" {\n\t\t\texporturl = fmt.Sprintf(\n\t\t\t\t\"%s/%s/images/%s/export?secret=%s\",\n\t\t\t\tserver, shared.APIVersion, fp, secret)\n\n\t\t} else {\n\t\t\texporturl = fmt.Sprintf(\n\t\t\t\t\"%s/%s/images/%s/export\",\n\t\t\t\tserver, shared.APIVersion, fp)\n\t\t}\n\t} else if protocol == \"simplestreams\" {\n\t\terr := ss.Download(fp, \"meta\", destName, nil)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\terr = ss.Download(fp, \"root\", destName+\".rootfs\", progress)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tinfo, err := ss.GetImageInfo(fp)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tinfo.Public = false\n\t\tinfo.AutoUpdate = autoUpdate\n\n\t\t_, err = imageBuildFromInfo(d, *info)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif alias != fp {\n\t\t\tid, _, err := dbImageGet(d.db, fp, false, true)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\n\t\t\terr = dbImageSourceInsert(d.db, id, server, protocol, \"\", alias)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t}\n\n\t\tshared.LogInfo(\"Image downloaded\", ctxMap)\n\n\t\tif forContainer {\n\t\t\treturn fp, dbImageLastAccessInit(d.db, fp)\n\t\t}\n\n\t\treturn fp, nil\n\t}\n\n\traw, err := d.httpGetFile(exporturl, certificate)\n\tif err != nil {\n\t\tshared.LogError(\n\t\t\t\"Failed to download image\",\n\t\t\tlog.Ctx{\"image\": fp, \"err\": err})\n\t\treturn \"\", err\n\t}\n\tinfo.Size = raw.ContentLength\n\n\tctype, ctypeParams, err := mime.ParseMediaType(raw.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tctype = \"application/octet-stream\"\n\t}\n\n\tbody := &shared.ProgressReader{\n\t\tReadCloser: raw.Body,\n\t\tTracker: &shared.ProgressTracker{\n\t\t\tLength: raw.ContentLength,\n\t\t\tHandler: progress,\n\t\t},\n\t}\n\n\tif ctype == \"multipart/form-data\" {\n\t\t// Parse the POST data\n\t\tmr := 
multipart.NewReader(body, ctypeParams[\"boundary\"])\n\n\t\t// Get the metadata tarball\n\t\tpart, err := mr.NextPart()\n\t\tif err != nil {\n\t\t\tshared.LogError(\n\t\t\t\t\"Invalid multipart image\",\n\t\t\t\tlog.Ctx{\"image\": fp, \"err\": err})\n\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif part.FormName() != \"metadata\" {\n\t\t\tshared.LogError(\n\t\t\t\t\"Invalid multipart image\",\n\t\t\t\tlog.Ctx{\"image\": fp, \"err\": err})\n\n\t\t\treturn \"\", fmt.Errorf(\"Invalid multipart image\")\n\t\t}\n\n\t\tdestName = filepath.Join(destDir, info.Fingerprint)\n\t\tf, err := os.Create(destName)\n\t\tif err != nil {\n\t\t\tshared.LogError(\n\t\t\t\t\"Failed to save image\",\n\t\t\t\tlog.Ctx{\"image\": fp, \"err\": err})\n\n\t\t\treturn \"\", err\n\t\t}\n\n\t\t_, err = io.Copy(f, part)\n\t\tf.Close()\n\n\t\tif err != nil {\n\t\t\tshared.LogError(\n\t\t\t\t\"Failed to save image\",\n\t\t\t\tlog.Ctx{\"image\": fp, \"err\": err})\n\n\t\t\treturn \"\", err\n\t\t}\n\n\t\t// Get the rootfs tarball\n\t\tpart, err = mr.NextPart()\n\t\tif err != nil {\n\t\t\tshared.LogError(\n\t\t\t\t\"Invalid multipart image\",\n\t\t\t\tlog.Ctx{\"image\": fp, \"err\": err})\n\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif part.FormName() != \"rootfs\" {\n\t\t\tshared.LogError(\n\t\t\t\t\"Invalid multipart image\",\n\t\t\t\tlog.Ctx{\"image\": fp})\n\t\t\treturn \"\", fmt.Errorf(\"Invalid multipart image\")\n\t\t}\n\n\t\tdestName = filepath.Join(destDir, info.Fingerprint+\".rootfs\")\n\t\tf, err = os.Create(destName)\n\t\tif err != nil {\n\t\t\tshared.LogError(\n\t\t\t\t\"Failed to save image\",\n\t\t\t\tlog.Ctx{\"image\": fp, \"err\": err})\n\t\t\treturn \"\", err\n\t\t}\n\n\t\t_, err = io.Copy(f, part)\n\t\tf.Close()\n\n\t\tif err != nil {\n\t\t\tshared.LogError(\n\t\t\t\t\"Failed to save image\",\n\t\t\t\tlog.Ctx{\"image\": fp, \"err\": err})\n\t\t\treturn \"\", err\n\t\t}\n\t} else {\n\t\tdestName = filepath.Join(destDir, info.Fingerprint)\n\n\t\tf, err := os.Create(destName)\n\t\tif err != nil {\n\t\t\tshared.LogError(\n\t\t\t\t\"Failed to save image\",\n\t\t\t\tlog.Ctx{\"image\": fp, \"err\": err})\n\n\t\t\treturn \"\", err\n\t\t}\n\n\t\t_, err = io.Copy(f, body)\n\t\tf.Close()\n\n\t\tif err != nil {\n\t\t\tshared.LogError(\n\t\t\t\t\"Failed to save image\",\n\t\t\t\tlog.Ctx{\"image\": fp, \"err\": err})\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\tif protocol == \"direct\" {\n\t\timageMeta, err := getImageMetadata(destName)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tinfo.Architecture = imageMeta.Architecture\n\t\tinfo.CreationDate = time.Unix(imageMeta.CreationDate, 0)\n\t\tinfo.ExpiryDate = time.Unix(imageMeta.ExpiryDate, 0)\n\t\tinfo.Properties = imageMeta.Properties\n\t}\n\n\t// By default, make all downloaded images private\n\tinfo.Public = false\n\n\tif alias != fp && secret == \"\" {\n\t\tinfo.AutoUpdate = autoUpdate\n\t}\n\n\t_, err = imageBuildFromInfo(d, info)\n\tif err != nil {\n\t\tshared.LogError(\n\t\t\t\"Failed to create image\",\n\t\t\tlog.Ctx{\"image\": fp, \"err\": err})\n\n\t\treturn \"\", err\n\t}\n\n\tif alias != fp {\n\t\tid, _, err := dbImageGet(d.db, fp, false, true)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\terr = dbImageSourceInsert(d.db, id, server, protocol, \"\", alias)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\tshared.LogInfo(\"Image downloaded\", ctxMap)\n\n\tif forContainer {\n\t\treturn fp, dbImageLastAccessInit(d.db, fp)\n\t}\n\n\treturn fp, nil\n}", "func (c MockDockerClient) ImageInspect(ctx context.Context, imageName string) 
(dockertypes.ImageInspect, error) {\n\tif c.ImageInspectFn != nil {\n\t\tfmt.Println(\"[MockDockerClient] In \", utils.CurrentFunctionName())\n\t\tfmt.Println(\"[MockDockerClient] - ctx: \", ctx)\n\t\tfmt.Println(\"[MockDockerClient] - imageName: \", imageName)\n\t\treturn c.ImageInspectFn(ctx, imageName)\n\t}\n\tpanic(fmt.Sprintf(\"No function defined for: %s\", utils.CurrentFunctionName()))\n}", "func isImagePushingToECRInProgress(s string) bool {\n\treturn strings.Contains(s, \"denied: Your authorization token has expired. Reauthenticate and try again.\") ||\n\t\tstrings.Contains(s, \"no basic auth credentials\")\n}", "func ensureNodeImage(image string) {\n\tfmt.Printf(\"Ensuring node image (%s) 🖼\\n\", image)\n\n\t// attempt to explicitly pull the image if it doesn't exist locally\n\t// we don't care if this errors, we'll still try to run which also pulls\n\t_, _ = host.PullImage(image, 4)\n}", "func TestNonRootFetchRmGCImage(t *testing.T) {\n\tctx := testutils.NewRktRunCtx()\n\tdefer ctx.Cleanup()\n\n\tuid, _ := ctx.GetUidGidRktBinOwnerNotRoot()\n\n\trktGid, err := common.LookupGid(common.RktGroup)\n\tif err != nil {\n\t\tt.Skipf(\"Skipping the test because there's no %q group\", common.RktGroup)\n\t}\n\n\tif err := ctx.SetupDataDir(); err != nil {\n\t\tt.Fatalf(\"failed to setup data dir: %v\", err)\n\t}\n\n\trootImg := patchTestACI(\"rkt-inspect-root-rm.aci\", \"--exec=/inspect --print-msg=foobar\")\n\tdefer os.Remove(rootImg)\n\trootImgHash, err := importImageAndFetchHash(t, ctx, \"\", rootImg)\n\tif err != nil {\n\t\tt.Fatalf(\"%v\", err)\n\t}\n\n\t// Launch/gc a pod so we can test non-root image gc.\n\trunCmd := fmt.Sprintf(\"%s --insecure-options=image run --mds-register=false %s\", ctx.Cmd(), rootImg)\n\trunRktAndCheckOutput(t, runCmd, \"foobar\", false)\n\n\tctx.RunGC()\n\n\t// Should not be able to do image gc.\n\t// We can't touch the treestores even as members of the rkt group.\n\timgGCCmd := fmt.Sprintf(\"%s image gc\", ctx.Cmd())\n\tt.Logf(\"Running %s\", imgGCCmd)\n\trunRktAsUidGidAndCheckOutput(t, imgGCCmd, \"permission denied\", false, true, uid, rktGid)\n\n\t// Should be able to remove the image fetched by root since we're in the rkt group.\n\timgRmCmd := fmt.Sprintf(\"%s image rm %s\", ctx.Cmd(), rootImgHash)\n\tt.Logf(\"Running %s\", imgRmCmd)\n\trunRktAsUidGidAndCheckOutput(t, imgRmCmd, \"successfully removed\", false, false, uid, rktGid)\n\n\t// Should be able to remove the image fetched by ourselves.\n\tnonrootImg := patchTestACI(\"rkt-inspect-non-root-rm.aci\", \"--exec=/inspect\")\n\tdefer os.Remove(nonrootImg)\n\tnonrootImgHash, err := importImageAndFetchHashAsUidGid(t, ctx, nonrootImg, \"\", uid, rktGid)\n\tif err != nil {\n\t\tt.Fatalf(\"%v\", err)\n\t}\n\n\timgRmCmd = fmt.Sprintf(\"%s image rm %s\", ctx.Cmd(), nonrootImgHash)\n\tt.Logf(\"Running %s\", imgRmCmd)\n\trunRktAsUidGidAndCheckOutput(t, imgRmCmd, \"successfully removed\", false, false, uid, rktGid)\n}", "func checkImage(image liferay.Image) {\n\texists := docker.CheckDockerImageExists(image.GetFullyQualifiedName())\n\n\tif exists == false {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"image\": image.GetFullyQualifiedName(),\n\t\t}).Warn(\"Image has NOT been pulled from Docker Hub\")\n\t\treturn\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"image\": image.GetFullyQualifiedName(),\n\t}).Info(\"Image has been pulled from Docker Hub\")\n}", "func (e *Environment) ensureImageExists(image string) error {\n\te.Events().Publish(environment.DockerImagePullStarted, \"\")\n\tdefer 
e.Events().Publish(environment.DockerImagePullCompleted, \"\")\n\n\t// Images prefixed with a ~ are local images that we do not need to try and pull.\n\tif strings.HasPrefix(image, \"~\") {\n\t\treturn nil\n\t}\n\n\t// Give it up to 15 minutes to pull the image. I think this should cover 99.8% of cases where an\n\t// image pull might fail. I can't imagine it will ever take more than 15 minutes to fully pull\n\t// an image. Let me know when I am inevitably wrong here...\n\tctx, cancel := context.WithTimeout(context.Background(), time.Minute*15)\n\tdefer cancel()\n\n\t// Get a registry auth configuration from the config.\n\tvar registryAuth *config.RegistryConfiguration\n\tfor registry, c := range config.Get().Docker.Registries {\n\t\tif !strings.HasPrefix(image, registry) {\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.WithField(\"registry\", registry).Debug(\"using authentication for registry\")\n\t\tregistryAuth = &c\n\t\tbreak\n\t}\n\n\t// Get the ImagePullOptions.\n\timagePullOptions := types.ImagePullOptions{All: false}\n\tif registryAuth != nil {\n\t\tb64, err := registryAuth.Base64()\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Error(\"failed to get registry auth credentials\")\n\t\t}\n\n\t\t// b64 is a string so if there is an error it will just be empty, not nil.\n\t\timagePullOptions.RegistryAuth = b64\n\t}\n\n\tout, err := e.client.ImagePull(ctx, image, imagePullOptions)\n\tif err != nil {\n\t\timages, ierr := e.client.ImageList(ctx, types.ImageListOptions{})\n\t\tif ierr != nil {\n\t\t\t// Well damn, something has gone really wrong here, just go ahead and abort there\n\t\t\t// isn't much anything we can do to try and self-recover from this.\n\t\t\treturn errors.Wrap(ierr, \"environment/docker: failed to list images\")\n\t\t}\n\n\t\tfor _, img := range images {\n\t\t\tfor _, t := range img.RepoTags {\n\t\t\t\tif t != image {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"image\": image,\n\t\t\t\t\t\"container_id\": e.Id,\n\t\t\t\t\t\"err\": err.Error(),\n\t\t\t\t}).Warn(\"unable to pull requested image from remote source, however the image exists locally\")\n\n\t\t\t\t// Okay, we found a matching container image, in that case just go ahead and return\n\t\t\t\t// from this function, since there is nothing else we need to do here.\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\treturn errors.Wrapf(err, \"environment/docker: failed to pull \\\"%s\\\" image for server\", image)\n\t}\n\tdefer out.Close()\n\n\tlog.WithField(\"image\", image).Debug(\"pulling docker image... 
this could take a bit of time\")\n\n\t// I'm not sure what the best approach here is, but this will block execution until the image\n\t// is done being pulled, which is what we need.\n\tscanner := bufio.NewScanner(out)\n\n\tfor scanner.Scan() {\n\t\tb := scanner.Bytes()\n\t\tstatus, _ := jsonparser.GetString(b, \"status\")\n\t\tprogress, _ := jsonparser.GetString(b, \"progress\")\n\n\t\te.Events().Publish(environment.DockerImagePullStatus, status+\" \"+progress)\n\t}\n\n\tif err := scanner.Err(); err != nil {\n\t\treturn err\n\t}\n\n\tlog.WithField(\"image\", image).Debug(\"completed docker image pull\")\n\n\treturn nil\n}", "func GetImage(ctx context.Context, sharedDownload map[string]*DownloadState, params *Params) (io.Reader, error) {\n\tlogger := logging.FromContext(ctx)\n\ttimeout := params.Timeout\n\tURL := params.URL\n\tvar imageReader io.Reader\n\n\tif dnState, ok := sharedDownload[URL]; ok {\n\t\tlogger.WithField(\"url\", URL).Trace(\"is fetching by another client\")\n\t\terrCh := make(chan error, 1)\n\t\tdnState.Subs = append(dnState.Subs, errCh)\n\t\tif err := <-errCh; err != nil {\n\t\t\tlogger.WithError(err).WithField(\"url\", URL).Trace(\"fetch failed\")\n\t\t\tdelete(sharedDownload, URL)\n\t\t\treturn nil, err\n\t\t}\n\t\timageReader = bytes.NewReader(dnState.Data)\n\t\tlogger.WithField(\"url\", URL).Trace(\"fetched shared\")\n\t} else {\n\t\tsubscribers := make([]chan error, 0, 1)\n\t\tdownloadState := &DownloadState{\n\t\t\tData: nil,\n\t\t\tSubs: subscribers,\n\t\t}\n\t\tsharedDownload[URL] = downloadState\n\t\tdefer func(sd map[string]*DownloadState, url string) {\n\t\t\tdelete(sd, url)\n\t\t}(sharedDownload, URL)\n\t\thttpClient := httpclient.NewHTTPClient(timeout)\n\t\tresponse, err := httpClient.Get(ctx, URL)\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).WithField(\"url\", URL).Error(\"fetch image failed\")\n\t\t\tfor _, subs := range downloadState.Subs {\n\t\t\t\tsubs <- err\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\n\t\tdownloadState.Data = response.RawBody\n\t\tfor _, subs := range downloadState.Subs {\n\t\t\tsubs <- nil\n\t\t}\n\t\timageReader = bytes.NewReader(response.RawBody)\n\t}\n\n\treturn imageReader, nil\n}", "func (d *Daemon) ImageDownload(op *operation, server string, certificate string, secret string, fp string, forContainer bool, directDownload bool) error {\n\tif _, _, err := dbImageGet(d.db, fp, false, false); err == nil {\n\t\tshared.Log.Debug(\"Image already exists in the db\", log.Ctx{\"image\": fp})\n\t\t// already have it\n\t\treturn nil\n\t}\n\n\tshared.Log.Info(\n\t\t\"Image not in the db, downloading it\",\n\t\tlog.Ctx{\"image\": fp, \"server\": server})\n\n\t// Now check if we already downloading the image\n\td.imagesDownloadingLock.RLock()\n\tif waitChannel, ok := d.imagesDownloading[fp]; ok {\n\t\t// We already download the image\n\t\td.imagesDownloadingLock.RUnlock()\n\n\t\tshared.Log.Info(\n\t\t\t\"Already downloading the image, waiting for it to succeed\",\n\t\t\tlog.Ctx{\"image\": fp})\n\n\t\t// Wait until the download finishes (channel closes)\n\t\tif _, ok := <-waitChannel; ok {\n\t\t\tshared.Log.Warn(\"Value transmitted over image lock semaphore?\")\n\t\t}\n\n\t\tif _, _, err := dbImageGet(d.db, fp, false, true); err != nil {\n\t\t\tshared.Log.Error(\n\t\t\t\t\"Previous download didn't succeed\",\n\t\t\t\tlog.Ctx{\"image\": fp})\n\n\t\t\treturn fmt.Errorf(\"Previous download didn't succeed\")\n\t\t}\n\n\t\tshared.Log.Info(\n\t\t\t\"Previous download succeeded\",\n\t\t\tlog.Ctx{\"image\": fp})\n\n\t\treturn 
nil\n\t}\n\n\td.imagesDownloadingLock.RUnlock()\n\n\tshared.Log.Info(\n\t\t\"Downloading the image\",\n\t\tlog.Ctx{\"image\": fp})\n\n\t// Add the download to the queue\n\td.imagesDownloadingLock.Lock()\n\td.imagesDownloading[fp] = make(chan bool)\n\td.imagesDownloadingLock.Unlock()\n\n\t// Unlock once this func ends.\n\tdefer func() {\n\t\td.imagesDownloadingLock.Lock()\n\t\tif waitChannel, ok := d.imagesDownloading[fp]; ok {\n\t\t\tclose(waitChannel)\n\t\t\tdelete(d.imagesDownloading, fp)\n\t\t}\n\t\td.imagesDownloadingLock.Unlock()\n\t}()\n\n\texporturl := server\n\n\tvar info shared.ImageInfo\n\tinfo.Fingerprint = fp\n\n\tif !directDownload {\n\t\t/* grab the metadata from /1.0/images/%s */\n\t\tvar url string\n\t\tif secret != \"\" {\n\t\t\turl = fmt.Sprintf(\n\t\t\t\t\"%s/%s/images/%s?secret=%s\",\n\t\t\t\tserver, shared.APIVersion, fp, secret)\n\t\t} else {\n\t\t\turl = fmt.Sprintf(\"%s/%s/images/%s\", server, shared.APIVersion, fp)\n\t\t}\n\n\t\tresp, err := d.httpGetSync(url, certificate)\n\t\tif err != nil {\n\t\t\tshared.Log.Error(\n\t\t\t\t\"Failed to download image metadata\",\n\t\t\t\tlog.Ctx{\"image\": fp, \"err\": err})\n\n\t\t\treturn err\n\t\t}\n\n\t\tif err := json.Unmarshal(resp.Metadata, &info); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t/* now grab the actual file from /1.0/images/%s/export */\n\t\tif secret != \"\" {\n\t\t\texporturl = fmt.Sprintf(\n\t\t\t\t\"%s/%s/images/%s/export?secret=%s\",\n\t\t\t\tserver, shared.APIVersion, fp, secret)\n\n\t\t} else {\n\t\t\texporturl = fmt.Sprintf(\n\t\t\t\t\"%s/%s/images/%s/export\",\n\t\t\t\tserver, shared.APIVersion, fp)\n\t\t}\n\t}\n\n\traw, err := d.httpGetFile(exporturl, certificate)\n\tif err != nil {\n\t\tshared.Log.Error(\n\t\t\t\"Failed to download image\",\n\t\t\tlog.Ctx{\"image\": fp, \"err\": err})\n\t\treturn err\n\t}\n\tinfo.Size = raw.ContentLength\n\n\tdestDir := shared.VarPath(\"images\")\n\tdestName := filepath.Join(destDir, fp)\n\tif shared.PathExists(destName) {\n\t\td.Storage.ImageDelete(fp)\n\t}\n\n\tctype, ctypeParams, err := mime.ParseMediaType(raw.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tctype = \"application/octet-stream\"\n\t}\n\n\tbody := &Progress{Reader: raw.Body, length: raw.ContentLength, op: op}\n\n\tif ctype == \"multipart/form-data\" {\n\t\t// Parse the POST data\n\t\tmr := multipart.NewReader(body, ctypeParams[\"boundary\"])\n\n\t\t// Get the metadata tarball\n\t\tpart, err := mr.NextPart()\n\t\tif err != nil {\n\t\t\tshared.Log.Error(\n\t\t\t\t\"Invalid multipart image\",\n\t\t\t\tlog.Ctx{\"image\": fp, \"err\": err})\n\n\t\t\treturn err\n\t\t}\n\n\t\tif part.FormName() != \"metadata\" {\n\t\t\tshared.Log.Error(\n\t\t\t\t\"Invalid multipart image\",\n\t\t\t\tlog.Ctx{\"image\": fp, \"err\": err})\n\n\t\t\treturn fmt.Errorf(\"Invalid multipart image\")\n\t\t}\n\n\t\tdestName = filepath.Join(destDir, info.Fingerprint)\n\t\tf, err := os.Create(destName)\n\t\tif err != nil {\n\t\t\tshared.Log.Error(\n\t\t\t\t\"Failed to save image\",\n\t\t\t\tlog.Ctx{\"image\": fp, \"err\": err})\n\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = io.Copy(f, part)\n\t\tf.Close()\n\n\t\tif err != nil {\n\t\t\tshared.Log.Error(\n\t\t\t\t\"Failed to save image\",\n\t\t\t\tlog.Ctx{\"image\": fp, \"err\": err})\n\n\t\t\treturn err\n\t\t}\n\n\t\t// Get the rootfs tarball\n\t\tpart, err = mr.NextPart()\n\t\tif err != nil {\n\t\t\tshared.Log.Error(\n\t\t\t\t\"Invalid multipart image\",\n\t\t\t\tlog.Ctx{\"image\": fp, \"err\": err})\n\n\t\t\treturn err\n\t\t}\n\n\t\tif part.FormName() != \"rootfs\" 
{\n\t\t\tshared.Log.Error(\n\t\t\t\t\"Invalid multipart image\",\n\t\t\t\tlog.Ctx{\"image\": fp})\n\t\t\treturn fmt.Errorf(\"Invalid multipart image\")\n\t\t}\n\n\t\tdestName = filepath.Join(destDir, info.Fingerprint+\".rootfs\")\n\t\tf, err = os.Create(destName)\n\t\tif err != nil {\n\t\t\tshared.Log.Error(\n\t\t\t\t\"Failed to save image\",\n\t\t\t\tlog.Ctx{\"image\": fp, \"err\": err})\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = io.Copy(f, part)\n\t\tf.Close()\n\n\t\tif err != nil {\n\t\t\tshared.Log.Error(\n\t\t\t\t\"Failed to save image\",\n\t\t\t\tlog.Ctx{\"image\": fp, \"err\": err})\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tdestName = filepath.Join(destDir, info.Fingerprint)\n\n\t\tf, err := os.Create(destName)\n\t\tif err != nil {\n\t\t\tshared.Log.Error(\n\t\t\t\t\"Failed to save image\",\n\t\t\t\tlog.Ctx{\"image\": fp, \"err\": err})\n\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = io.Copy(f, body)\n\t\tf.Close()\n\n\t\tif err != nil {\n\t\t\tshared.Log.Error(\n\t\t\t\t\"Failed to save image\",\n\t\t\t\tlog.Ctx{\"image\": fp, \"err\": err})\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif directDownload {\n\t\timageMeta, err := getImageMetadata(destName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tinfo.Architecture = imageMeta.Architecture\n\t\tinfo.CreationDate = time.Unix(imageMeta.CreationDate, 0)\n\t\tinfo.ExpiryDate = time.Unix(imageMeta.ExpiryDate, 0)\n\t\tinfo.Properties = imageMeta.Properties\n\t}\n\n\t// By default, make all downloaded images private\n\tinfo.Public = false\n\n\t_, err = imageBuildFromInfo(d, info)\n\tif err != nil {\n\t\tshared.Log.Error(\n\t\t\t\"Failed to create image\",\n\t\t\tlog.Ctx{\"image\": fp, \"err\": err})\n\n\t\treturn err\n\t}\n\n\tshared.Log.Info(\n\t\t\"Download succeeded\",\n\t\tlog.Ctx{\"image\": fp})\n\n\tif forContainer {\n\t\treturn dbImageLastAccessInit(d.db, fp)\n\t}\n\n\treturn nil\n}", "func VerifyIntegrity(dc *dockerclient.Client, notaryServerURL, imageRef string) bool {\n\n\tif notaryServerURL == \"\" {\n\t\tlog.Println(\"Notary URL is not specified in flavor.\")\n\t\treturn false\n\t}\n\n\t// Kubelet passes along image references as sha sums\n\t// we need to convert these back to readable names to proceed further\n\tif strings.HasPrefix(imageRef, imageNameShaPrefix) {\n\t\timage, err := getImageName(dc, imageRef)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error retrieving the image name and tag.\", err)\n\t\t\treturn false\n\t\t}\n\t\timageRef = image\n\t}\n\n\t// Make sense of the image reference\n\tregistryAddr, imageName, tag, err := util.GetRegistryAddr(imageRef)\n\tif err != nil {\n\t\tlog.Println(\"Failed in parsing Registry Address from Image reference.\", err, imageRef)\n\t\treturn false\n\t}\n\n\tfinalImageRef := \"\"\n\n\t// Handling use case where the tag name is specified with the sha256sum,\n\t// with this the tag parsed by GetRegistryAddr is blank,\n\t// we pass it as is in the form: registry:[port]/imagename@sha256:shasum\n\tif strings.Contains(imageRef, imageTagShaSeparator) {\n\t\tfinalImageRef = registryAddr + \"/\" + imageName + imageTagShaSeparator + strings.Split(imageRef, imageTagShaSeparator)[1]\n\t} else {\n\t\tfinalImageRef = registryAddr + \"/\" + imageName + \":\" + tag\n\t}\n\n\ttrustPullCmd := dockerContentTrustServer + notaryServerURL + \";\" + dockerPullCmd + \" \" + finalImageRef\n\tlog.Println(\"Docker trusted pull command: \", trustPullCmd)\n\n\ttrustPullCmdOut, trustPullCmdErr := exec.Command(\"bash\", \"-c\", trustPullCmd).Output()\n\tlog.Println(\"Trusted pull returned: \", 
string(trustPullCmdOut))\n\n\t// Was there an error? if yes, then assume not trusted and don't allow\n\tif trustPullCmdErr != nil {\n\t\tlog.Println(\"Trust Inspect returned error: \", trustPullCmdErr.Error())\n\t\treturn false\n\t}\n\n\treturn true\n}", "func testFetchURL() {\n\tconst url string = \"http://gopl.io\"\n\tfmt.Printf(\"\\nbytes of url (%s): %d\\n\", url, len(myFetchURL(url)))\n}", "func findImage(n *hetznerNodeGroup, serverType *hcloud.ServerType) (*hcloud.Image, error) {\n\t// Select correct image based on server type architecture\n\timage, _, err := n.manager.client.Image.GetForArchitecture(context.TODO(), n.manager.image, serverType.Architecture)\n\tif err != nil {\n\t\t// Keep looking for label if image was not found by id or name\n\t\tif !strings.HasPrefix(err.Error(), \"image not found\") {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif image != nil {\n\t\treturn image, nil\n\t}\n\n\t// Look for snapshot with label\n\timages, err := n.manager.client.Image.AllWithOpts(context.TODO(), hcloud.ImageListOpts{\n\t\tType: []hcloud.ImageType{hcloud.ImageTypeSnapshot},\n\t\tStatus: []hcloud.ImageStatus{hcloud.ImageStatusAvailable},\n\t\tSort: []string{\"created:desc\"},\n\t\tArchitecture: []hcloud.Architecture{serverType.Architecture},\n\t\tListOpts: hcloud.ListOpts{\n\t\t\tLabelSelector: n.manager.image,\n\t\t},\n\t})\n\n\tif err != nil || len(images) == 0 {\n\t\treturn nil, fmt.Errorf(\"unable to find image %s with architecture %s: %v\", n.manager.image, serverType.Architecture, err)\n\t}\n\n\treturn images[0], nil\n}", "func (c MockDockerClient) ImagePull(ctx context.Context, imageName string) error {\n\tif c.ImagePullFn != nil {\n\t\tfmt.Println(\"[MockDockerClient] In \", utils.CurrentFunctionName())\n\t\tfmt.Println(\"[MockDockerClient] - ctx: \", ctx)\n\t\tfmt.Println(\"[MockDockerClient] - imageName: \", imageName)\n\t\treturn c.ImagePullFn(ctx, imageName)\n\t}\n\tpanic(fmt.Sprintf(\"No function defined for: %s\", utils.CurrentFunctionName()))\n}", "func (o *ContainerSpec) GetImageOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Image, true\n}", "func (s *Client) Image(fileID string, page int) (file []byte, err error) {\n\tif page <= 0 {\n\t\tpage = 1\n\t}\n\tqueryParam := fmt.Sprintf(\"?page=%d\", page)\n\turl := strings.Join([]string{s.config.apiBaseURL, \"/result/image/\", fileID, queryParam}, \"\")\n\n\tlog.Printf(\"get image url %s\", url)\n\treq, err := http.NewRequest(\"GET\", url, strings.NewReader(\"\"))\n\tif err != nil {\n\t\treturn\n\t}\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\treq.Header.Add(\"Authorization\", strings.Join([]string{\"Bearer \", s.getToken()}, \"\"))\n\n\tres, err := s.httpClient.Do(req)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer res.Body.Close()\n\tfile, err = ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn\n}", "func (cli Client) ImagePull(options v1alpha1.OCIPullOptions) (v1alpha1.OCIPullResponse, error) {\n\n\tpullFlags := []command.Flag{\n\t\t// Buildah registry auth in format username[:password]\n\t\t{Name: \"creds\", Value: options.RegistryAuth, Short: false, OmitEmpty: true},\n\t}\n\n\tcmd := command.Builder(\"buildah\").Command(\"pull\").Flags(pullFlags...).Args(options.Ref).Build()\n\tcli.Logger.WithField(\"cmd\", cmd).Debugln(\"executing pull with command\")\n\n\tstdout, stderr, err := execute(&cmd)\n\tif err != nil {\n\t\tcli.Logger.WithError(err).Errorln(\"error building image...\")\n\t\treturn v1alpha1.OCIPullResponse{}, err\n\t}\n\treturn 
v1alpha1.OCIPullResponse{\n\t\tBody: stdout,\n\t\tExec: &cmd,\n\t\tStderr: stderr,\n\t}, nil\n}", "func (mc *MockContainer) ImagePull() error {\n\treturn mc.MockImagePull()\n}", "func failedRemote(err error) bool {\n\tswitch t := err.(type) {\n\tcase *net.OpError:\n\t\tif strings.HasPrefix(t.Net, \"tcp\") {\n\t\t\tswitch t.Op {\n\t\t\tcase \"dial\", \"read\", \"write\":\n\t\t\t\treturn true\n\t\t\t}\n\t\t} else if strings.HasPrefix(t.Net, \"udp\") {\n\t\t\tswitch t.Op {\n\t\t\tcase \"write\":\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func notifyNewImage(basePath, imageUrl string) error {\n\thook := os.Getenv(\"WEB_HOOK\")\n\tif hook == \"\" {\n\t\treturn nil\n\t}\n\n\ttype photo struct {\n\t\tUrl string `json:\"remote_url\"`\n\t}\n\n\ttype payload struct {\n\t\tPhoto *photo `json:\"photo\"`\n\t}\n\n\tbody := &payload{&photo{Url: imageUrl}}\n\tb, err := json.Marshal(body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfullURL := fmt.Sprintf(\"%s/buckets/%s/photos.json\", hook, basePath)\n\n\tlog.Infof(\"Notifying hook at %s\", fullURL)\n\n\treq, err := http.NewRequest(\"POST\", fullURL, bytes.NewBuffer(b))\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode < 200 || resp.StatusCode > 299 {\n\t\treturn fmt.Errorf(\"expecting status code in 200 .. 299 got %d\", resp.StatusCode)\n\t}\n\tlog.Infof(\"Notification sent to %s\", hook)\n\treturn nil\n}", "func (r *RepoRef) IsRemote() bool {\n\treturn r.URL != \"\"\n}", "func FetchFoxPic() string {\n\tvar foxPic string\n\tresp, err := http.Get(\"https://randomfox.ca/floof/\")\n\tif err != nil {\n\t\tlog.Fatal(\"Failed to get fox picture.\")\n\t}\n\n\tdefer resp.Body.Close()\n\tvar result map[string]interface{}\n\n\tjson.NewDecoder(resp.Body).Decode(&result)\n\tif foxPic, ok := result[\"image\"].(string); ok {\n\t\treturn foxPic\n\t}\n\treturn foxPic\n}", "func testDescribeImages(input *ec2.DescribeImagesInput) (*ec2.DescribeImagesOutput, error) {\n\tif *input.ImageIds[0] == \"bad\" {\n\t\treturn nil, fmt.Errorf(\"error\")\n\t}\n\treturn testDescribeImagesOutput(), nil\n}", "func (i *interactor) Fetch(arg ...string) error {\n\tremote, err := i.remote()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not resolve remote for fetching: %w\", err)\n\t}\n\targ = append([]string{\"fetch\", remote}, arg...)\n\ti.logger.Infof(\"Fetching from %s\", remote)\n\tif out, err := i.executor.Run(arg...); err != nil {\n\t\treturn fmt.Errorf(\"error fetching: %w %v\", err, string(out))\n\t}\n\treturn nil\n}", "func (o *VulnUpdateNotificationPayloadAllOf) GetImageDigestOk() (*string, bool) {\n\tif o == nil || o.ImageDigest == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ImageDigest, true\n}", "func (*ImagePullReply) Descriptor() ([]byte, []int) {\n\treturn file_proto_images_alive_proto_rawDescGZIP(), []int{1}\n}", "func (c *TestClient) GetImage(project, name string) (*compute.Image, error) {\n\tif c.GetImageFn != nil {\n\t\treturn c.GetImageFn(project, name)\n\t}\n\treturn c.client.GetImage(project, name)\n}", "func CheckinPeopleImageGET(w http.ResponseWriter, r *http.Request) {\n\tid := r.URL.Query().Get(\"id\")\n image_id := r.URL.Query().Get(\"image_id\")\n\n var image string\n if image_id == \"\" {\n p, err := people.GetPerson(id)\n if err != nil {\n Error(w, err, http.StatusInternalServerError)\n return\n }\n image = p.Image\n } else {\n imgs, err := images.GetImages(id, image_id)\n if err 
!= nil || len(imgs.Images) != 1 {\n Error(w, err, http.StatusInternalServerError)\n return\n }\n image = imgs.Images[0]\n }\n\n by, err := base64.StdEncoding.DecodeString(image)\n if err != nil {\n Error(w, err, http.StatusInternalServerError)\n return\n }\n\tw.Header().Set(\"Content-Type\", \"image/*\")\n\tw.Write(by)\n}", "func fetchRemote(f *File, timeout int, concurrent bool) error {\n\tdefer f.Terminate()\n\n\tif concurrent {\n\t\tconcurrentLock.Lock()\n\t\tdefer concurrentLock.Unlock()\n\t}\n\n\tfilename := path.Base(f.Url)\n\tif f.StoragePath != \"\" {\n\t\tfilename = strings.Join([]string{f.StoragePath, filename}, string(os.PathSeparator))\n\t}\n\toriginal := filename\n\tfilename = fmt.Sprintf(\"%s.download.%d\", filename, time.Now().Nanosecond())\n\tif concurrent {\n\t\tif fetchedSize > 0 { //another process already fetched\n\t\t\tf.Size = fetchedSize\n\t\t\tf.path = original\n\t\t\tf.Status.Type = status.FETCHED\n\t\t\treturn nil\n\t\t}\n\t}\n\terr := validateRemote(f)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf.path = filename\n\n\tout, err := os.Create(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer out.Close()\n\n\tf.client = getHttpClient(f, timeout)\n\n\tf.path = filename\n\n\tresp, err := f.client.Get(f.Url)\n\tif err != nil {\n\t\tf.Delete()\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tresp.Header.Set(\"Connection\", \"Keep-Alive\")\n\tresp.Header.Set(\"Accept-Language\", \"en-US\")\n\tresp.Header.Set(\"User-Agent\", \"Mozilla/5.0\")\n\tcontentLength := resp.Header.Get(\"Content-Length\")\n\tif contentLength == \"\" {\n\t\tf.Delete()\n\t\treturn errors.New(\"Can not get content length, is this a binary file?\")\n\t}\n\tsize, err := strconv.Atoi(contentLength)\n\tif err != nil {\n\t\tf.Delete()\n\t\treturn errors.New(\"Can not parse content-length, is this binary? 
\" + err.Error())\n\t}\n\n\tf.Size = int64(size)\n\tquit := make(chan bool)\n\tdefer close(quit)\n\n\tgo downloadFile(quit, f)\n\n\t_, err = io.Copy(out, resp.Body)\n\tif err != nil {\n\t\tf.Delete()\n\t\treturn err\n\t}\n\n\tf.Status.Type = status.FETCHED\n\tfetchedSize = f.Size\n\thasLock = false\n\n\tmoveLock.Lock()\n\tdefer moveLock.Unlock()\n\terr = os.Rename(filename, original)\n\tif err != nil {\n\t\tf.Delete()\n\t\treturn err\n\t}\n\tf.path = original\n\treturn nil\n}", "func GetImage(ctx context.Context, nameOrID string, options *GetOptions) (*entities.ImageInspectReport, error) {\n\tif options == nil {\n\t\toptions = new(GetOptions)\n\t}\n\tconn, err := bindings.GetClient(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tparams, err := options.ToParams()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tinspectedData := entities.ImageInspectReport{}\n\tresponse, err := conn.DoRequest(ctx, nil, http.MethodGet, \"/images/%s/json\", params, nil, nameOrID)\n\tif err != nil {\n\t\treturn &inspectedData, err\n\t}\n\tdefer response.Body.Close()\n\n\treturn &inspectedData, response.Process(&inspectedData)\n}", "func (imageService Service) ImageStatus(Id string) (Status string, err error) {\n\n\n\turl := strings.TrimSuffix(imageService.URL, \"/\") + \"/images/\" + Id\n\tvar headers http.Header\n\theaders, err = misc.GetHeader(url, imageService.TokenID, imageService.Client)\n\tif err != nil {\n\t\treturn \"\", err\n\t} else {\n\t\tfor header, value := range headers {\n\t\t\t//log.Printf (\"header '%s'='%s'\", header, value[0])\n\t\t\tif strings.ToLower(header) == \"x-image-meta-status\" {\n\t\t\t\tStatus = value[0]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\treturn Status, nil\n}", "func (p Pipeline) PullImages(force bool) error {\n\tif Verbose {\n\t\tpipelineLogger.Printf(\"Pull Images:\")\n\t}\n\tcount, elapsedTime, totalElapsedTime, err := p.runCommand(runConfig{\n\t\tselection: func(step Step) bool {\n\t\t\treturn step.IsPullable()\n\t\t},\n\t\trun: func(runner Runner, step Step) func() error {\n\t\t\treturn func() error {\n\t\t\t\tif err := runner.ImageExistenceChecker(step)(); err != nil || force {\n\t\t\t\t\treturn runner.ImagePuller(step)()\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t},\n\t})\n\tif Verbose {\n\t\tpipelineLogger.Printf(\"Pulled %d images in %s\", count, elapsedTime)\n\t\tpipelineLogger.Printf(\"Total time spent pulling images: %s\", totalElapsedTime)\n\t}\n\treturn err\n}", "func imageGet(L *lua.LState) int {\n\tp := checkImage(L)\n\n\tL.Push(lua.LNumber(*p))\n\n\treturn 1\n}", "func (c *dockerClientMock) DownloadImageContent(imageSource, filePath string) (v1.Image, error) {\n\treturn &fake.FakeImage{}, nil // fmt.Errorf(\"%s\", filePath)\n}", "func (f Fetcher) printImageCount() {\n\tdoc, err := goquery.NewDocumentFromReader(f.resp.Body)\n\tcheck(err)\n\tc := len(doc.Find(\"img\").Nodes)\n\tfmt.Printf(\"Image count: %d\\n\", c)\n}", "func Fetch(FS billy.Filesystem, remote, owner, repo, tag string, private bool) error {\n\tsrcRepo := path.Join(owner, repo)\n\tgco := &gogit.CloneOptions{\n\t\tURL: fmt.Sprintf(\"https://%s/%s\", remote, srcRepo),\n\t\tDepth: 1,\n\t}\n\n\tif tag != \"v0.0.0\" {\n\t\tgco.ReferenceName = plumbing.NewTagReferenceName(tag)\n\t\tgco.SingleBranch = true\n\t}\n\n\tif private {\n\t\tif netrc, err := yagu.NetrcCredentials(remote); err == nil {\n\t\t\tgco.Auth = &http.BasicAuth{\n\t\t\t\tUsername: netrc.Login,\n\t\t\t\tPassword: netrc.Password,\n\t\t\t}\n\t\t} else if ssh, err := yagu.SSHCredentials(remote); err == nil {\n\t\t\tgco.Auth = 
ssh.Keys\n\t\t\tgco.URL = fmt.Sprintf(\"%s@%s:%s\", ssh.User, remote, srcRepo)\n\t\t} else {\n\t\t\tgco.URL = fmt.Sprintf(\"%s@%s:%s\", \"git\", remote, srcRepo)\n\t\t}\n\t}\n\n\t_, err := gogit.Clone(memory.NewStorage(), FS, gco)\n\n\treturn err\n}", "func diffImage(c *Client, desired, actual *Image, opts ...dcl.ApplyOption) ([]imageDiff, error) {\n\tif desired == nil || actual == nil {\n\t\treturn nil, fmt.Errorf(\"nil resource passed to diff - always a programming error: %#v, %#v\", desired, actual)\n\t}\n\n\tvar diffs []imageDiff\n\tif !dcl.IsZeroValue(desired.Description) && !dcl.StringCanonicalize(desired.Description, actual.Description) {\n\t\tc.Config.Logger.Infof(\"Detected diff in Description.\\nDESIRED: %v\\nACTUAL: %v\", desired.Description, actual.Description)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"Description\",\n\t\t})\n\t}\n\tif !reflect.DeepEqual(desired.DiskSizeGb, actual.DiskSizeGb) {\n\t\tc.Config.Logger.Infof(\"Detected diff in DiskSizeGb.\\nDESIRED: %v\\nACTUAL: %v\", desired.DiskSizeGb, actual.DiskSizeGb)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"DiskSizeGb\",\n\t\t})\n\t}\n\tif !dcl.IsZeroValue(desired.Family) && !dcl.StringCanonicalize(desired.Family, actual.Family) {\n\t\tc.Config.Logger.Infof(\"Detected diff in Family.\\nDESIRED: %v\\nACTUAL: %v\", desired.Family, actual.Family)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"Family\",\n\t\t})\n\t}\n\tif compareImageGuestOsFeatureSlice(c, desired.GuestOsFeature, actual.GuestOsFeature) {\n\t\tc.Config.Logger.Infof(\"Detected diff in GuestOsFeature.\\nDESIRED: %v\\nACTUAL: %v\", desired.GuestOsFeature, actual.GuestOsFeature)\n\t\ttoAdd, toRemove := compareImageGuestOsFeatureSets(c, desired.GuestOsFeature, actual.GuestOsFeature)\n\t\tif len(toAdd) > 0 {\n\t\t\tdiffs = append(diffs, imageDiff{\n\t\t\t\tRequiresRecreate: true,\n\t\t\t\tFieldName: \"GuestOsFeature\",\n\t\t\t})\n\t\t}\n\t\tif len(toRemove) > 0 {\n\t\t\tdiffs = append(diffs, imageDiff{\n\t\t\t\tRequiresRecreate: true,\n\t\t\t\tFieldName: \"GuestOsFeature\",\n\t\t\t})\n\t\t}\n\t}\n\tif compareImageImageEncryptionKey(c, desired.ImageEncryptionKey, actual.ImageEncryptionKey) {\n\t\tc.Config.Logger.Infof(\"Detected diff in ImageEncryptionKey.\\nDESIRED: %v\\nACTUAL: %v\", desired.ImageEncryptionKey, actual.ImageEncryptionKey)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"ImageEncryptionKey\",\n\t\t})\n\t}\n\tif !dcl.MapEquals(desired.Labels, actual.Labels, []string(nil)) {\n\t\tc.Config.Logger.Infof(\"Detected diff in Labels.\\nDESIRED: %v\\nACTUAL: %v\", desired.Labels, actual.Labels)\n\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tUpdateOp: &updateImageSetLabelsOperation{},\n\t\t\tFieldName: \"Labels\",\n\t\t})\n\n\t}\n\tif !dcl.StringSliceEquals(desired.License, actual.License) {\n\t\tc.Config.Logger.Infof(\"Detected diff in License.\\nDESIRED: %v\\nACTUAL: %v\", desired.License, actual.License)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"License\",\n\t\t})\n\t}\n\tif !dcl.IsZeroValue(desired.Name) && !dcl.StringCanonicalize(desired.Name, actual.Name) {\n\t\tc.Config.Logger.Infof(\"Detected diff in Name.\\nDESIRED: %v\\nACTUAL: %v\", desired.Name, actual.Name)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"Name\",\n\t\t})\n\t}\n\tif compareImageShieldedInstanceInitialState(c, 
desired.ShieldedInstanceInitialState, actual.ShieldedInstanceInitialState) {\n\t\tc.Config.Logger.Infof(\"Detected diff in ShieldedInstanceInitialState.\\nDESIRED: %v\\nACTUAL: %v\", desired.ShieldedInstanceInitialState, actual.ShieldedInstanceInitialState)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"ShieldedInstanceInitialState\",\n\t\t})\n\t}\n\tif !dcl.IsZeroValue(desired.SourceDisk) && !dcl.StringCanonicalize(desired.SourceDisk, actual.SourceDisk) {\n\t\tc.Config.Logger.Infof(\"Detected diff in SourceDisk.\\nDESIRED: %v\\nACTUAL: %v\", desired.SourceDisk, actual.SourceDisk)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"SourceDisk\",\n\t\t})\n\t}\n\tif compareImageSourceDiskEncryptionKey(c, desired.SourceDiskEncryptionKey, actual.SourceDiskEncryptionKey) {\n\t\tc.Config.Logger.Infof(\"Detected diff in SourceDiskEncryptionKey.\\nDESIRED: %v\\nACTUAL: %v\", desired.SourceDiskEncryptionKey, actual.SourceDiskEncryptionKey)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"SourceDiskEncryptionKey\",\n\t\t})\n\t}\n\tif !dcl.IsZeroValue(desired.SourceImage) && !dcl.StringCanonicalize(desired.SourceImage, actual.SourceImage) {\n\t\tc.Config.Logger.Infof(\"Detected diff in SourceImage.\\nDESIRED: %v\\nACTUAL: %v\", desired.SourceImage, actual.SourceImage)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"SourceImage\",\n\t\t})\n\t}\n\tif compareImageSourceImageEncryptionKey(c, desired.SourceImageEncryptionKey, actual.SourceImageEncryptionKey) {\n\t\tc.Config.Logger.Infof(\"Detected diff in SourceImageEncryptionKey.\\nDESIRED: %v\\nACTUAL: %v\", desired.SourceImageEncryptionKey, actual.SourceImageEncryptionKey)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"SourceImageEncryptionKey\",\n\t\t})\n\t}\n\tif !dcl.IsZeroValue(desired.SourceImageId) && !dcl.StringCanonicalize(desired.SourceImageId, actual.SourceImageId) {\n\t\tc.Config.Logger.Infof(\"Detected diff in SourceImageId.\\nDESIRED: %v\\nACTUAL: %v\", desired.SourceImageId, actual.SourceImageId)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"SourceImageId\",\n\t\t})\n\t}\n\tif !dcl.IsZeroValue(desired.SourceSnapshot) && !dcl.StringCanonicalize(desired.SourceSnapshot, actual.SourceSnapshot) {\n\t\tc.Config.Logger.Infof(\"Detected diff in SourceSnapshot.\\nDESIRED: %v\\nACTUAL: %v\", desired.SourceSnapshot, actual.SourceSnapshot)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"SourceSnapshot\",\n\t\t})\n\t}\n\tif compareImageSourceSnapshotEncryptionKey(c, desired.SourceSnapshotEncryptionKey, actual.SourceSnapshotEncryptionKey) {\n\t\tc.Config.Logger.Infof(\"Detected diff in SourceSnapshotEncryptionKey.\\nDESIRED: %v\\nACTUAL: %v\", desired.SourceSnapshotEncryptionKey, actual.SourceSnapshotEncryptionKey)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"SourceSnapshotEncryptionKey\",\n\t\t})\n\t}\n\tif !dcl.IsZeroValue(desired.SourceSnapshotId) && !dcl.StringCanonicalize(desired.SourceSnapshotId, actual.SourceSnapshotId) {\n\t\tc.Config.Logger.Infof(\"Detected diff in SourceSnapshotId.\\nDESIRED: %v\\nACTUAL: %v\", desired.SourceSnapshotId, actual.SourceSnapshotId)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"SourceSnapshotId\",\n\t\t})\n\t}\n\tif 
!reflect.DeepEqual(desired.SourceType, actual.SourceType) {\n\t\tc.Config.Logger.Infof(\"Detected diff in SourceType.\\nDESIRED: %v\\nACTUAL: %v\", desired.SourceType, actual.SourceType)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"SourceType\",\n\t\t})\n\t}\n\tif !dcl.StringSliceEquals(desired.StorageLocation, actual.StorageLocation) {\n\t\tc.Config.Logger.Infof(\"Detected diff in StorageLocation.\\nDESIRED: %v\\nACTUAL: %v\", desired.StorageLocation, actual.StorageLocation)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"StorageLocation\",\n\t\t})\n\t}\n\tif compareImageDeprecated(c, desired.Deprecated, actual.Deprecated) {\n\t\tc.Config.Logger.Infof(\"Detected diff in Deprecated.\\nDESIRED: %v\\nACTUAL: %v\", desired.Deprecated, actual.Deprecated)\n\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tUpdateOp: &updateImageDeprecateOperation{},\n\t\t\tFieldName: \"Deprecated\",\n\t\t})\n\n\t}\n\t// We need to ensure that this list does not contain identical operations *most of the time*.\n\t// There may be some cases where we will need multiple copies of the same operation - for instance,\n\t// if a resource has multiple prerequisite-containing fields. For now, we don't know of any\n\t// such examples and so we deduplicate unconditionally.\n\n\t// The best way for us to do this is to iterate through the list\n\t// and remove any copies of operations which are identical to a previous operation.\n\t// This is O(n^2) in the number of operations, but n will always be very small,\n\t// even 10 would be an extremely high number.\n\tvar opTypes []string\n\tvar deduped []imageDiff\n\tfor _, d := range diffs {\n\t\t// Two operations are considered identical if they have the same type.\n\t\t// The type of an operation is derived from the name of the update method.\n\t\tif !dcl.StringSliceContains(fmt.Sprintf(\"%T\", d.UpdateOp), opTypes) {\n\t\t\tdeduped = append(deduped, d)\n\t\t\topTypes = append(opTypes, fmt.Sprintf(\"%T\", d.UpdateOp))\n\t\t} else {\n\t\t\tc.Config.Logger.Infof(\"Omitting planned operation of type %T since once is already scheduled.\", d.UpdateOp)\n\t\t}\n\t}\n\n\treturn deduped, nil\n}", "func (o *V1WorkloadSpec) GetImagePullCredentialsOk() (*[]V1ImagePullCredential, bool) {\n\tif o == nil || o.ImagePullCredentials == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ImagePullCredentials, true\n}" ]
[ "0.6768596", "0.64943826", "0.6419135", "0.6157075", "0.6140877", "0.6051301", "0.6051102", "0.6015688", "0.6012333", "0.5803486", "0.5785222", "0.5717334", "0.568571", "0.5613724", "0.55998373", "0.5590659", "0.5588034", "0.5576534", "0.55490285", "0.55427665", "0.5537828", "0.5526416", "0.55005264", "0.5471348", "0.5402908", "0.53753555", "0.536118", "0.536118", "0.5328593", "0.5328454", "0.53193104", "0.5317137", "0.5296639", "0.5290764", "0.5279711", "0.52735686", "0.52711207", "0.5265115", "0.52614826", "0.52243525", "0.5192623", "0.5191988", "0.51782346", "0.51651263", "0.5155625", "0.5152187", "0.5136921", "0.5102327", "0.50981534", "0.50797987", "0.50765634", "0.50721174", "0.5066111", "0.50644153", "0.5055493", "0.50529253", "0.5045138", "0.5041352", "0.50304353", "0.5024896", "0.5021703", "0.501594", "0.5015782", "0.50125426", "0.5012218", "0.5009808", "0.5007416", "0.49976766", "0.49857664", "0.49849772", "0.4982376", "0.497695", "0.49757487", "0.49688014", "0.4968517", "0.49660015", "0.49556074", "0.49524516", "0.49519998", "0.49518737", "0.49392322", "0.493193", "0.49304387", "0.49283153", "0.49198437", "0.48983747", "0.48877427", "0.4887672", "0.4881911", "0.48788828", "0.48776862", "0.4872435", "0.4865807", "0.48605454", "0.48358285", "0.48321247", "0.48264188", "0.48159924", "0.48106977", "0.48057786" ]
0.66579026
1
FetchUpdatedLocalImage mocks base method
func (m *MockFetcher) FetchUpdatedLocalImage(arg0 context.Context, arg1 string, arg2 io.Writer) (image.Image, error) {
	ret := m.ctrl.Call(m, "FetchUpdatedLocalImage", arg0, arg1, arg2)
	ret0, _ := ret[0].(image.Image)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *MockFetcher) FetchLocalImage(arg0 string) (image.Image, error) {\n\tret := m.ctrl.Call(m, \"FetchLocalImage\", arg0)\n\tret0, _ := ret[0].(image.Image)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockFetcher) FetchRemoteImage(arg0 string) (image.Image, error) {\n\tret := m.ctrl.Call(m, \"FetchRemoteImage\", arg0)\n\tret0, _ := ret[0].(image.Image)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockAPI) UpdateImageStatus(arg0 context.Context, arg1 *models.Host, arg2 *models.ContainerImageAvailability, arg3 *gorm.DB) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateImageStatus\", arg0, arg1, arg2, arg3)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func TestIsImageInLocalRegistry(t *testing.T) {\n\ttype testDef struct {\n\t\timageName string\n\t\tdocker test.FakeDockerClient\n\t\texpectedResult bool\n\t\texpectedError string\n\t}\n\ttests := map[string]testDef{\n\t\t\"ImageFound\": {\"a_test_image\", test.FakeDockerClient{}, true, \"\"},\n\t\t\"ImageNotFound\": {\"a_test_image:sometag\", test.FakeDockerClient{}, false, \"unable to get metadata for a_test_image:sometag\"},\n\t}\n\n\tfor test, def := range tests {\n\t\tdh := getDocker(&def.docker)\n\t\tfake := dh.kubeDockerClient.(*dockertools.FakeDockerClient)\n\t\tif def.expectedResult {\n\t\t\tfake.Image = &dockertypes.ImageInspect{ID: def.imageName}\n\t\t}\n\n\t\tresult, err := dh.IsImageInLocalRegistry(def.imageName)\n\n\t\tif e := fake.AssertCalls([]string{\"inspect_image\"}); e != nil {\n\t\t\tt.Errorf(\"%+v\", e)\n\t\t}\n\n\t\tif result != def.expectedResult {\n\t\t\tt.Errorf(\"Test - %s: Expected result: %v. Got: %v\", test, def.expectedResult, result)\n\t\t}\n\t\tif err != nil && len(def.expectedError) > 0 && !strings.Contains(err.Error(), def.expectedError) {\n\t\t\tt.Errorf(\"Test - %s: Expected error: Got: %+v\", test, err)\n\t\t}\n\t}\n}", "func TestFetch(t *testing.T) {\n\timage := \"rkt-inspect-fetch.aci\"\n\n\tctx := newRktRunCtx()\n\tdefer ctx.cleanup()\n\n\t// Fetch the image for the first time, this should write the image to the\n\t// on-disk store.\n\toldHash := patchImportAndFetchHash(image, []string{\"--exec=/inspect --read-file\"}, t, ctx)\n\n\t// Fetch the image with the same name but different content, the expecting\n\t// result is that we should get a different hash since we are not fetching\n\t// from the on-disk store.\n\tnewHash := patchImportAndFetchHash(image, []string{\"--exec=/inspect --read-file --write-file\"}, t, ctx)\n\n\tif oldHash == newHash {\n\t\tt.Fatalf(\"ACI hash should be different as the image has changed\")\n\t}\n}", "func ttrPullImageAsserting(ctx context.Context, t *testing.T, client apiclient.APIClient, image string) {\n\terr := ttrPullImage(ctx, client, image)\n\tassert.NilError(t, err)\n}", "func (r *MockRepoManager) mockUpdate() {\n\tr.mtx.Lock()\n\tdefer r.mtx.Unlock()\n\tr.updateCount++\n}", "func (m *MockUpstreamIntf) CachedRemoteDigestOfLocalHeight() blockdigest.Digest {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CachedRemoteDigestOfLocalHeight\")\n\tret0, _ := ret[0].(blockdigest.Digest)\n\treturn ret0\n}", "func fetchLocal(f *File) error {\n\n\terr := validateLocal(f)\n\tif err != nil {\n\t\tf.Status.Type = status.ERROR\n\t\treturn err\n\t}\n\tf.path = f.Url\n\tf.Status.Type = status.FETCHED\n\treturn nil\n\n}", "func (m *MockUsecase) UpdateAvatar(arg0 string, arg1 multipart.File, arg2 string) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateAvatar\", arg0, arg1, arg2)\n\tret0, _ := 
ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (mr *MockFetcherMockRecorder) FetchUpdatedLocalImage(arg0, arg1, arg2 interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"FetchUpdatedLocalImage\", reflect.TypeOf((*MockFetcher)(nil).FetchUpdatedLocalImage), arg0, arg1, arg2)\n}", "func TestBaseImage(t *testing.T) {\n\tctx, err := controllerPrepare()\n\tif err != nil {\n\t\tt.Fatal(\"Fail in controller prepare: \", err)\n\t}\n\teveBaseRef := os.Getenv(\"EVE_BASE_REF\")\n\tif len(eveBaseRef) == 0 {\n\t\teveBaseRef = \"4.10.0\"\n\t}\n\tzArch := os.Getenv(\"ZARCH\")\n\tif len(eveBaseRef) == 0 {\n\t\tzArch = \"amd64\"\n\t}\n\tHV := os.Getenv(\"HV\")\n\tif HV == \"xen\" {\n\t\tHV = \"\"\n\t}\n\tvar baseImageTests = []struct {\n\t\tdataStoreID string\n\t\timageID string\n\t\tbaseID string\n\t\timageRelativePath string\n\t\timageFormat config.Format\n\t\teveBaseRef string\n\t\tzArch string\n\t\tHV string\n\t}{\n\t\t{eServerDataStoreID,\n\n\t\t\t\"1ab8761b-5f89-4e0b-b757-4b87a9fa93ec\",\n\n\t\t\t\"22b8761b-5f89-4e0b-b757-4b87a9fa93ec\",\n\n\t\t\t\"baseos.qcow2\",\n\t\t\tconfig.Format_QCOW2,\n\t\t\teveBaseRef,\n\t\t\tzArch,\n\t\t\tHV,\n\t\t},\n\t}\n\tfor _, tt := range baseImageTests {\n\t\tbaseOSVersion := fmt.Sprintf(\"%s-%s\", tt.eveBaseRef, tt.zArch)\n\t\tif tt.HV != \"\" {\n\t\t\tbaseOSVersion = fmt.Sprintf(\"%s-%s-%s\", tt.eveBaseRef, tt.zArch, tt.HV)\n\t\t}\n\t\tt.Run(baseOSVersion, func(t *testing.T) {\n\n\t\t\terr = prepareBaseImageLocal(ctx, tt.dataStoreID, tt.imageID, tt.baseID, tt.imageRelativePath, tt.imageFormat, baseOSVersion)\n\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"Fail in prepare base image from local file: \", err)\n\t\t\t}\n\t\t\tdeviceCtx, err := ctx.GetDeviceFirst()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"Fail in get first device: \", err)\n\t\t\t}\n\t\t\tdeviceCtx.SetBaseOSConfig([]string{tt.baseID})\n\t\t\tdevUUID := deviceCtx.GetID()\n\t\t\terr = ctx.ConfigSync(deviceCtx)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"Fail in sync config with controller: \", err)\n\t\t\t}\n\t\t\tt.Run(\"Started\", func(t *testing.T) {\n\t\t\t\terr := ctx.InfoChecker(devUUID, map[string]string{\"devId\": devUUID.String(), \"shortVersion\": baseOSVersion}, einfo.ZInfoDevSW, 300)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(\"Fail in waiting for base image update init: \", err)\n\t\t\t\t}\n\t\t\t})\n\t\t\tt.Run(\"Downloaded\", func(t *testing.T) {\n\t\t\t\terr := ctx.InfoChecker(devUUID, map[string]string{\"devId\": devUUID.String(), \"shortVersion\": baseOSVersion, \"downloadProgress\": \"100\"}, einfo.ZInfoDevSW, 1500)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(\"Fail in waiting for base image download progress: \", err)\n\t\t\t\t}\n\t\t\t})\n\t\t\tt.Run(\"Logs\", func(t *testing.T) {\n\t\t\t\tif !checkLogs {\n\t\t\t\t\tt.Skip(\"no LOGS flag set - skipped\")\n\t\t\t\t}\n\t\t\t\terr = ctx.LogChecker(devUUID, map[string]string{\"devId\": devUUID.String(), \"eveVersion\": baseOSVersion}, 1200)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(\"Fail in waiting for base image logs: \", err)\n\t\t\t\t}\n\t\t\t})\n\t\t\ttimeout := time.Duration(1200)\n\n\t\t\tif !checkLogs {\n\t\t\t\ttimeout = 2400\n\t\t\t}\n\t\t\tt.Run(\"Active\", func(t *testing.T) {\n\t\t\t\terr = ctx.InfoChecker(devUUID, map[string]string{\"devId\": devUUID.String(), \"shortVersion\": baseOSVersion, \"status\": \"INSTALLED\", \"partitionState\": \"(inprogress|active)\"}, einfo.ZInfoDevSW, timeout)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(\"Fail in waiting for base image 
installed status: \", err)\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\t}\n\n}", "func (m *MockCEImpl) ImagePull(image string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ImagePull\", image)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (f OriginalFetcher) Fetch(namespace string, sourceURL string, imageHash string) (info *info.ImageProperties, downloaded bool, err error) {\n\tif sourceURL == \"\" && imageHash == \"\" {\n\t\treturn nil, false, fmt.Errorf(\"Missing Hash & URL\")\n\t}\n\n\tif imageHash != \"\" {\n\t\tinfo, err = f.fetchFromStore(namespace, imageHash)\n\t}\n\n\tif sourceURL != \"\" && (err != nil || imageHash == \"\") {\n\t\tinfo, downloaded, err = f.fetchFromSource(namespace, sourceURL)\n\t}\n\n\treturn info, downloaded, err\n}", "func (m *MockModuleService) GetLatestModuleImage(arg0 string) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetLatestModuleImage\", arg0)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockImageTransferer) Stat(arg0 string, arg1 core.Digest) (*core.BlobInfo, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Stat\", arg0, arg1)\n\tret0, _ := ret[0].(*core.BlobInfo)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func fetchUpdatableImageRepos(registry registry.Registry, updateable []*WorkloadUpdate, logger log.Logger) (ImageRepos, error) {\n\treturn FetchImageRepos(registry, workloadContainers(updateable), logger)\n}", "func (m *MockHandler) UpdateProfileAvatar(arg0 http.ResponseWriter, arg1 *http.Request) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"UpdateProfileAvatar\", arg0, arg1)\n}", "func (f *fetcher) fetchImageFrom(appName string, aciURL, ascURL, scheme string, ascFile *os.File, latest bool) (string, error) {\n\tvar rem *store.Remote\n\n\tif f.insecureSkipVerify {\n\t\tif f.ks != nil {\n\t\t\tstderr(\"rkt: warning: TLS verification and signature verification has been disabled\")\n\t\t}\n\t} else if scheme == \"docker\" {\n\t\treturn \"\", fmt.Errorf(\"signature verification for docker images is not supported (try --insecure-skip-verify)\")\n\t}\n\n\tif (f.local && scheme != \"file\") || (scheme != \"file\" && !latest) {\n\t\tvar err error\n\t\tok := false\n\t\trem, ok, err = f.s.GetRemote(aciURL)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif ok {\n\t\t\tif f.local {\n\t\t\t\tstderr(\"rkt: using image in local store for app %s\", appName)\n\t\t\t\treturn rem.BlobKey, nil\n\t\t\t}\n\t\t\tif useCached(rem.DownloadTime, rem.CacheMaxAge) {\n\t\t\t\tstderr(\"rkt: found image in local store, skipping fetching from %s\", aciURL)\n\t\t\t\treturn rem.BlobKey, nil\n\t\t\t}\n\t\t}\n\t\tif f.local {\n\t\t\treturn \"\", fmt.Errorf(\"url %s not available in local store\", aciURL)\n\t\t}\n\t}\n\n\tif scheme != \"file\" && f.debug {\n\t\tstderr(\"rkt: fetching image from %s\", aciURL)\n\t}\n\n\tvar etag string\n\tif rem != nil {\n\t\tetag = rem.ETag\n\t}\n\tentity, aciFile, cd, err := f.fetch(appName, aciURL, ascURL, ascFile, etag)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif cd != nil && cd.useCached {\n\t\tif rem != nil {\n\t\t\treturn rem.BlobKey, nil\n\t\t} else {\n\t\t\t// should never happen\n\t\t\tpanic(\"asked to use cached image but remote is nil\")\n\t\t}\n\t}\n\tif scheme != \"file\" {\n\t\tdefer os.Remove(aciFile.Name())\n\t}\n\n\tif entity != nil && !f.insecureSkipVerify {\n\t\tstderr(\"rkt: signature verified:\")\n\t\tfor _, v := range entity.Identities {\n\t\t\tstderr(\" %s\", v.Name)\n\t\t}\n\t}\n\tkey, err := f.s.WriteACI(aciFile, 
latest)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif scheme != \"file\" {\n\t\trem := store.NewRemote(aciURL, ascURL)\n\t\trem.BlobKey = key\n\t\trem.DownloadTime = time.Now()\n\t\tif cd != nil {\n\t\t\trem.ETag = cd.etag\n\t\t\trem.CacheMaxAge = cd.maxAge\n\t\t}\n\t\terr = f.s.WriteRemote(rem)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn key, nil\n}", "func ttrPushImageAsserting(ctx context.Context, t *testing.T, client apiclient.APIClient, image string) {\n\trc, err := client.ImagePush(ctx, ttrImageName(image), types.ImagePushOptions{RegistryAuth: \"{}\"})\n\tassert.Assert(t, err)\n\tif rc != nil {\n\t\tbody, err := readAllAndClose(rc)\n\t\tassert.Assert(t, err)\n\t\tassert.Assert(t, strings.Contains(body, `\"status\":\"latest: digest: `))\n\t}\n}", "func (imp *Importer) fetchLocalImages() {\n items, err := ioutil.ReadDir(STORE_DIR)\n if err != nil {\n imp.sendErr(err)\n return\n }\n\n for _, info := range items {\n if info.IsDir() { continue }\n filename := info.Name()\n\n file, err := os.Open(fmt.Sprintf(\"%s/%s\", STORE_DIR, filename))\n if err != nil {\n imp.sendErr(err)\n return\n }\n\n img, err := jpeg.Decode(file)\n if err != nil {\n log.Printf(\"Error decoding image file %s to jpeg\\n\", filename)\n continue\n }\n\n ext := filepath.Ext(filename)\n id := filename[:len(filename)-len(ext)]\n\n imp.send(&imagedata.ImageData{Id: id, Data: &img})\n }\n}", "func (m *MockRepository) Update(arg0 int, arg1 entity.FeiraLivre) (*entity.FeiraLivre, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Update\", arg0, arg1)\n\tret0, _ := ret[0].(*entity.FeiraLivre)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockManager) GetLoadedImageName() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetLoadedImageName\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}", "func TestBaseImage(t *testing.T) {\n\t// test with 'original.png'\n\timgs := map[string][]byte{\n\t\t\"original.png\": []byte(\"image\"),\n\t}\n\t_, err := backend.baseImage(imgs)\n\tif err != nil {\n\t\tt.Errorf(\"Got error %s\", err)\n\t}\n\n\t// test with 'original.jpg'\n\timgs = map[string][]byte{\n\t\t\"original.jpg\": []byte(\"image\"),\n\t}\n\t_, err = backend.baseImage(imgs)\n\tif err != nil {\n\t\tt.Errorf(\"Got error %s\", err)\n\t}\n\n\t// without 'original.*' should get an error\n\timgs = map[string][]byte{\n\t\t\"127x127.png\": []byte(\"image\"),\n\t}\n\t_, err = backend.baseImage(imgs)\n\tif err == nil {\n\t\tt.Errorf(\"Should get an error, didn't pass original image.\")\n\t}\n}", "func (m *MockRepository) Fetch(bucketName, name string, model db.Model) (bool, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Fetch\", bucketName, name, model)\n\tret0, _ := ret[0].(bool)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *TeamStore) UpdateLastTeamIconUpdate(teamID string, curTime int64) error {\n\tret := _m.Called(teamID, curTime)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(string, int64) error); ok {\n\t\tr0 = rf(teamID, curTime)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockRequester) Fetch(url string) (io.ReadCloser, error) {\n\tret := m.ctrl.Call(m, \"Fetch\", url)\n\tret0, _ := ret[0].(io.ReadCloser)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func LoadLocalImage(app *AppData) error {\n\tapp.LocalImage = DockerImage{\n\t\tExists: false,\n\t}\n\tctx := context.Background()\n\tcli, err := client.NewClientWithOpts(client.FromEnv)\n\tcli.NegotiateAPIVersion(ctx)\n\tdefer cli.Close()\n\tif err 
!= nil {\n\t\treturn err\n\t}\n\tinspect, _, err := cli.ImageInspectWithRaw(ctx, app.From)\n\tif err != nil {\n\t\tif err.Error() == \"Error: No such image: \"+app.From {\n\t\t\tfmt.Printf(\"Repo not exists in local docker\\n\")\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tfmt.Printf(\"Repo exists in local docker\\n\")\n\tapp.LocalImage.Exists = true\n\tif len(inspect.RepoDigests) > 0 {\n\t\tapp.LocalImage.DockerDigest = inspect.RepoDigests[0]\n\t} else {\n\t\tapp.LocalImage.DockerDigest = inspect.ID\n\t}\n\n\t//Setting Docker Config values\n\tconfigData, err := json.Marshal(inspect.Config)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = pvjson.Unmarshal(configData, &app.LocalImage.DockerConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (f *Frontend) fetchImage(i *img.Image) (*img.Image, error) {\n\tvar err error\n\n\t// go through image proxy to resize and cache the image\n\tkey := hmacKey(i.ID)\n\tu := fmt.Sprintf(\"%v/image/225x,s%v/%v\", f.Host, key, i.ID)\n\tfmt.Println(u)\n\n\tresp, err := f.Images.Client.Get(u)\n\tif err != nil {\n\t\treturn i, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbdy, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn i, err\n\t}\n\n\ti.Base64 = base64.StdEncoding.EncodeToString(bdy)\n\treturn i, err\n}", "func TestImplicitFetch(t *testing.T) {\n\tfoundMsg := \"found image in local store\"\n\n\tctx := newRktRunCtx()\n\tdefer ctx.cleanup()\n\n\t// 1. Fetch the image.\n\t// TODO(yifan): Add other ACI with different schemes.\n\timportImageAndFetchHash(t, ctx, \"docker://busybox:ubuntu-12.04\")\n\timportImageAndFetchHash(t, ctx, \"docker://busybox:latest\")\n\n\t// 2. Try run/prepare with/without tag ':latest', should not get $foundMsg.\n\tcmds := []string{\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --mds-register=false docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --mds-register=false docker://busybox:latest\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare docker://busybox:latest\", ctx.cmd()),\n\t}\n\n\tfor _, cmd := range cmds {\n\t\tt.Logf(\"Running test %v\", cmd)\n\n\t\tchild, err := gexpect.Spawn(cmd)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Cannot exec rkt: %v\", err)\n\t\t}\n\t\tif err := expectWithOutput(child, foundMsg); err == nil {\n\t\t\tt.Fatalf(\"%q should not be found\", foundMsg)\n\t\t}\n\t\tif err := child.Wait(); err != nil {\n\t\t\tt.Fatalf(\"rkt didn't terminate correctly: %v\", err)\n\t\t}\n\t}\n}", "func (m *MockRepository) UpdateCache() (map[string]float32, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateCache\")\n\tret0, _ := ret[0].(map[string]float32)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *MockRepository) UpdateCCLFFileImportStatus(ctx context.Context, fileID uint, importStatus string) error {\n\tret := _m.Called(ctx, fileID, importStatus)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, uint, string) error); ok {\n\t\tr0 = rf(ctx, fileID, importStatus)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (m *MockEnvironment) Fetch() map[string]interface{} {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Fetch\")\n\tret0, _ := ret[0].(map[string]interface{})\n\treturn ret0\n}", "func TestCannotExecuteStatusImage(t *testing.T) {\n\tbuf := setLogBuffer()\n\tdefer func() {\n\t\tif t.Failed() {\n\t\t\tt.Log(buf.String())\n\t\t}\n\t}()\n\n\tif StatusImage == \"\" {\n\t\tt.Skip(\"no 
status image defined\")\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\n\tapp := &models.App{Name: id.New().String()}\n\tapp = ensureApp(t, app)\n\n\tfn := &models.Fn{\n\t\tAppID: app.ID,\n\t\tName: id.New().String(),\n\t\tImage: StatusImage,\n\t\tResourceConfig: models.ResourceConfig{\n\t\t\tMemory: memory,\n\t\t},\n\t}\n\tfn = ensureFn(t, fn)\n\n\tlb, err := LB()\n\tif err != nil {\n\t\tt.Fatalf(\"Got unexpected error: %v\", err)\n\t}\n\tu := url.URL{\n\t\tScheme: \"http\",\n\t\tHost: lb,\n\t}\n\tu.Path = path.Join(u.Path, \"invoke\", fn.ID)\n\n\tcontent := bytes.NewBuffer([]byte(`status`))\n\toutput := &bytes.Buffer{}\n\n\tresp, err := callFN(ctx, u.String(), content, output, models.TypeSync)\n\tif err != nil {\n\t\tt.Fatalf(\"Got unexpected error: %v\", err)\n\t}\n\n\tif resp.StatusCode != http.StatusBadRequest {\n\t\tt.Fatalf(\"StatusCode check failed on %v\", resp.StatusCode)\n\t}\n}", "func (m *MockHandler) GetLatestOsImage(arg0 string) (*models.OsImage, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetLatestOsImage\", arg0)\n\tret0, _ := ret[0].(*models.OsImage)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockSystem) FetchURL(ctx context.Context, url string) (semver.Tags, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"FetchURL\", ctx, url)\n\tret0, _ := ret[0].(semver.Tags)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func ttrPullImage(ctx context.Context, client apiclient.APIClient, image string) error {\n\trc, err := client.ImagePull(ctx, ttrImageName(image), types.ImagePullOptions{RegistryAuth: \"{}\"})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif rc != nil {\n\t\tbody, err := readAllAndClose(rc)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !strings.Contains(body, \"Status: Downloaded newer image\") {\n\t\t\treturn errors.New(\"image pull not successful\")\n\t\t}\n\t}\n\treturn nil\n}", "func (m *MockUserUsecase) UpdateAvatar(id int, avatarPath string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateAvatar\", id, avatarPath)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (c MockDockerClient) ImagePull(ctx context.Context, imageName string) error {\n\tif c.ImagePullFn != nil {\n\t\tfmt.Println(\"[MockDockerClient] In \", utils.CurrentFunctionName())\n\t\tfmt.Println(\"[MockDockerClient] - ctx: \", ctx)\n\t\tfmt.Println(\"[MockDockerClient] - imageName: \", imageName)\n\t\treturn c.ImagePullFn(ctx, imageName)\n\t}\n\tpanic(fmt.Sprintf(\"No function defined for: %s\", utils.CurrentFunctionName()))\n}", "func (c *repoCacheManager) fetchImages(tags []string) (fetchImagesResult, error) {\n\timages := map[string]image.Info{}\n\n\t// Create a list of images that need updating\n\tvar toUpdate []imageToUpdate\n\n\t// Counters for reporting what happened\n\tvar missing, refresh int\n\tfor _, tag := range tags {\n\t\tif tag == \"\" {\n\t\t\treturn fetchImagesResult{}, fmt.Errorf(\"empty tag in fetched tags\")\n\t\t}\n\n\t\t// See if we have the manifest already cached\n\t\tnewID := c.repoID.ToRef(tag)\n\t\tkey := NewManifestKey(newID.CanonicalRef())\n\t\tbytes, deadline, err := c.cacheClient.GetKey(key)\n\t\t// If err, then we don't have it yet. 
Update.\n\t\tswitch {\n\t\tcase err != nil: // by and large these are cache misses, but any error shall count as \"not found\"\n\t\t\tif err != ErrNotCached {\n\t\t\t\tc.logger.Log(\"warning\", \"error from cache\", \"err\", err, \"ref\", newID)\n\t\t\t}\n\t\t\tmissing++\n\t\t\ttoUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: initialRefresh})\n\t\tcase len(bytes) == 0:\n\t\t\tc.logger.Log(\"warning\", \"empty result from cache\", \"ref\", newID)\n\t\t\tmissing++\n\t\t\ttoUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: initialRefresh})\n\t\tdefault:\n\t\t\tvar entry registry.ImageEntry\n\t\t\tif err := json.Unmarshal(bytes, &entry); err == nil {\n\t\t\t\tif c.trace {\n\t\t\t\t\tc.logger.Log(\"trace\", \"found cached manifest\", \"ref\", newID, \"last_fetched\", entry.LastFetched.Format(time.RFC3339), \"deadline\", deadline.Format(time.RFC3339))\n\t\t\t\t}\n\n\t\t\t\tif entry.ExcludedReason == \"\" {\n\t\t\t\t\timages[tag] = entry.Info\n\t\t\t\t\tif c.now.After(deadline) {\n\t\t\t\t\t\tpreviousRefresh := minRefresh\n\t\t\t\t\t\tlastFetched := entry.Info.LastFetched\n\t\t\t\t\t\tif !lastFetched.IsZero() {\n\t\t\t\t\t\t\tpreviousRefresh = deadline.Sub(lastFetched)\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttoUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: previousRefresh, previousDigest: entry.Info.Digest})\n\t\t\t\t\t\trefresh++\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif c.trace {\n\t\t\t\t\t\tc.logger.Log(\"trace\", \"excluded in cache\", \"ref\", newID, \"reason\", entry.ExcludedReason)\n\t\t\t\t\t}\n\t\t\t\t\tif c.now.After(deadline) {\n\t\t\t\t\t\ttoUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: excludedRefresh})\n\t\t\t\t\t\trefresh++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tresult := fetchImagesResult{\n\t\timagesFound: images,\n\t\timagesToUpdate: toUpdate,\n\t\timagesToUpdateRefreshCount: refresh,\n\t\timagesToUpdateMissingCount: missing,\n\t}\n\n\treturn result, nil\n}", "func TestGetStatusByHostnameAtTimestamp2(t *testing.T) {\n\n\tbefore(t, dbStorage, dbSchema)\n\n\tprivateIPs := []string{\"44.33.22.11\"}\n\tpublicIPs := []string{\"88.77.66.55\"} // nolint\n\thostnames := []string{\"yahoo.com\"} // nolint\n\ttimestamp, _ := time.Parse(time.RFC3339, \"2019-08-09T08:29:35+00:00\")\n\n\tfakeCloudAssetChange := newFakeCloudAssetChange(privateIPs, publicIPs, hostnames, timestamp, `arn`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\t// just reuse the existing struct\n\tfakeCloudAssetChange.ARN = \"arn2\"\n\ttimestamp2, _ := time.Parse(time.RFC3339, \"2019-08-11T08:29:35+00:00\") // August 11\n\tfakeCloudAssetChange.ChangeTime = timestamp2\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\thostname := \"yahoo.com\" // nolint\n\tat, _ := time.Parse(time.RFC3339, \"2019-08-10T08:29:35+00:00\") // query is for status on August 10\n\tnetworkChangeEvents, err := dbStorage.FetchByHostname(ctx, at, hostname)\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tassert.Equal(t, 1, len(networkChangeEvents))\n\n\texpected := []domain.CloudAssetDetails{{nil, []string{\"88.77.66.55\"}, []string{\"yahoo.com\"}, \"rtype\", \"aid\", \"region\", \"arn\", nil, domain.AccountOwner{}}} // nolint\n\n\tassertArrayEqualIgnoreOrder(t, expected, networkChangeEvents)\n\n}", "func (ss *Sources) localFetch(spec v1.SourceSpec) (string, error) {\n\tp := ss.repoPath(spec)\n\terr := 
os.MkdirAll(p, 0750)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t//TODO prevent target directory from accumulating unused files\n\t// remove all files before copy\n\t// or\n\t// walk target dir and diff with source dir\n\n\t// Copy local dir to repo path.\n\t// Ignore .git directory.\n\terr = otia10copy.Copy(spec.URL, p, otia10copy.Options{Skip: func(src string) bool {\n\t\treturn filepath.Base(src) == \".git\"\n\t}})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"fetch: %w\", err)\n\t}\n\n\th, err := ss.hashAll(spec.URL) // TODO use hashAll(p) when dir is properly synced (see previous to do)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\ts := hex.EncodeToString(h.Sum(nil))\n\n\treturn s, err\n}", "func TestExpectedImgRef(t *testing.T) {\n\n\tv, isSet := os.LookupEnv(\"DOCKERHUB_PROXY\")\n\tif isSet {\n\t\tdefer os.Setenv(\"DOCKERHUB_PROXY\", v)\n\t}\n\n\tos.Unsetenv(\"DOCKERHUB_PROXY\")\n\tassert.Equal(t,\n\t\t\"index.docker.io/library/hello-world@sha256:ebf526c198a14fa138634b9746c50ec38077ec9b3986227e79eb837d26f59dc6\",\n\t\tCompleteImageRef(\"library/hello-world@sha256:ebf526c198a14fa138634b9746c50ec38077ec9b3986227e79eb837d26f59dc6\"))\n\n\tos.Setenv(\"DOCKERHUB_PROXY\", \"my-dockerhub-proxy.tld/dockerhub-proxy\")\n\tassert.Equal(t,\n\t\t\"my-dockerhub-proxy.tld/dockerhub-proxy/library/hello-world@sha256:ebf526c198a14fa138634b9746c50ec38077ec9b3986227e79eb837d26f59dc6\",\n\t\tCompleteImageRef(\"library/hello-world@sha256:ebf526c198a14fa138634b9746c50ec38077ec9b3986227e79eb837d26f59dc6\"))\n\tos.Unsetenv(\"DOCKERHUB_PROXY\")\n}", "func TestRemote(t *testing.T) {\n\trnd, err := random.Image(1024, 3)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ts, err := registry.TLS(\"gcr.io\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ttr := s.Client().Transport\n\n\tsrc := \"gcr.io/test/compressed\"\n\tref, err := name.ParseReference(src)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := remote.Write(ref, rnd, remote.WithTransport(tr)); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\timg, err := remote.Image(ref, remote.WithTransport(tr))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := validate.Image(img); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcf, err := img.ConfigFile()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tm, err := img.Manifest()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tlayer, err := img.LayerByDiffID(cf.RootFS.DiffIDs[0])\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\td, err := layer.Digest()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif diff := cmp.Diff(d, m.Layers[0].Digest); diff != \"\" {\n\t\tt.Errorf(\"mismatched digest: %v\", diff)\n\t}\n}", "func (m *MockAPI) UpdateMediaConnected(arg0 context.Context, arg1 *models.Host) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateMediaConnected\", arg0, arg1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func dirImageMock(t *testing.T, dir, dockerReference string) private.UnparsedImage {\n\tref, err := reference.ParseNormalizedNamed(dockerReference)\n\trequire.NoError(t, err)\n\treturn dirImageMockWithRef(t, dir, refImageReferenceMock{ref: ref})\n}", "func retagLocalImageForRemotePush(localTag string, remoteUrl string) string {\n\tnewTag := fmt.Sprintf(\"%s/%s\", remoteUrl, localTag)\n\tdockerTag(localTag, newTag)\n\treturn newTag\n}", "func TestGetStatusByIPAddressAtTimestamp2(t *testing.T) {\n\n\tbefore(t, dbStorage, dbSchema)\n\n\tprivateIPs := []string{\"44.33.22.11\"}\n\tpublicIPs := []string{\"88.77.66.55\"}\n\thostnames := []string{\"yahoo.com\"} // nolint\n\ttimestamp, _ := 
time.Parse(time.RFC3339, \"2019-08-09T08:29:35+00:00\")\n\n\tfakeCloudAssetChange := newFakeCloudAssetChange(privateIPs, publicIPs, hostnames, timestamp, `arn`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\t// just reuse the existing struct\n\tfakeCloudAssetChange.ARN = \"arn2\"\n\ttimestamp2, _ := time.Parse(time.RFC3339, \"2019-08-11T08:29:35+00:00\") // August 11\n\tfakeCloudAssetChange.ChangeTime = timestamp2\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tipAddress := \"88.77.66.55\"\n\tat, _ := time.Parse(time.RFC3339, \"2019-08-10T08:29:35+00:00\") // query is for status on August 10\n\tnetworkChangeEvents, err := dbStorage.FetchByIP(ctx, at, ipAddress)\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tassert.Equal(t, 1, len(networkChangeEvents))\n\n\texpected := []domain.CloudAssetDetails{\n\t\tdomain.CloudAssetDetails{nil, []string{\"88.77.66.55\"}, []string{\"yahoo.com\"}, \"rtype\", \"aid\", \"region\", \"arn\", nil, domain.AccountOwner{}}, // nolint\n\t}\n\n\tassertArrayEqualIgnoreOrder(t, expected, networkChangeEvents)\n\n}", "func TestRunPrepareLocal(t *testing.T) {\n\tnotAvailableMsg := \"not available in local store\"\n\tfoundMsg := \"using image in local store\"\n\n\tctx := newRktRunCtx()\n\tdefer ctx.cleanup()\n\n\tcmds := []string{\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --local --mds-register=false docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --local --mds-register=false docker://busybox:latest\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare --local docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare --local docker://busybox:latest\", ctx.cmd()),\n\t}\n\n\t// 1. Try run/prepare with the image not available in the store, should get $notAvailableMsg.\n\tfor _, cmd := range cmds {\n\t\tt.Logf(\"Running test %v\", cmd)\n\n\t\tchild, err := gexpect.Spawn(cmd)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Cannot exec rkt: %v\", err)\n\t\t}\n\t\tif err := expectWithOutput(child, notAvailableMsg); err != nil {\n\t\t\tt.Fatalf(\"%q should be found\", notAvailableMsg)\n\t\t}\n\t\tchild.Wait()\n\t}\n\n\t// 2. Fetch the image\n\timportImageAndFetchHash(t, ctx, \"docker://busybox\")\n\timportImageAndFetchHash(t, ctx, \"docker://busybox:latest\")\n\n\t// 3. 
Try run/prepare with the image available in the store, should get $foundMsg.\n\tfor _, cmd := range cmds {\n\t\tt.Logf(\"Running test %v\", cmd)\n\n\t\tchild, err := gexpect.Spawn(cmd)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Cannot exec rkt: %v\", err)\n\t\t}\n\t\tif err := expectWithOutput(child, foundMsg); err != nil {\n\t\t\tt.Fatalf(\"%q should be found\", foundMsg)\n\t\t}\n\t\tif err := child.Wait(); err != nil {\n\t\t\tt.Fatalf(\"rkt didn't terminate correctly: %v\", err)\n\t\t}\n\t}\n}", "func (m *VirtualMachinesClientMock) Update(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachineUpdate, source string) *retry.Error {\n\treturn nil\n}", "func (m *MockRepository) Update(ctx context.Context, asset *model.Asset) (*model.Asset, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Update\", ctx, asset)\n\tret0, _ := ret[0].(*model.Asset)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestGetStatusByIPAddressAtTimestamp1(t *testing.T) {\n\n\tbefore(t, dbStorage, dbSchema)\n\n\tprivateIPs := []string{\"44.33.22.11\"}\n\tpublicIPs := []string{\"88.77.66.55\"} // nolint\n\thostnames := []string{\"yahoo.com\"} // nolint\n\ttimestamp, _ := time.Parse(time.RFC3339, \"2019-08-09T08:29:35+00:00\")\n\n\tfakeCloudAssetChange := newFakeCloudAssetChange(privateIPs, publicIPs, hostnames, timestamp, `arn`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tipAddress := \"88.77.66.55\" // nolint\n\tat, _ := time.Parse(time.RFC3339, \"2019-08-10T08:29:35+00:00\")\n\tnetworkChangeEvents, err := dbStorage.FetchByIP(ctx, at, ipAddress)\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tassert.Equal(t, 1, len(networkChangeEvents))\n\n\texpected := []domain.CloudAssetDetails{\n\t\tdomain.CloudAssetDetails{nil, []string{\"88.77.66.55\"}, []string{\"yahoo.com\"}, \"rtype\", \"aid\", \"region\", \"arn\", nil, domain.AccountOwner{}}, // nolint\n\t}\n\n\tassertArrayEqualIgnoreOrder(t, expected, networkChangeEvents)\n\n}", "func TestGetStatusByHostnameAtTimestamp1(t *testing.T) {\n\n\tbefore(t, dbStorage, dbSchema)\n\n\tprivateIPs := []string{\"44.33.22.11\"}\n\tpublicIPs := []string{\"88.77.66.55\"}\n\thostnames := []string{\"yahoo.com\"} // nolint\n\ttimestamp, _ := time.Parse(time.RFC3339, \"2019-08-09T08:29:35+00:00\")\n\n\tfakeCloudAssetChange := newFakeCloudAssetChange(privateIPs, publicIPs, hostnames, timestamp, `arn`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\thostname := \"yahoo.com\" // nolint\n\tat, _ := time.Parse(time.RFC3339, \"2019-08-10T08:29:35+00:00\")\n\tnetworkChangeEvents, err := dbStorage.FetchByHostname(ctx, at, hostname)\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tassert.Equal(t, 1, len(networkChangeEvents))\n\n\texpected := []domain.CloudAssetDetails{\n\t\tdomain.CloudAssetDetails{ //nolint\n\t\t\tnil,\n\t\t\t[]string{\"88.77.66.55\"},\n\t\t\t[]string{\"yahoo.com\"},\n\t\t\t\"rtype\",\n\t\t\t\"aid\",\n\t\t\t\"region\",\n\t\t\t\"arn\",\n\t\t\tnil,\n\t\t\tdomain.AccountOwner{},\n\t\t},\n\t}\n\n\tassertArrayEqualIgnoreOrder(t, expected, networkChangeEvents)\n\n}", "func (m *MockStorage) Update(arg0 model.Car) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Update\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (_m *AWSResourceDescriptor) UpdateCRStatus(_a0 types.AWSResource) (bool, error) {\n\tret := 
_m.Called(_a0)\n\n\tvar r0 bool\n\tif rf, ok := ret.Get(0).(func(types.AWSResource) bool); ok {\n\t\tr0 = rf(_a0)\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(types.AWSResource) error); ok {\n\t\tr1 = rf(_a0)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *MockHandler) Update(ctx context.Context, exactType string, tasks []models.Task) error {\n\tret := m.ctrl.Call(m, \"Update\", ctx, exactType, tasks)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockRepository) Update(tag *models.RestTag) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Update\", tag)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (f MockFetch) Fetch(targetServer Server) ServerStatus {\n\tif targetServer.ID == 196 {\n\t\treturn ServerStatus{targetServer.ID, false, \"404\", targetServer.URL, time.Now()}\n\t}\n\treturn ServerStatus{targetServer.ID, true, \"\", targetServer.URL, time.Now()}\n}", "func (m *MockManager) LoadImage(arg0 context.Context, arg1 *config.Config, arg2 dockerapi.DockerClient) (*types.ImageInspect, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"LoadImage\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(*types.ImageInspect)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockRepoClient) LocalPath() (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"LocalPath\")\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *MockBookingStorage) Update(_a0 interface{}) {\n\t_m.Called(_a0)\n}", "func (_m *Repository) Update(ctx context.Context, _a1 *models.Host) error {\n\tret := _m.Called(ctx, _a1)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, *models.Host) error); ok {\n\t\tr0 = rf(ctx, _a1)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func prepull(ctx context.Context, req types.FunctionDeployment, client *containerd.Client, alwaysPull bool) (containerd.Image, error) {\n\tstart := time.Now()\n\tr, err := reference.ParseNormalizedNamed(req.Image)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\timgRef := reference.TagNameOnly(r).String()\n\n\tsnapshotter := \"\"\n\tif val, ok := os.LookupEnv(\"snapshotter\"); ok {\n\t\tsnapshotter = val\n\t}\n\n\timage, err := service.PrepareImage(ctx, client, imgRef, snapshotter, alwaysPull)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to pull image %s\", imgRef)\n\t}\n\n\tsize, _ := image.Size(ctx)\n\tlog.Printf(\"Image for: %s size: %d, took: %fs\\n\", image.Name(), size, time.Since(start).Seconds())\n\n\treturn image, nil\n}", "func (suite *APIImageSaveLoadSuite) TestImageSaveLoadOk(c *check.C) {\n\tbefore, err := request.Get(\"/images/\" + busyboxImage125 + \"/json\")\n\tc.Assert(err, check.IsNil)\n\tCheckRespStatus(c, before, 200)\n\tgotBefore := types.ImageInfo{}\n\terr = request.DecodeBody(&gotBefore, before.Body)\n\tc.Assert(err, check.IsNil)\n\n\tq := url.Values{}\n\tq.Set(\"name\", busyboxImage125)\n\tquery := request.WithQuery(q)\n\tresp, err := request.Get(\"/images/save\", query)\n\tc.Assert(err, check.IsNil)\n\tdefer resp.Body.Close()\n\n\tdir, err := ioutil.TempDir(\"\", \"TestImageSaveLoadOk\")\n\tif err != nil {\n\t\tc.Errorf(\"failed to create a new temporary directory: %v\", err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\ttmpFile := filepath.Join(dir, \"busyboxImage.tar\")\n\tf, err := os.Create(tmpFile)\n\tif err != nil {\n\t\tc.Errorf(\"failed to create file: %v\", err)\n\t}\n\n\tif _, err := io.Copy(f, resp.Body); err != nil 
{\n\t\tc.Errorf(\"failed to save data to file: %v\", err)\n\t}\n\n\tdata, err := os.Open(tmpFile)\n\tif err != nil {\n\t\tc.Errorf(\"failed to load file's data: %v\", err)\n\t}\n\n\tloadImageName := \"load-busyboxImage\"\n\tq = url.Values{}\n\tq.Set(\"name\", loadImageName)\n\n\tquery = request.WithQuery(q)\n\treader := request.WithRawData(data)\n\theader := request.WithHeader(\"Content-Type\", \"application/x-tar\")\n\n\tresp, err = request.Post(\"/images/load\", query, reader, header)\n\tc.Assert(err, check.IsNil)\n\tCheckRespStatus(c, resp, 200)\n\n\tafter, err := request.Get(\"/images/\" + loadImageName + \":\" + environment.Busybox125Tag + \"/json\")\n\tc.Assert(err, check.IsNil)\n\tCheckRespStatus(c, after, 200)\n\tdefer request.Delete(\"/images/\" + loadImageName + \":\" + environment.Busybox125Tag)\n\n\tgotAfter := types.ImageInfo{}\n\terr = request.DecodeBody(&gotAfter, after.Body)\n\tc.Assert(err, check.IsNil)\n\n\tc.Assert(gotBefore.ID, check.Equals, gotAfter.ID)\n\tc.Assert(gotBefore.CreatedAt, check.Equals, gotAfter.CreatedAt)\n\tc.Assert(gotBefore.Size, check.Equals, gotAfter.Size)\n}", "func (m *MockInterface) UpdateStatus(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (*v1.PersistentVolume, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateStatus\", ctx, persistentVolume, opts)\n\tret0, _ := ret[0].(*v1.PersistentVolume)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockFeiraStore) Update(ctx context.Context, id string, feira model.FeiraRequest) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Update\", ctx, id, feira)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (mc *MockContainer) ImagePull() error {\n\treturn mc.MockImagePull()\n}", "func PrepareBaseImage(ctx context.Context, ref string, output io.Writer) (err error) {\n\n\tcli, err := client.NewEnvClient()\n\tif err != nil {\n\t\treturn\n\t}\n\n\treader, err := cli.ImagePull(ctx, ref, types.ImagePullOptions{})\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer reader.Close()\n\n\tvar log pullLog\n\tstatus := make(map[string]string)\n\tscanner := bufio.NewScanner(reader)\n\tfor scanner.Scan() {\n\t\tjson.Unmarshal(scanner.Bytes(), &log)\n\t\tif log.ID != \"\" {\n\t\t\tcur := status[log.ID]\n\t\t\tif cur != log.Status {\n\t\t\t\tstatus[log.ID] = log.Status\n\t\t\t\tfmt.Fprintln(output, log.Status, log.ID)\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Fprintln(output, log.Status)\n\t\t}\n\t}\n\n\treturn\n\n}", "func (_m *CacheManager) Update(key string, o *objects.Object) bool {\n\tret := _m.Called(key, o)\n\n\tvar r0 bool\n\tif rf, ok := ret.Get(0).(func(string, *objects.Object) bool); ok {\n\t\tr0 = rf(key, o)\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\n\treturn r0\n}", "func fetch(hash string, endpoint string, original []byte, ruid string, tuid string) error {\n\tctx, sp := spancontext.StartSpan(context.Background(), \"upload-and-sync.fetch\")\n\tdefer sp.Finish()\n\n\tlog.Info(\"http get request\", \"tuid\", tuid, \"ruid\", ruid, \"endpoint\", endpoint, \"hash\", hash)\n\n\tvar tn time.Time\n\treqUri := endpoint + \"/bzz:/\" + hash + \"/\"\n\treq, _ := http.NewRequest(\"GET\", reqUri, nil)\n\n\topentracing.GlobalTracer().Inject(\n\t\tsp.Context(),\n\t\topentracing.HTTPHeaders,\n\t\topentracing.HTTPHeadersCarrier(req.Header))\n\n\ttrace := client.GetClientTrace(commandName+\" - http get\", commandName, ruid, &tn)\n\n\treq = req.WithContext(httptrace.WithClientTrace(ctx, trace))\n\ttransport := 
http.DefaultTransport\n\n\t//transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}\n\n\ttn = time.Now()\n\tres, err := transport.RoundTrip(req)\n\tif err != nil {\n\t\tlog.Error(err.Error(), \"ruid\", ruid)\n\t\treturn err\n\t}\n\tlog.Info(\"http get response\", \"tuid\", tuid, \"ruid\", ruid, \"endpoint\", endpoint, \"hash\", hash, \"code\", res.StatusCode, \"len\", res.ContentLength)\n\n\tif res.StatusCode != 200 {\n\t\terr := fmt.Errorf(\"expected status code %d, got %v\", 200, res.StatusCode)\n\t\tlog.Warn(err.Error(), \"ruid\", ruid)\n\t\treturn err\n\t}\n\n\tdefer res.Body.Close()\n\n\trdigest, err := digest(res.Body)\n\tif err != nil {\n\t\tlog.Warn(err.Error(), \"ruid\", ruid)\n\t\treturn err\n\t}\n\n\tif !bytes.Equal(rdigest, original) {\n\t\terr := fmt.Errorf(\"downloaded imported file md5=%x is not the same as the generated one=%x\", rdigest, original)\n\t\tlog.Warn(err.Error(), \"ruid\", ruid)\n\t\treturn err\n\t}\n\n\tlog.Trace(\"downloaded file matches random file\", \"ruid\", ruid, \"len\", res.ContentLength)\n\n\treturn nil\n}", "func (m *MockClientInterface) UpdateCustomResource(item *unstructured.Unstructured) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateCustomResource\", item)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (f FetchStruct) FetchFromRemote() *model.FetchResult {\n\trepo := f.Repo\n\tremoteURL := f.RemoteURL\n\tremoteBranch := f.RemoteBranch\n\trepoPath := f.RepoPath\n\n\tvar remoteDataObject RemoteDataInterface\n\tremoteDataObject = RemoteDataStruct{\n\t\tRepo: repo,\n\t\tRemoteURL: remoteURL,\n\t}\n\n\tremoteName := remoteDataObject.GetRemoteName()\n\tlogger := global.Logger{}\n\n\ttargetRefPsec := \"refs/heads/\" + remoteBranch + \":refs/remotes/\" + remoteBranch\n\tb := new(bytes.Buffer)\n\tvar fetchErr error\n\tgitSSHAuth, sshErr := ssh.NewSSHAgentAuth(\"git\")\n\tw, _ := repo.Worktree()\n\n\t// Check if repo path is empty and fetch path from worktree\n\tif repoPath == \"\" {\n\t\trepoPath = w.Filesystem.Root()\n\t}\n\n\tif sshErr != nil {\n\t\tlogger.Log(\"Authentication method failed -> \"+sshErr.Error(), global.StatusError)\n\t\tif w == nil {\n\t\t\treturn &model.FetchResult{\n\t\t\t\tStatus: global.FetchFromRemoteError,\n\t\t\t\tFetchedItems: nil,\n\t\t\t}\n\t\t}\n\t\tlogger.Log(\"Retrying fetch with fallback module using git client\", global.StatusWarning)\n\t\treturn f.windowsFetch()\n\t}\n\n\tlogger.Log(fmt.Sprintf(\"Fetching changes from -> %s : %s\", remoteURL, targetRefPsec), global.StatusInfo)\n\n\tif remoteURL != \"\" && remoteBranch != \"\" {\n\t\tif remoteName == \"\" {\n\t\t\treturn &model.FetchResult{\n\t\t\t\tStatus: global.FetchFromRemoteError,\n\t\t\t\tFetchedItems: nil,\n\t\t\t}\n\t\t}\n\n\t\tfetchErr = repo.Fetch(&git.FetchOptions{\n\t\t\tRemoteName: remoteName,\n\t\t\tAuth: gitSSHAuth,\n\t\t\tRefSpecs: []config.RefSpec{config.RefSpec(targetRefPsec)},\n\t\t\tProgress: sideband.Progress(func(f io.Writer) io.Writer {\n\t\t\t\treturn f\n\t\t\t}(b)),\n\t\t})\n\t} else {\n\t\tfetchErr = repo.Fetch(&git.FetchOptions{\n\t\t\tRemoteName: git.DefaultRemoteName,\n\t\t\tAuth: gitSSHAuth,\n\t\t\tProgress: sideband.Progress(func(f io.Writer) io.Writer {\n\t\t\t\treturn f\n\t\t\t}(b)),\n\t\t})\n\t}\n\n\tif fetchErr != nil {\n\t\tif fetchErr.Error() == \"already up-to-date\" {\n\t\t\tlogger.Log(fetchErr.Error(), global.StatusWarning)\n\t\t\treturn &model.FetchResult{\n\t\t\t\tStatus: global.FetchNoNewChanges,\n\t\t\t\tFetchedItems: nil,\n\t\t\t}\n\t\t} else {\n\t\t\tlogger.Log(fetchErr.Error(), 
global.StatusError)\n\t\t\tlogger.Log(\"Fetch failed. Retrying fetch with git client\", global.StatusWarning)\n\t\t\treturn f.windowsFetch()\n\t\t}\n\n\t} else {\n\t\tlogger.Log(b.String(), global.StatusInfo)\n\t\tlogger.Log(\"Changes fetched from remote\", global.StatusInfo)\n\n\t\tmsg := fmt.Sprintf(\"Changes fetched from remote %v\", remoteName)\n\t\treturn &model.FetchResult{\n\t\t\tStatus: global.FetchFromRemoteSuccess,\n\t\t\tFetchedItems: []*string{&msg},\n\t\t}\n\t}\n\n}", "func updateImageResource(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {\n\t// Warning or errors can be collected in a slice type\n\tvar diags diag.Diagnostics\n\n\tclient := (meta.(Client)).Client\n\tname := rdEntryStr(d, \"name\")\n\tid := rdEntryStr(d, \"id\")\n\terrMsgPrefix := getErrMsgPrefix(\"Image\", name, id, \"Update\")\n\tif client == nil {\n\t\treturn diag.Errorf(\"%s nil Client\", errMsgPrefix)\n\t}\n\tcfg, err := getImage(client, name, id)\n\tif err != nil {\n\t\treturn diag.Errorf(\"%s err: %s\", errMsgPrefix, err.Error())\n\t}\n\tlog.Printf(\"[INFO] Updating Image: %s (ID: %s)\", name, cfg.ID)\n\terr = updateImageCfgFromResourceData(cfg, d)\n\tif err != nil {\n\t\treturn diag.Errorf(\"%s err: %s\", errMsgPrefix, err.Error())\n\t}\n\tclient.XRequestIdPrefix = \"TF-image-update\"\n\turlExtension := getImageUrl(name, id, \"update\")\n\trspData := &swagger_models.ZsrvResponse{}\n\t_, err = client.SendReq(\"PUT\", urlExtension, cfg, rspData)\n\tif err != nil {\n\t\treturn diag.Errorf(\"%s Request Failed. err: %s\", errMsgPrefix, err.Error())\n\t}\n\treturn diags\n}", "func (m *MockStorage) Get(ctx context.Context, id string) (*storage.ImageModel, error) {\n\tret := m.ctrl.Call(m, \"Get\", ctx, id)\n\tret0, _ := ret[0].(*storage.ImageModel)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockClientInterface) UpdateCustomResourceRaw(apiGroup, version, namespace, resourceKind, resourceName string, data []byte) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateCustomResourceRaw\", apiGroup, version, namespace, resourceKind, resourceName, data)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockCompute) ImageIDFromName(arg0 string) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ImageIDFromName\", arg0)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func createImageMirrorForInternalImages(prefix string, ref reference.DockerImageReference, mirrored bool) ([]string, error) {\n\tsource := ref.Exact()\n\n\tinitialDefaults := k8simage.GetOriginalImageConfigs()\n\texceptions := image.Exceptions.List()\n\tdefaults := map[k8simage.ImageID]k8simage.Config{}\n\nimageLoop:\n\tfor i, config := range initialDefaults {\n\t\tfor _, exception := range exceptions {\n\t\t\tif strings.Contains(config.GetE2EImage(), exception) {\n\t\t\t\tcontinue imageLoop\n\t\t\t}\n\t\t}\n\t\tdefaults[i] = config\n\t}\n\n\tupdated := k8simage.GetMappedImageConfigs(defaults, ref.Exact())\n\topenshiftDefaults := image.OriginalImages()\n\topenshiftUpdated := image.GetMappedImages(openshiftDefaults, imagesetup.DefaultTestImageMirrorLocation)\n\n\t// if we've mirrored, then the source is going to be our repo, not upstream's\n\tif mirrored {\n\t\tbaseRef, err := reference.Parse(imagesetup.DefaultTestImageMirrorLocation)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid default mirror location: %v\", err)\n\t\t}\n\n\t\t// calculate the mapping of upstream images by setting defaults to baseRef\n\t\tcovered := 
sets.NewString()\n\t\tfor i, config := range updated {\n\t\t\tdefaultConfig := defaults[i]\n\t\t\tpullSpec := config.GetE2EImage()\n\t\t\tif pullSpec == defaultConfig.GetE2EImage() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif covered.Has(pullSpec) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcovered.Insert(pullSpec)\n\t\t\te2eRef, err := reference.Parse(pullSpec)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid test image: %s: %v\", pullSpec, err)\n\t\t\t}\n\t\t\tif len(e2eRef.Tag) == 0 {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid test image: %s: no tag\", pullSpec)\n\t\t\t}\n\t\t\tconfig.SetRegistry(baseRef.Registry)\n\t\t\tconfig.SetName(baseRef.RepositoryName())\n\t\t\tconfig.SetVersion(e2eRef.Tag)\n\t\t\tdefaults[i] = config\n\t\t}\n\n\t\t// calculate the mapping for openshift images by populating openshiftUpdated\n\t\topenshiftUpdated = make(map[string]string)\n\t\tsourceMappings := image.GetMappedImages(openshiftDefaults, imagesetup.DefaultTestImageMirrorLocation)\n\t\ttargetMappings := image.GetMappedImages(openshiftDefaults, source)\n\n\t\tfor from, to := range targetMappings {\n\t\t\tif from == to {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif covered.Has(to) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcovered.Insert(to)\n\t\t\tfrom := sourceMappings[from]\n\t\t\topenshiftUpdated[from] = to\n\t\t}\n\t}\n\n\tcovered := sets.NewString()\n\tvar lines []string\n\tfor i := range updated {\n\t\ta, b := defaults[i], updated[i]\n\t\tfrom, to := a.GetE2EImage(), b.GetE2EImage()\n\t\tif from == to {\n\t\t\tcontinue\n\t\t}\n\t\tif covered.Has(from) {\n\t\t\tcontinue\n\t\t}\n\t\tcovered.Insert(from)\n\t\tlines = append(lines, fmt.Sprintf(\"%s %s%s\", from, prefix, to))\n\t}\n\n\tfor from, to := range openshiftUpdated {\n\t\tif from == to {\n\t\t\tcontinue\n\t\t}\n\t\tif covered.Has(from) {\n\t\t\tcontinue\n\t\t}\n\t\tcovered.Insert(from)\n\t\tlines = append(lines, fmt.Sprintf(\"%s %s%s\", from, prefix, to))\n\t}\n\n\tsort.Strings(lines)\n\treturn lines, nil\n}", "func notifyNewImage(basePath, imageUrl string) error {\n\thook := os.Getenv(\"WEB_HOOK\")\n\tif hook == \"\" {\n\t\treturn nil\n\t}\n\n\ttype photo struct {\n\t\tUrl string `json:\"remote_url\"`\n\t}\n\n\ttype payload struct {\n\t\tPhoto *photo `json:\"photo\"`\n\t}\n\n\tbody := &payload{&photo{Url: imageUrl}}\n\tb, err := json.Marshal(body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfullURL := fmt.Sprintf(\"%s/buckets/%s/photos.json\", hook, basePath)\n\n\tlog.Infof(\"Notifying hook at %s\", fullURL)\n\n\treq, err := http.NewRequest(\"POST\", fullURL, bytes.NewBuffer(b))\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode < 200 || resp.StatusCode > 299 {\n\t\treturn fmt.Errorf(\"expecting status code in 200 .. 
299 got %d\", resp.StatusCode)\n\t}\n\tlog.Infof(\"Notification sent to %s\", hook)\n\treturn nil\n}", "func (c *repoCacheManager) updateImages(ctx context.Context, images []imageToUpdate) (map[string]image.Info, int, int) {\n\t// The upper bound for concurrent fetches against a single host is\n\t// w.Burst, so limit the number of fetching goroutines to that.\n\tfetchers := make(chan struct{}, c.burst)\n\tawaitFetchers := &sync.WaitGroup{}\n\n\tctxc, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\tvar successCount int\n\tvar manifestUnknownCount int\n\tvar result = map[string]image.Info{}\n\tvar warnAboutRateLimit sync.Once\nupdates:\n\tfor _, up := range images {\n\t\t// to avoid race condition, when accessing it in the go routine\n\t\tupCopy := up\n\t\tselect {\n\t\tcase <-ctxc.Done():\n\t\t\tbreak updates\n\t\tcase fetchers <- struct{}{}:\n\t\t}\n\t\tawaitFetchers.Add(1)\n\t\tgo func() {\n\t\t\tdefer func() { awaitFetchers.Done(); <-fetchers }()\n\t\t\tctxcc, cancel := context.WithTimeout(ctxc, c.clientTimeout)\n\t\t\tdefer cancel()\n\t\t\tentry, err := c.updateImage(ctxcc, upCopy)\n\t\t\tif err != nil {\n\t\t\t\tif err, ok := errors.Cause(err).(net.Error); (ok && err.Timeout()) || ctxcc.Err() == context.DeadlineExceeded {\n\t\t\t\t\t// This was due to a context timeout, don't bother logging\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tswitch {\n\t\t\t\tcase strings.Contains(err.Error(), \"429\"), strings.Contains(err.Error(), \"toomanyrequests\"):\n\t\t\t\t\t// abort the image tags fetching if we've been rate limited\n\t\t\t\t\twarnAboutRateLimit.Do(func() {\n\t\t\t\t\t\tc.logger.Log(\"warn\", \"aborting image tag fetching due to rate limiting, will try again later\")\n\t\t\t\t\t\tcancel()\n\t\t\t\t\t})\n\t\t\t\tcase strings.Contains(err.Error(), \"manifest unknown\"):\n\t\t\t\t\t// Registry is corrupted, keep going, this manifest may not be relevant for automatic updates\n\t\t\t\t\tc.Lock()\n\t\t\t\t\tmanifestUnknownCount++\n\t\t\t\t\tc.Unlock()\n\t\t\t\t\tc.logger.Log(\"warn\", fmt.Sprintf(\"manifest for tag %s missing in repository %s\", up.ref.Tag, up.ref.Name),\n\t\t\t\t\t\t\"impact\", \"flux will fail to auto-release workloads with matching images, ask the repository administrator to fix the inconsistency\")\n\t\t\t\tdefault:\n\t\t\t\t\tc.logger.Log(\"err\", err, \"ref\", up.ref)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tc.Lock()\n\t\t\tsuccessCount++\n\t\t\tif entry.ExcludedReason == \"\" {\n\t\t\t\tresult[upCopy.ref.Tag] = entry.Info\n\t\t\t}\n\t\t\tc.Unlock()\n\t\t}()\n\t}\n\tawaitFetchers.Wait()\n\treturn result, successCount, manifestUnknownCount\n}", "func (m *MockRepository) Update(e *entity.StreetMarket) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Update\", e)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockCustomResourceClient) UpdateCustomResourceRaw(apiGroup, version, namespace, resourceKind, resourceName string, data []byte) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateCustomResourceRaw\", apiGroup, version, namespace, resourceKind, resourceName, data)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockCacheService) FetchFromCache(pagination model.Pagination) (model.Response, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"FetchFromCache\", pagination)\n\tret0, _ := ret[0].(model.Response)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func testDownloadImages(ctx context.Context, t *testing.T, downloadCh chan<- downloadRequest, addr, ccvmDir string) {\n\twkld := &workload{\n\t\tspec: 
workloadSpec{\n\t\t\tBaseImageURL: \"http://\" + addr + \"/download/image\",\n\t\t\tBIOS: \"http://\" + addr + \"/download/bios\",\n\t\t},\n\t}\n\n\tresultCh := make(chan interface{})\n\tgo func() {\n\t\timg, bios, err := downloadImages(ctx, wkld, http.DefaultTransport.(*http.Transport),\n\t\t\tresultCh, downloadCh)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Failed to download images: %v\", err)\n\t\t}\n\t\tif len(img) == 0 || len(bios) == 0 {\n\t\t\tt.Errorf(\"One the paths is empty img=%s bios=%s\", img, bios)\n\t\t}\n\t\tclose(resultCh)\n\t}()\n\n\tfor range resultCh {\n\t}\n\n\twkld.spec.BIOS = \"ftp://\" + addr + \"/download/bios\"\n\tresultCh = make(chan interface{})\n\tgo func() {\n\t\t_, _, err := downloadImages(ctx, wkld, http.DefaultTransport.(*http.Transport),\n\t\t\tresultCh, downloadCh)\n\t\tif err == nil {\n\t\t\tt.Errorf(\"Expected downloadImages with bad BIOS URL to fail\")\n\t\t}\n\t\tclose(resultCh)\n\t}()\n\n\tfor range resultCh {\n\t}\n}", "func (m *MockFoldersRepo) Update(arg0 domain.Folder) (domain.Folder, error) {\n\tret := m.ctrl.Call(m, \"Update\", arg0)\n\tret0, _ := ret[0].(domain.Folder)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func testUpdateOperationHandlerGet(t *testing.T) {\n\tt.Parallel()\n\n\tid := uuid.New()\n\tidStr := \"\\\"\" + id.String() + \"\\\"\"\n\tvar called bool\n\tvar latestCalled bool\n\th := UpdateOperationHandler(&matcher.Mock{\n\t\tLatestUpdateOperation_: func(context.Context, driver.UpdateKind) (uuid.UUID, error) {\n\t\t\treturn id, nil\n\t\t},\n\t\tLatestUpdateOperations_: func(context.Context, driver.UpdateKind) (map[string][]driver.UpdateOperation, error) {\n\t\t\tlatestCalled = true\n\t\t\treturn nil, nil\n\t\t},\n\t\tUpdateOperations_: func(context.Context, driver.UpdateKind, ...string) (map[string][]driver.UpdateOperation, error) {\n\t\t\tcalled = true\n\t\t\treturn nil, nil\n\t\t},\n\t})\n\tsrv := httptest.NewServer(h)\n\tdefer srv.Close()\n\tc := srv.Client()\n\n\t// get without latest param\n\treq, err := http.NewRequest(http.MethodGet, srv.URL, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create request: %v\", err)\n\t}\n\tresp, err := c.Do(req)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to make request: %v\", err)\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\tt.Fatalf(\"got: %v, want: %v\", resp.StatusCode, http.StatusOK)\n\t}\n\tif !called {\n\t\tt.Fatalf(\"got: %v, want: %v\", called, true)\n\t}\n\tetag := resp.Header.Get(\"etag\")\n\tif etag != idStr {\n\t\tt.Fatalf(\"got: %v, want: %v\", etag, id.String())\n\t}\n\n\t// get with latest param\n\tu, _ := url.Parse(srv.URL)\n\tq := u.Query()\n\tq.Add(\"latest\", \"true\")\n\tu.RawQuery = q.Encode()\n\treq = &http.Request{\n\t\tURL: u,\n\t\tMethod: http.MethodGet,\n\t}\n\tresp, err = c.Do(req)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to make request: %v\", err)\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\tt.Fatalf(\"got: %v, want: %v\", resp.StatusCode, http.StatusOK)\n\t}\n\tif !latestCalled {\n\t\tt.Fatalf(\"got: %v, want: %v\", latestCalled, true)\n\t}\n\tetag = resp.Header.Get(\"etag\")\n\tif etag != idStr {\n\t\tt.Fatalf(\"got: %v, want: %v\", etag, id.String())\n\t}\n}", "func (m *MockImageTransferer) Download(arg0 string, arg1 core.Digest) (base.FileReader, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Download\", arg0, arg1)\n\tret0, _ := ret[0].(base.FileReader)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func dirImageMockWithRef(t *testing.T, dir string, ref types.ImageReference) private.UnparsedImage {\n\tsrcRef, err := 
directory.NewReference(dir)\n\trequire.NoError(t, err)\n\tsrc, err := srcRef.NewImageSource(context.Background(), nil)\n\trequire.NoError(t, err)\n\tt.Cleanup(func() {\n\t\terr := src.Close()\n\t\trequire.NoError(t, err)\n\t})\n\treturn image.UnparsedInstance(&dirImageSourceMock{\n\t\tImageSource: imagesource.FromPublic(src),\n\t\tref: ref,\n\t}, nil)\n}", "func MockRoundUpdate(round uint64, p *user.Provisioners) RoundUpdate {\n\tprovisioners := p\n\tif p == nil {\n\t\tprovisioners, _ = MockProvisioners(1)\n\t}\n\n\tseed, _ := crypto.RandEntropy(33)\n\thash, _ := crypto.RandEntropy(32)\n\n\treturn RoundUpdate{\n\t\tRound: round,\n\t\tP: *provisioners,\n\t\tSeed: seed,\n\t\tHash: hash,\n\t\tLastCertificate: block.EmptyCertificate(),\n\t}\n}", "func (m *MockFamilyRepo) Update(arg0 models.Family) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Update\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockRepositoryClient) UpdateRepoLabel(org, repo, label, newName, description, color string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateRepoLabel\", org, repo, label, newName, description, color)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (mr *MockFetcherMockRecorder) FetchLocalImage(arg0 interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"FetchLocalImage\", reflect.TypeOf((*MockFetcher)(nil).FetchLocalImage), arg0)\n}", "func ServerTestBattery(t *testing.T, wrap func(mock api.Server) api.Server) {\n\t// set up\n\tnamespace := \"the-space-of-names\"\n\tserviceID := resource.MustParseID(namespace + \"/service\")\n\tserviceList := []resource.ID{serviceID}\n\tservices := resource.IDSet{}\n\tservices.Add(serviceList)\n\n\tnow := time.Now().UTC()\n\n\timageID, _ := image.ParseRef(\"quay.io/example.com/frob:v0.4.5\")\n\tserviceAnswer := []v6.ControllerStatus{\n\t\tv6.ControllerStatus{\n\t\t\tID: resource.MustParseID(\"foobar/hello\"),\n\t\t\tStatus: \"ok\",\n\t\t\tContainers: []v6.Container{\n\t\t\t\tv6.Container{\n\t\t\t\t\tName: \"frobnicator\",\n\t\t\t\t\tCurrent: image.Info{\n\t\t\t\t\t\tID: imageID,\n\t\t\t\t\t\tCreatedAt: now,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tv6.ControllerStatus{},\n\t}\n\n\timagesAnswer := []v6.ImageStatus{\n\t\tv6.ImageStatus{\n\t\t\tID: resource.MustParseID(\"barfoo/yello\"),\n\t\t\tContainers: []v6.Container{\n\t\t\t\t{\n\t\t\t\t\tName: \"flubnicator\",\n\t\t\t\t\tCurrent: image.Info{\n\t\t\t\t\t\tID: imageID,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tsyncStatusAnswer := []string{\n\t\t\"commit 1\",\n\t\t\"commit 2\",\n\t\t\"commit 3\",\n\t}\n\n\tupdateSpec := update.Spec{\n\t\tType: update.Images,\n\t\tSpec: update.ReleaseImageSpec{\n\t\t\tServiceSpecs: []update.ResourceSpec{\n\t\t\t\tupdate.ResourceSpecAll,\n\t\t\t},\n\t\t\tImageSpec: update.ImageSpecLatest,\n\t\t},\n\t}\n\tcheckUpdateSpec := func(s update.Spec) error {\n\t\tif !reflect.DeepEqual(updateSpec, s) {\n\t\t\treturn errors.New(\"expected != actual\")\n\t\t}\n\t\treturn nil\n\t}\n\n\tmock := &MockServer{\n\t\tListServicesAnswer: serviceAnswer,\n\t\tListImagesAnswer: imagesAnswer,\n\t\tUpdateManifestsArgTest: checkUpdateSpec,\n\t\tUpdateManifestsAnswer: job.ID(guid.New()),\n\t\tSyncStatusAnswer: syncStatusAnswer,\n\t}\n\n\tctx := context.Background()\n\n\t// OK, here we go\n\tclient := wrap(mock)\n\n\tif err := client.Ping(ctx); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tss, err := client.ListServices(ctx, namespace)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif !reflect.DeepEqual(ss, 
mock.ListServicesAnswer) {\n\t\tt.Error(fmt.Errorf(\"expected:\\n%#v\\ngot:\\n%#v\", mock.ListServicesAnswer, ss))\n\t}\n\tmock.ListServicesError = fmt.Errorf(\"list services query failure\")\n\tss, err = client.ListServices(ctx, namespace)\n\tif err == nil {\n\t\tt.Error(\"expected error from ListServices, got nil\")\n\t}\n\n\tims, err := client.ListImagesWithOptions(ctx, v10.ListImagesOptions{\n\t\tSpec: update.ResourceSpecAll,\n\t})\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif !reflect.DeepEqual(ims, mock.ListImagesAnswer) {\n\t\tt.Error(fmt.Errorf(\"expected:\\n%#v\\ngot:\\n%#v\", mock.ListImagesAnswer, ims))\n\t}\n\tmock.ListImagesError = fmt.Errorf(\"list images error\")\n\tif _, err = client.ListImagesWithOptions(ctx, v10.ListImagesOptions{\n\t\tSpec: update.ResourceSpecAll,\n\t}); err == nil {\n\t\tt.Error(\"expected error from ListImages, got nil\")\n\t}\n\n\tjobid, err := mock.UpdateManifests(ctx, updateSpec)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif jobid != mock.UpdateManifestsAnswer {\n\t\tt.Error(fmt.Errorf(\"expected %q, got %q\", mock.UpdateManifestsAnswer, jobid))\n\t}\n\tmock.UpdateManifestsError = fmt.Errorf(\"update manifests error\")\n\tif _, err = client.UpdateManifests(ctx, updateSpec); err == nil {\n\t\tt.Error(\"expected error from UpdateManifests, got nil\")\n\t}\n\n\tchange := v9.Change{Kind: v9.GitChange, Source: v9.GitUpdate{URL: \"[email protected]:foo/bar\"}}\n\tif err := client.NotifyChange(ctx, change); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tsyncSt, err := client.SyncStatus(ctx, \"HEAD\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif !reflect.DeepEqual(mock.SyncStatusAnswer, syncSt) {\n\t\tt.Errorf(\"expected: %#v\\ngot: %#v\", mock.SyncStatusAnswer, syncSt)\n\t}\n}", "func (f *SourceFetcher) Fetch(url string, namespace string) (*info.ImageProperties, bool, error) {\n\tc := make(chan FetchResult)\n\tgo f.uniqueFetchSource(c, url, namespace)\n\tr := <-c\n\treturn r.ImageDetails, r.Downloaded, r.Error\n}", "func TestGetStatusByHostnameAtTimestamp4(t *testing.T) {\n\n\tbefore(t, dbStorage, dbSchema)\n\n\tprivateIPs := []string{\"44.33.22.11\"}\n\tpublicIPs := []string{\"88.77.66.55\"} // nolint\n\thostnames := []string{\"yahoo.com\"} // nolint\n\ttimestamp, _ := time.Parse(time.RFC3339, \"2019-08-09T08:29:35+00:00\")\n\n\tfakeCloudAssetChange := newFakeCloudAssetChange(privateIPs, publicIPs, hostnames, timestamp, `arn`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\ttimestamp2, _ := time.Parse(time.RFC3339, \"2019-08-12T08:29:35+00:00\") // August 12\n\tprivateIPs2 := []string{\"4.3.2.1\"}\n\tpublicIPs2 := []string{\"8.7.6.5\"}\n\tfakeCloudAssetChange2 := newFakeCloudAssetChange(privateIPs2, publicIPs2, hostnames, timestamp2, `arn2`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange2); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\thostname := \"yahoo.com\" // nolint\n\tat, _ := time.Parse(time.RFC3339, \"2019-08-11T08:29:35+00:00\") // query is for status on August 11\n\tnetworkChangeEvents, err := dbStorage.FetchByHostname(ctx, at, hostname)\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tassert.Equal(t, 1, len(networkChangeEvents))\n\n\texpected := []domain.CloudAssetDetails{\n\t\tdomain.CloudAssetDetails{nil, []string{\"88.77.66.55\"}, []string{\"yahoo.com\"}, \"rtype\", \"aid\", \"region\", \"arn\", nil, domain.AccountOwner{}}, // nolint\n\t}\n\n\tassertArrayEqualIgnoreOrder(t, expected, 
networkChangeEvents)\n\n}", "func (f *fetcher) fetchImage(img string, asc string, discover bool) (string, error) {\n\tif f.withDeps && !discover {\n\t\treturn \"\", fmt.Errorf(\"cannot fetch image's dependencies with discovery disabled\")\n\t}\n\thash, err := f.fetchSingleImage(img, asc, discover)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif f.withDeps {\n\t\terr = f.fetchImageDeps(hash)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn hash, nil\n}", "func TestUpdate(t *testing.T) {\n\n\t// Test Data\n\tk8sNamespace1 := \"TestK8SNamespace1\"\n\tk8sNamespace2 := \"TestK8SNamespace2\"\n\n\tkafkaSecretName1 := \"TestKafkaSecretName1\"\n\tkafkaSecretName2 := \"TestKafkaSecretName2\"\n\tkafkaSecretName3 := \"TestKafkaSecretName3\"\n\n\tkafkaSecretBrokers1 := \"TestKafkaSecretBrokers1\"\n\tkafkaSecretBrokers2 := \"TestKafkaSecretBrokers2\"\n\tkafkaSecretBrokers3 := \"TestKafkaSecretBrokers3\"\n\n\tkafkaSecretUsername1 := \"TestKafkaSecretUsername1\"\n\tkafkaSecretUsername2 := \"TestKafkaSecretUsername2\"\n\tkafkaSecretUsername3 := \"TestKafkaSecretUsername3\"\n\n\tkafkaSecretPassword1 := \"TestKafkaSecretPassword1\"\n\tkafkaSecretPassword2 := \"TestKafkaSecretPassword2\"\n\tkafkaSecretPassword3 := \"TestKafkaSecretPassword3\"\n\n\tkafkaSecretNamespace1 := \"TestKafkaSecretNamespace1\"\n\tkafkaSecretNamespace2 := \"TestKafkaSecretNamespace2\"\n\tkafkaSecretNamespace3 := \"TestKafkaSecretNamespace3\"\n\n\tkafkaSecret1 := createKafkaSecret(kafkaSecretName1, k8sNamespace1, kafkaSecretBrokers1, kafkaSecretUsername1, kafkaSecretPassword1, kafkaSecretNamespace1)\n\tkafkaSecret2 := createKafkaSecret(kafkaSecretName2, k8sNamespace1, kafkaSecretBrokers2, kafkaSecretUsername2, kafkaSecretPassword2, kafkaSecretNamespace2)\n\tkafkaSecret3 := createKafkaSecret(kafkaSecretName3, k8sNamespace2, kafkaSecretBrokers3, kafkaSecretUsername3, kafkaSecretPassword3, kafkaSecretNamespace3)\n\n\tHubEntityName1 := \"TestHubEntityName1\"\n\tHubEntityName2 := \"TestHubEntityName2\"\n\tHubEntityName3 := \"TestHubEntityName3\"\n\n\thubEntity1 := createEventHubEntity(HubEntityName1)\n\thubEntity2 := createEventHubEntity(HubEntityName2)\n\thubEntity3 := createEventHubEntity(HubEntityName3)\n\n\t// Create A Test Logger\n\tlogger := logtesting.TestLogger(t).Desugar()\n\n\t// Create A Cache To Test\n\tcache := &Cache{\n\t\tlogger: logger,\n\t\tk8sClient: fake.NewSimpleClientset(kafkaSecret1, kafkaSecret2, kafkaSecret3),\n\t\tk8sNamespace: k8sNamespace1,\n\t\tnamespaceMap: make(map[string]*Namespace),\n\t\teventhubMap: make(map[string]*Namespace),\n\t}\n\n\t// Create Some Mock HubManagers To Return EventHubs For Azure Namespace List Queries\n\tmockHubManager1 := &MockHubManager{ListHubEntities: []*eventhub.HubEntity{hubEntity1, hubEntity2}}\n\tmockHubManager2 := &MockHubManager{ListHubEntities: []*eventhub.HubEntity{hubEntity3}}\n\n\t// Replace The NewHubManagerFromConnectionString Wrapper To Provide Mock Implementation & Defer Reset\n\tnewHubManagerFromConnectionStringWrapperPlaceholder := NewHubManagerFromConnectionStringWrapper\n\tNewHubManagerFromConnectionStringWrapper = func(connectionString string) (managerInterface HubManagerInterface, e error) {\n\t\tif strings.Contains(connectionString, kafkaSecretPassword1) {\n\t\t\treturn mockHubManager1, nil\n\t\t} else if strings.Contains(connectionString, kafkaSecretPassword2) {\n\t\t\treturn mockHubManager2, nil\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"unexpected test connectionString '%s'\", connectionString)\n\t\t}\n\t}\n\tdefer func() { 
NewHubManagerFromConnectionStringWrapper = newHubManagerFromConnectionStringWrapperPlaceholder }()\n\n\t// Perform The Test\n\terr := cache.Update(context.TODO())\n\n\t// Verify Results\n\tassert.Nil(t, err)\n\tassert.Len(t, cache.eventhubMap, 3) // The Number Of HubEntities Returned From List\n\tcacheEventHubNamespace1 := cache.eventhubMap[hubEntity1.Name]\n\tcacheEventHubNamespace2 := cache.eventhubMap[hubEntity2.Name]\n\tcacheEventHubNamespace3 := cache.eventhubMap[hubEntity3.Name]\n\tassert.NotNil(t, cacheEventHubNamespace1)\n\tassert.NotNil(t, cacheEventHubNamespace2)\n\tassert.NotNil(t, cacheEventHubNamespace3)\n\tassert.Equal(t, kafkaSecretNamespace1, cacheEventHubNamespace1.Name)\n\tassert.Equal(t, kafkaSecretNamespace1, cacheEventHubNamespace2.Name)\n\tassert.Equal(t, kafkaSecretNamespace2, cacheEventHubNamespace3.Name)\n}", "func (m *MockRepository) FetchAll(bucketName string, modelFn func([]byte) (db.Model, error)) (interface{}, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"FetchAll\", bucketName, modelFn)\n\tret0, _ := ret[0].(interface{})\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestGetStatusByHostnameAtTimestamp3(t *testing.T) {\n\n\tbefore(t, dbStorage, dbSchema)\n\n\tprivateIPs := []string{\"44.33.22.11\"}\n\tpublicIPs := []string{\"88.77.66.55\"} // nolint\n\thostnames := []string{\"yahoo.com\"} // nolint\n\ttimestamp, _ := time.Parse(time.RFC3339, \"2019-08-09T08:29:35+00:00\")\n\n\tfakeCloudAssetChange := newFakeCloudAssetChange(privateIPs, publicIPs, hostnames, timestamp, `arn`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\ttimestamp2, _ := time.Parse(time.RFC3339, \"2019-08-11T08:29:35+00:00\") // August 11\n\tprivateIPs2 := []string{\"4.3.2.1\"}\n\tpublicIPs2 := []string{\"8.7.6.5\"}\n\tfakeCloudAssetChange2 := newFakeCloudAssetChange(privateIPs2, publicIPs2, hostnames, timestamp2, `arn2`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange2); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\thostname := \"yahoo.com\" // nolint\n\tat, _ := time.Parse(time.RFC3339, \"2019-08-12T08:29:35+00:00\") // query is for status on August 12\n\tnetworkChangeEvents, err := dbStorage.FetchByHostname(ctx, at, hostname)\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tassert.Equal(t, 2, len(networkChangeEvents))\n\n\texpected := []domain.CloudAssetDetails{\n\t\tdomain.CloudAssetDetails{nil, []string{\"88.77.66.55\"}, []string{\"yahoo.com\"}, \"rtype\", \"aid\", \"region\", \"arn\", nil, domain.AccountOwner{}}, // nolint\n\t\tdomain.CloudAssetDetails{nil, []string{\"8.7.6.5\"}, []string{\"yahoo.com\"}, \"rtype\", \"aid\", \"region\", \"arn2\", nil, domain.AccountOwner{}}, // nolint\n\t}\n\n\tassertArrayEqualIgnoreOrder(t, expected, networkChangeEvents)\n\n}", "func (m *MockService) Update(arg0 interface{}) error {\n\tret := m.ctrl.Call(m, \"Update\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockAPI) RefreshStatus(arg0 context.Context, arg1 *models.Host, arg2 *gorm.DB) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"RefreshStatus\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}" ]
[ "0.7315873", "0.6466252", "0.621011", "0.58941764", "0.5832389", "0.5714767", "0.57144743", "0.5696898", "0.56600785", "0.5652856", "0.5563077", "0.5445605", "0.54443705", "0.5374082", "0.53736216", "0.5362618", "0.5343925", "0.533864", "0.532144", "0.53188276", "0.5303848", "0.5292312", "0.5290114", "0.5281899", "0.52689815", "0.5256937", "0.5243443", "0.51999325", "0.51975834", "0.5187384", "0.51852524", "0.5174279", "0.5172064", "0.5162931", "0.51595217", "0.51483405", "0.5134183", "0.5121002", "0.5106018", "0.5079694", "0.5070232", "0.50697476", "0.506625", "0.50638455", "0.50460374", "0.50445884", "0.50182384", "0.5012236", "0.50102895", "0.5002038", "0.49830067", "0.49820563", "0.49805045", "0.4973599", "0.49644107", "0.4958075", "0.49558002", "0.4953657", "0.49455148", "0.4937509", "0.49309063", "0.492474", "0.4921105", "0.49199718", "0.4910066", "0.49026412", "0.490193", "0.4897672", "0.4892741", "0.48926032", "0.48921138", "0.4889113", "0.48882192", "0.48837382", "0.48828596", "0.48774096", "0.48751155", "0.48744744", "0.4873111", "0.48719501", "0.48701808", "0.48655865", "0.48641217", "0.48626474", "0.4857851", "0.48533094", "0.48415402", "0.4840228", "0.4840204", "0.48379937", "0.48353893", "0.48313665", "0.48291758", "0.4828443", "0.48241717", "0.48183075", "0.4814759", "0.48138478", "0.480723", "0.48052147" ]
0.805092
0
FetchUpdatedLocalImage indicates an expected call of FetchUpdatedLocalImage
func (mr *MockFetcherMockRecorder) FetchUpdatedLocalImage(arg0, arg1, arg2 interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchUpdatedLocalImage", reflect.TypeOf((*MockFetcher)(nil).FetchUpdatedLocalImage), arg0, arg1, arg2)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *MockFetcher) FetchUpdatedLocalImage(arg0 context.Context, arg1 string, arg2 io.Writer) (image.Image, error) {\n\tret := m.ctrl.Call(m, \"FetchUpdatedLocalImage\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(image.Image)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockFetcher) FetchLocalImage(arg0 string) (image.Image, error) {\n\tret := m.ctrl.Call(m, \"FetchLocalImage\", arg0)\n\tret0, _ := ret[0].(image.Image)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (mr *MockFetcherMockRecorder) FetchLocalImage(arg0 interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"FetchLocalImage\", reflect.TypeOf((*MockFetcher)(nil).FetchLocalImage), arg0)\n}", "func TestFetch(t *testing.T) {\n\timage := \"rkt-inspect-fetch.aci\"\n\n\tctx := newRktRunCtx()\n\tdefer ctx.cleanup()\n\n\t// Fetch the image for the first time, this should write the image to the\n\t// on-disk store.\n\toldHash := patchImportAndFetchHash(image, []string{\"--exec=/inspect --read-file\"}, t, ctx)\n\n\t// Fetch the image with the same name but different content, the expecting\n\t// result is that we should get a different hash since we are not fetching\n\t// from the on-disk store.\n\tnewHash := patchImportAndFetchHash(image, []string{\"--exec=/inspect --read-file --write-file\"}, t, ctx)\n\n\tif oldHash == newHash {\n\t\tt.Fatalf(\"ACI hash should be different as the image has changed\")\n\t}\n}", "func fetchLocal(f *File) error {\n\n\terr := validateLocal(f)\n\tif err != nil {\n\t\tf.Status.Type = status.ERROR\n\t\treturn err\n\t}\n\tf.path = f.Url\n\tf.Status.Type = status.FETCHED\n\treturn nil\n\n}", "func TestCannotExecuteStatusImage(t *testing.T) {\n\tbuf := setLogBuffer()\n\tdefer func() {\n\t\tif t.Failed() {\n\t\t\tt.Log(buf.String())\n\t\t}\n\t}()\n\n\tif StatusImage == \"\" {\n\t\tt.Skip(\"no status image defined\")\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\n\tapp := &models.App{Name: id.New().String()}\n\tapp = ensureApp(t, app)\n\n\tfn := &models.Fn{\n\t\tAppID: app.ID,\n\t\tName: id.New().String(),\n\t\tImage: StatusImage,\n\t\tResourceConfig: models.ResourceConfig{\n\t\t\tMemory: memory,\n\t\t},\n\t}\n\tfn = ensureFn(t, fn)\n\n\tlb, err := LB()\n\tif err != nil {\n\t\tt.Fatalf(\"Got unexpected error: %v\", err)\n\t}\n\tu := url.URL{\n\t\tScheme: \"http\",\n\t\tHost: lb,\n\t}\n\tu.Path = path.Join(u.Path, \"invoke\", fn.ID)\n\n\tcontent := bytes.NewBuffer([]byte(`status`))\n\toutput := &bytes.Buffer{}\n\n\tresp, err := callFN(ctx, u.String(), content, output, models.TypeSync)\n\tif err != nil {\n\t\tt.Fatalf(\"Got unexpected error: %v\", err)\n\t}\n\n\tif resp.StatusCode != http.StatusBadRequest {\n\t\tt.Fatalf(\"StatusCode check failed on %v\", resp.StatusCode)\n\t}\n}", "func TestIsImageInLocalRegistry(t *testing.T) {\n\ttype testDef struct {\n\t\timageName string\n\t\tdocker test.FakeDockerClient\n\t\texpectedResult bool\n\t\texpectedError string\n\t}\n\ttests := map[string]testDef{\n\t\t\"ImageFound\": {\"a_test_image\", test.FakeDockerClient{}, true, \"\"},\n\t\t\"ImageNotFound\": {\"a_test_image:sometag\", test.FakeDockerClient{}, false, \"unable to get metadata for a_test_image:sometag\"},\n\t}\n\n\tfor test, def := range tests {\n\t\tdh := getDocker(&def.docker)\n\t\tfake := dh.kubeDockerClient.(*dockertools.FakeDockerClient)\n\t\tif def.expectedResult {\n\t\t\tfake.Image = &dockertypes.ImageInspect{ID: def.imageName}\n\t\t}\n\n\t\tresult, err := 
dh.IsImageInLocalRegistry(def.imageName)\n\n\t\tif e := fake.AssertCalls([]string{\"inspect_image\"}); e != nil {\n\t\t\tt.Errorf(\"%+v\", e)\n\t\t}\n\n\t\tif result != def.expectedResult {\n\t\t\tt.Errorf(\"Test - %s: Expected result: %v. Got: %v\", test, def.expectedResult, result)\n\t\t}\n\t\tif err != nil && len(def.expectedError) > 0 && !strings.Contains(err.Error(), def.expectedError) {\n\t\t\tt.Errorf(\"Test - %s: Expected error: Got: %+v\", test, err)\n\t\t}\n\t}\n}", "func (o *ImageImportManifest) GetLocalImageIdOk() (*string, bool) {\n\tif o == nil || o.LocalImageId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.LocalImageId, true\n}", "func (m *MockFetcher) FetchRemoteImage(arg0 string) (image.Image, error) {\n\tret := m.ctrl.Call(m, \"FetchRemoteImage\", arg0)\n\tret0, _ := ret[0].(image.Image)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestImplicitFetch(t *testing.T) {\n\tfoundMsg := \"found image in local store\"\n\n\tctx := newRktRunCtx()\n\tdefer ctx.cleanup()\n\n\t// 1. Fetch the image.\n\t// TODO(yifan): Add other ACI with different schemes.\n\timportImageAndFetchHash(t, ctx, \"docker://busybox:ubuntu-12.04\")\n\timportImageAndFetchHash(t, ctx, \"docker://busybox:latest\")\n\n\t// 2. Try run/prepare with/without tag ':latest', should not get $foundMsg.\n\tcmds := []string{\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --mds-register=false docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --mds-register=false docker://busybox:latest\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare docker://busybox:latest\", ctx.cmd()),\n\t}\n\n\tfor _, cmd := range cmds {\n\t\tt.Logf(\"Running test %v\", cmd)\n\n\t\tchild, err := gexpect.Spawn(cmd)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Cannot exec rkt: %v\", err)\n\t\t}\n\t\tif err := expectWithOutput(child, foundMsg); err == nil {\n\t\t\tt.Fatalf(\"%q should not be found\", foundMsg)\n\t\t}\n\t\tif err := child.Wait(); err != nil {\n\t\t\tt.Fatalf(\"rkt didn't terminate correctly: %v\", err)\n\t\t}\n\t}\n}", "func (f OriginalFetcher) Fetch(namespace string, sourceURL string, imageHash string) (info *info.ImageProperties, downloaded bool, err error) {\n\tif sourceURL == \"\" && imageHash == \"\" {\n\t\treturn nil, false, fmt.Errorf(\"Missing Hash & URL\")\n\t}\n\n\tif imageHash != \"\" {\n\t\tinfo, err = f.fetchFromStore(namespace, imageHash)\n\t}\n\n\tif sourceURL != \"\" && (err != nil || imageHash == \"\") {\n\t\tinfo, downloaded, err = f.fetchFromSource(namespace, sourceURL)\n\t}\n\n\treturn info, downloaded, err\n}", "func ttrPushImageAsserting(ctx context.Context, t *testing.T, client apiclient.APIClient, image string) {\n\trc, err := client.ImagePush(ctx, ttrImageName(image), types.ImagePushOptions{RegistryAuth: \"{}\"})\n\tassert.Assert(t, err)\n\tif rc != nil {\n\t\tbody, err := readAllAndClose(rc)\n\t\tassert.Assert(t, err)\n\t\tassert.Assert(t, strings.Contains(body, `\"status\":\"latest: digest: `))\n\t}\n}", "func (mr *MockFetcherMockRecorder) FetchRemoteImage(arg0 interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"FetchRemoteImage\", reflect.TypeOf((*MockFetcher)(nil).FetchRemoteImage), arg0)\n}", "func ttrPullImageAsserting(ctx context.Context, t *testing.T, client apiclient.APIClient, image string) {\n\terr := ttrPullImage(ctx, client, image)\n\tassert.NilError(t, err)\n}", "func retagLocalImageForRemotePush(localTag string, 
remoteUrl string) string {\n\tnewTag := fmt.Sprintf(\"%s/%s\", remoteUrl, localTag)\n\tdockerTag(localTag, newTag)\n\treturn newTag\n}", "func (f *fetcher) fetchImageFrom(appName string, aciURL, ascURL, scheme string, ascFile *os.File, latest bool) (string, error) {\n\tvar rem *store.Remote\n\n\tif f.insecureSkipVerify {\n\t\tif f.ks != nil {\n\t\t\tstderr(\"rkt: warning: TLS verification and signature verification has been disabled\")\n\t\t}\n\t} else if scheme == \"docker\" {\n\t\treturn \"\", fmt.Errorf(\"signature verification for docker images is not supported (try --insecure-skip-verify)\")\n\t}\n\n\tif (f.local && scheme != \"file\") || (scheme != \"file\" && !latest) {\n\t\tvar err error\n\t\tok := false\n\t\trem, ok, err = f.s.GetRemote(aciURL)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif ok {\n\t\t\tif f.local {\n\t\t\t\tstderr(\"rkt: using image in local store for app %s\", appName)\n\t\t\t\treturn rem.BlobKey, nil\n\t\t\t}\n\t\t\tif useCached(rem.DownloadTime, rem.CacheMaxAge) {\n\t\t\t\tstderr(\"rkt: found image in local store, skipping fetching from %s\", aciURL)\n\t\t\t\treturn rem.BlobKey, nil\n\t\t\t}\n\t\t}\n\t\tif f.local {\n\t\t\treturn \"\", fmt.Errorf(\"url %s not available in local store\", aciURL)\n\t\t}\n\t}\n\n\tif scheme != \"file\" && f.debug {\n\t\tstderr(\"rkt: fetching image from %s\", aciURL)\n\t}\n\n\tvar etag string\n\tif rem != nil {\n\t\tetag = rem.ETag\n\t}\n\tentity, aciFile, cd, err := f.fetch(appName, aciURL, ascURL, ascFile, etag)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif cd != nil && cd.useCached {\n\t\tif rem != nil {\n\t\t\treturn rem.BlobKey, nil\n\t\t} else {\n\t\t\t// should never happen\n\t\t\tpanic(\"asked to use cached image but remote is nil\")\n\t\t}\n\t}\n\tif scheme != \"file\" {\n\t\tdefer os.Remove(aciFile.Name())\n\t}\n\n\tif entity != nil && !f.insecureSkipVerify {\n\t\tstderr(\"rkt: signature verified:\")\n\t\tfor _, v := range entity.Identities {\n\t\t\tstderr(\" %s\", v.Name)\n\t\t}\n\t}\n\tkey, err := f.s.WriteACI(aciFile, latest)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif scheme != \"file\" {\n\t\trem := store.NewRemote(aciURL, ascURL)\n\t\trem.BlobKey = key\n\t\trem.DownloadTime = time.Now()\n\t\tif cd != nil {\n\t\t\trem.ETag = cd.etag\n\t\t\trem.CacheMaxAge = cd.maxAge\n\t\t}\n\t\terr = f.s.WriteRemote(rem)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn key, nil\n}", "func updateImageResource(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {\n\t// Warning or errors can be collected in a slice type\n\tvar diags diag.Diagnostics\n\n\tclient := (meta.(Client)).Client\n\tname := rdEntryStr(d, \"name\")\n\tid := rdEntryStr(d, \"id\")\n\terrMsgPrefix := getErrMsgPrefix(\"Image\", name, id, \"Update\")\n\tif client == nil {\n\t\treturn diag.Errorf(\"%s nil Client\", errMsgPrefix)\n\t}\n\tcfg, err := getImage(client, name, id)\n\tif err != nil {\n\t\treturn diag.Errorf(\"%s err: %s\", errMsgPrefix, err.Error())\n\t}\n\tlog.Printf(\"[INFO] Updating Image: %s (ID: %s)\", name, cfg.ID)\n\terr = updateImageCfgFromResourceData(cfg, d)\n\tif err != nil {\n\t\treturn diag.Errorf(\"%s err: %s\", errMsgPrefix, err.Error())\n\t}\n\tclient.XRequestIdPrefix = \"TF-image-update\"\n\turlExtension := getImageUrl(name, id, \"update\")\n\trspData := &swagger_models.ZsrvResponse{}\n\t_, err = client.SendReq(\"PUT\", urlExtension, cfg, rspData)\n\tif err != nil {\n\t\treturn diag.Errorf(\"%s Request Failed. 
err: %s\", errMsgPrefix, err.Error())\n\t}\n\treturn diags\n}", "func (c *repoCacheManager) fetchImages(tags []string) (fetchImagesResult, error) {\n\timages := map[string]image.Info{}\n\n\t// Create a list of images that need updating\n\tvar toUpdate []imageToUpdate\n\n\t// Counters for reporting what happened\n\tvar missing, refresh int\n\tfor _, tag := range tags {\n\t\tif tag == \"\" {\n\t\t\treturn fetchImagesResult{}, fmt.Errorf(\"empty tag in fetched tags\")\n\t\t}\n\n\t\t// See if we have the manifest already cached\n\t\tnewID := c.repoID.ToRef(tag)\n\t\tkey := NewManifestKey(newID.CanonicalRef())\n\t\tbytes, deadline, err := c.cacheClient.GetKey(key)\n\t\t// If err, then we don't have it yet. Update.\n\t\tswitch {\n\t\tcase err != nil: // by and large these are cache misses, but any error shall count as \"not found\"\n\t\t\tif err != ErrNotCached {\n\t\t\t\tc.logger.Log(\"warning\", \"error from cache\", \"err\", err, \"ref\", newID)\n\t\t\t}\n\t\t\tmissing++\n\t\t\ttoUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: initialRefresh})\n\t\tcase len(bytes) == 0:\n\t\t\tc.logger.Log(\"warning\", \"empty result from cache\", \"ref\", newID)\n\t\t\tmissing++\n\t\t\ttoUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: initialRefresh})\n\t\tdefault:\n\t\t\tvar entry registry.ImageEntry\n\t\t\tif err := json.Unmarshal(bytes, &entry); err == nil {\n\t\t\t\tif c.trace {\n\t\t\t\t\tc.logger.Log(\"trace\", \"found cached manifest\", \"ref\", newID, \"last_fetched\", entry.LastFetched.Format(time.RFC3339), \"deadline\", deadline.Format(time.RFC3339))\n\t\t\t\t}\n\n\t\t\t\tif entry.ExcludedReason == \"\" {\n\t\t\t\t\timages[tag] = entry.Info\n\t\t\t\t\tif c.now.After(deadline) {\n\t\t\t\t\t\tpreviousRefresh := minRefresh\n\t\t\t\t\t\tlastFetched := entry.Info.LastFetched\n\t\t\t\t\t\tif !lastFetched.IsZero() {\n\t\t\t\t\t\t\tpreviousRefresh = deadline.Sub(lastFetched)\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttoUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: previousRefresh, previousDigest: entry.Info.Digest})\n\t\t\t\t\t\trefresh++\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif c.trace {\n\t\t\t\t\t\tc.logger.Log(\"trace\", \"excluded in cache\", \"ref\", newID, \"reason\", entry.ExcludedReason)\n\t\t\t\t\t}\n\t\t\t\t\tif c.now.After(deadline) {\n\t\t\t\t\t\ttoUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: excludedRefresh})\n\t\t\t\t\t\trefresh++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tresult := fetchImagesResult{\n\t\timagesFound: images,\n\t\timagesToUpdate: toUpdate,\n\t\timagesToUpdateRefreshCount: refresh,\n\t\timagesToUpdateMissingCount: missing,\n\t}\n\n\treturn result, nil\n}", "func (suite *APIImageInspectSuite) TestImageInspectOk(c *check.C) {\n\tvar (\n\t\trepo = environment.BusyboxRepo\n\t\ttag = \"1.24\"\n\n\t\tid = \"sha256:ca3d7d608b8a8bbaaac2c350bd0f9588cce0509ada74108d5c4b2afb24c46125\"\n\t\tdig = \"sha256:840f2b98a2540ff1d265782c42543dbec7218d3ab0e73b296d7dac846f146e27\"\n\t)\n\n\trepoTag := fmt.Sprintf(\"%s:%s\", repo, tag)\n\trepoDigest := fmt.Sprintf(\"%s@%s\", repo, dig)\n\n\tfor _, image := range []string{\n\t\tid,\n\t\trepoTag,\n\t\trepoDigest,\n\t\tfmt.Sprintf(\"%s:whatever@%s\", repo, dig),\n\t} {\n\t\tresp, err := request.Get(\"/images/\" + image + \"/json\")\n\t\tc.Assert(err, check.IsNil)\n\t\tCheckRespStatus(c, resp, 200)\n\n\t\tgot := types.ImageInfo{}\n\t\terr = request.DecodeBody(&got, resp.Body)\n\t\tc.Assert(err, check.IsNil)\n\n\t\t// TODO: More specific check is 
needed\n\t\tc.Assert(got.Config, check.NotNil)\n\t\tc.Assert(got.ID, check.Equals, id)\n\t\tc.Assert(got.CreatedAt, check.NotNil)\n\t\tc.Assert(got.Size, check.NotNil)\n\t\tc.Assert(reflect.DeepEqual(got.RepoTags, []string{repoTag}), check.Equals, true)\n\t\tc.Assert(reflect.DeepEqual(got.RepoDigests, []string{repoDigest}), check.Equals, true)\n\t}\n}", "func (m *MockAPI) UpdateImageStatus(arg0 context.Context, arg1 *models.Host, arg2 *models.ContainerImageAvailability, arg3 *gorm.DB) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateImageStatus\", arg0, arg1, arg2, arg3)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func fetchUpdatableImageRepos(registry registry.Registry, updateable []*WorkloadUpdate, logger log.Logger) (ImageRepos, error) {\n\treturn FetchImageRepos(registry, workloadContainers(updateable), logger)\n}", "func pullMissingImage(ctx context.Context, apiClient client.CommonAPIClient, image string, force bool) error {\n\tif !force {\n\t\t_, inspectError := apiClient.ImageInspect(ctx, image)\n\t\tif inspectError == nil {\n\t\t\treturn nil\n\t\t}\n\t\tif err, ok := inspectError.(client.RespError); !ok {\n\t\t\treturn inspectError\n\t\t} else if err.Code() != http.StatusNotFound {\n\t\t\treturn inspectError\n\t\t}\n\t}\n\n\tnamedRef, err := reference.Parse(image)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnamedRef = reference.TrimTagForDigest(reference.WithDefaultTagIfMissing(namedRef))\n\n\tvar name, tag string\n\tif reference.IsNameTagged(namedRef) {\n\t\tname, tag = namedRef.Name(), namedRef.(reference.Tagged).Tag()\n\t} else {\n\t\tname = namedRef.String()\n\t}\n\n\tresponseBody, err := apiClient.ImagePull(ctx, name, tag, fetchRegistryAuth(namedRef.Name()))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to pull image: %v\", err)\n\t}\n\tdefer responseBody.Close()\n\n\treturn showProgress(responseBody)\n}", "func (s *FileStore) ImageStatus(name string) (*Image, error) {\n\ts.Lock()\n\tdefer s.Unlock()\n\treturn s.imageStatusUnlocked(name)\n}", "func (c *repoCacheManager) updateImages(ctx context.Context, images []imageToUpdate) (map[string]image.Info, int, int) {\n\t// The upper bound for concurrent fetches against a single host is\n\t// w.Burst, so limit the number of fetching goroutines to that.\n\tfetchers := make(chan struct{}, c.burst)\n\tawaitFetchers := &sync.WaitGroup{}\n\n\tctxc, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\tvar successCount int\n\tvar manifestUnknownCount int\n\tvar result = map[string]image.Info{}\n\tvar warnAboutRateLimit sync.Once\nupdates:\n\tfor _, up := range images {\n\t\t// to avoid race condition, when accessing it in the go routine\n\t\tupCopy := up\n\t\tselect {\n\t\tcase <-ctxc.Done():\n\t\t\tbreak updates\n\t\tcase fetchers <- struct{}{}:\n\t\t}\n\t\tawaitFetchers.Add(1)\n\t\tgo func() {\n\t\t\tdefer func() { awaitFetchers.Done(); <-fetchers }()\n\t\t\tctxcc, cancel := context.WithTimeout(ctxc, c.clientTimeout)\n\t\t\tdefer cancel()\n\t\t\tentry, err := c.updateImage(ctxcc, upCopy)\n\t\t\tif err != nil {\n\t\t\t\tif err, ok := errors.Cause(err).(net.Error); (ok && err.Timeout()) || ctxcc.Err() == context.DeadlineExceeded {\n\t\t\t\t\t// This was due to a context timeout, don't bother logging\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tswitch {\n\t\t\t\tcase strings.Contains(err.Error(), \"429\"), strings.Contains(err.Error(), \"toomanyrequests\"):\n\t\t\t\t\t// abort the image tags fetching if we've been rate limited\n\t\t\t\t\twarnAboutRateLimit.Do(func() {\n\t\t\t\t\t\tc.logger.Log(\"warn\", \"aborting 
image tag fetching due to rate limiting, will try again later\")\n\t\t\t\t\t\tcancel()\n\t\t\t\t\t})\n\t\t\t\tcase strings.Contains(err.Error(), \"manifest unknown\"):\n\t\t\t\t\t// Registry is corrupted, keep going, this manifest may not be relevant for automatic updates\n\t\t\t\t\tc.Lock()\n\t\t\t\t\tmanifestUnknownCount++\n\t\t\t\t\tc.Unlock()\n\t\t\t\t\tc.logger.Log(\"warn\", fmt.Sprintf(\"manifest for tag %s missing in repository %s\", up.ref.Tag, up.ref.Name),\n\t\t\t\t\t\t\"impact\", \"flux will fail to auto-release workloads with matching images, ask the repository administrator to fix the inconsistency\")\n\t\t\t\tdefault:\n\t\t\t\t\tc.logger.Log(\"err\", err, \"ref\", up.ref)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tc.Lock()\n\t\t\tsuccessCount++\n\t\t\tif entry.ExcludedReason == \"\" {\n\t\t\t\tresult[upCopy.ref.Tag] = entry.Info\n\t\t\t}\n\t\t\tc.Unlock()\n\t\t}()\n\t}\n\tawaitFetchers.Wait()\n\treturn result, successCount, manifestUnknownCount\n}", "func LoadLocalImage(app *AppData) error {\n\tapp.LocalImage = DockerImage{\n\t\tExists: false,\n\t}\n\tctx := context.Background()\n\tcli, err := client.NewClientWithOpts(client.FromEnv)\n\tcli.NegotiateAPIVersion(ctx)\n\tdefer cli.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\tinspect, _, err := cli.ImageInspectWithRaw(ctx, app.From)\n\tif err != nil {\n\t\tif err.Error() == \"Error: No such image: \"+app.From {\n\t\t\tfmt.Printf(\"Repo not exists in local docker\\n\")\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tfmt.Printf(\"Repo exists in local docker\\n\")\n\tapp.LocalImage.Exists = true\n\tif len(inspect.RepoDigests) > 0 {\n\t\tapp.LocalImage.DockerDigest = inspect.RepoDigests[0]\n\t} else {\n\t\tapp.LocalImage.DockerDigest = inspect.ID\n\t}\n\n\t//Setting Docker Config values\n\tconfigData, err := json.Marshal(inspect.Config)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = pvjson.Unmarshal(configData, &app.LocalImage.DockerConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (imp *Importer) fetchLocalImages() {\n items, err := ioutil.ReadDir(STORE_DIR)\n if err != nil {\n imp.sendErr(err)\n return\n }\n\n for _, info := range items {\n if info.IsDir() { continue }\n filename := info.Name()\n\n file, err := os.Open(fmt.Sprintf(\"%s/%s\", STORE_DIR, filename))\n if err != nil {\n imp.sendErr(err)\n return\n }\n\n img, err := jpeg.Decode(file)\n if err != nil {\n log.Printf(\"Error decoding image file %s to jpeg\\n\", filename)\n continue\n }\n\n ext := filepath.Ext(filename)\n id := filename[:len(filename)-len(ext)]\n\n imp.send(&imagedata.ImageData{Id: id, Data: &img})\n }\n}", "func notifyNewImage(basePath, imageUrl string) error {\n\thook := os.Getenv(\"WEB_HOOK\")\n\tif hook == \"\" {\n\t\treturn nil\n\t}\n\n\ttype photo struct {\n\t\tUrl string `json:\"remote_url\"`\n\t}\n\n\ttype payload struct {\n\t\tPhoto *photo `json:\"photo\"`\n\t}\n\n\tbody := &payload{&photo{Url: imageUrl}}\n\tb, err := json.Marshal(body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfullURL := fmt.Sprintf(\"%s/buckets/%s/photos.json\", hook, basePath)\n\n\tlog.Infof(\"Notifying hook at %s\", fullURL)\n\n\treq, err := http.NewRequest(\"POST\", fullURL, bytes.NewBuffer(b))\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode < 200 || resp.StatusCode > 299 {\n\t\treturn fmt.Errorf(\"expecting status code in 200 .. 
299 got %d\", resp.StatusCode)\n\t}\n\tlog.Infof(\"Notification sent to %s\", hook)\n\treturn nil\n}", "func (f MockFetch) Fetch(targetServer Server) ServerStatus {\n\tif targetServer.ID == 196 {\n\t\treturn ServerStatus{targetServer.ID, false, \"404\", targetServer.URL, time.Now()}\n\t}\n\treturn ServerStatus{targetServer.ID, true, \"\", targetServer.URL, time.Now()}\n}", "func (o *VulnUpdateNotificationPayloadAllOf) GetImageDigestOk() (*string, bool) {\n\tif o == nil || o.ImageDigest == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ImageDigest, true\n}", "func (v *Virt) ImageLocalDigests(ctx context.Context, image string) (digests []string, err error) {\n\treturn\n}", "func diffImage(c *Client, desired, actual *Image, opts ...dcl.ApplyOption) ([]imageDiff, error) {\n\tif desired == nil || actual == nil {\n\t\treturn nil, fmt.Errorf(\"nil resource passed to diff - always a programming error: %#v, %#v\", desired, actual)\n\t}\n\n\tvar diffs []imageDiff\n\tif !dcl.IsZeroValue(desired.Description) && !dcl.StringCanonicalize(desired.Description, actual.Description) {\n\t\tc.Config.Logger.Infof(\"Detected diff in Description.\\nDESIRED: %v\\nACTUAL: %v\", desired.Description, actual.Description)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"Description\",\n\t\t})\n\t}\n\tif !reflect.DeepEqual(desired.DiskSizeGb, actual.DiskSizeGb) {\n\t\tc.Config.Logger.Infof(\"Detected diff in DiskSizeGb.\\nDESIRED: %v\\nACTUAL: %v\", desired.DiskSizeGb, actual.DiskSizeGb)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"DiskSizeGb\",\n\t\t})\n\t}\n\tif !dcl.IsZeroValue(desired.Family) && !dcl.StringCanonicalize(desired.Family, actual.Family) {\n\t\tc.Config.Logger.Infof(\"Detected diff in Family.\\nDESIRED: %v\\nACTUAL: %v\", desired.Family, actual.Family)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"Family\",\n\t\t})\n\t}\n\tif compareImageGuestOsFeatureSlice(c, desired.GuestOsFeature, actual.GuestOsFeature) {\n\t\tc.Config.Logger.Infof(\"Detected diff in GuestOsFeature.\\nDESIRED: %v\\nACTUAL: %v\", desired.GuestOsFeature, actual.GuestOsFeature)\n\t\ttoAdd, toRemove := compareImageGuestOsFeatureSets(c, desired.GuestOsFeature, actual.GuestOsFeature)\n\t\tif len(toAdd) > 0 {\n\t\t\tdiffs = append(diffs, imageDiff{\n\t\t\t\tRequiresRecreate: true,\n\t\t\t\tFieldName: \"GuestOsFeature\",\n\t\t\t})\n\t\t}\n\t\tif len(toRemove) > 0 {\n\t\t\tdiffs = append(diffs, imageDiff{\n\t\t\t\tRequiresRecreate: true,\n\t\t\t\tFieldName: \"GuestOsFeature\",\n\t\t\t})\n\t\t}\n\t}\n\tif compareImageImageEncryptionKey(c, desired.ImageEncryptionKey, actual.ImageEncryptionKey) {\n\t\tc.Config.Logger.Infof(\"Detected diff in ImageEncryptionKey.\\nDESIRED: %v\\nACTUAL: %v\", desired.ImageEncryptionKey, actual.ImageEncryptionKey)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"ImageEncryptionKey\",\n\t\t})\n\t}\n\tif !dcl.MapEquals(desired.Labels, actual.Labels, []string(nil)) {\n\t\tc.Config.Logger.Infof(\"Detected diff in Labels.\\nDESIRED: %v\\nACTUAL: %v\", desired.Labels, actual.Labels)\n\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tUpdateOp: &updateImageSetLabelsOperation{},\n\t\t\tFieldName: \"Labels\",\n\t\t})\n\n\t}\n\tif !dcl.StringSliceEquals(desired.License, actual.License) {\n\t\tc.Config.Logger.Infof(\"Detected diff in License.\\nDESIRED: %v\\nACTUAL: %v\", desired.License, actual.License)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: 
true,\n\t\t\tFieldName: \"License\",\n\t\t})\n\t}\n\tif !dcl.IsZeroValue(desired.Name) && !dcl.StringCanonicalize(desired.Name, actual.Name) {\n\t\tc.Config.Logger.Infof(\"Detected diff in Name.\\nDESIRED: %v\\nACTUAL: %v\", desired.Name, actual.Name)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"Name\",\n\t\t})\n\t}\n\tif compareImageShieldedInstanceInitialState(c, desired.ShieldedInstanceInitialState, actual.ShieldedInstanceInitialState) {\n\t\tc.Config.Logger.Infof(\"Detected diff in ShieldedInstanceInitialState.\\nDESIRED: %v\\nACTUAL: %v\", desired.ShieldedInstanceInitialState, actual.ShieldedInstanceInitialState)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"ShieldedInstanceInitialState\",\n\t\t})\n\t}\n\tif !dcl.IsZeroValue(desired.SourceDisk) && !dcl.StringCanonicalize(desired.SourceDisk, actual.SourceDisk) {\n\t\tc.Config.Logger.Infof(\"Detected diff in SourceDisk.\\nDESIRED: %v\\nACTUAL: %v\", desired.SourceDisk, actual.SourceDisk)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"SourceDisk\",\n\t\t})\n\t}\n\tif compareImageSourceDiskEncryptionKey(c, desired.SourceDiskEncryptionKey, actual.SourceDiskEncryptionKey) {\n\t\tc.Config.Logger.Infof(\"Detected diff in SourceDiskEncryptionKey.\\nDESIRED: %v\\nACTUAL: %v\", desired.SourceDiskEncryptionKey, actual.SourceDiskEncryptionKey)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"SourceDiskEncryptionKey\",\n\t\t})\n\t}\n\tif !dcl.IsZeroValue(desired.SourceImage) && !dcl.StringCanonicalize(desired.SourceImage, actual.SourceImage) {\n\t\tc.Config.Logger.Infof(\"Detected diff in SourceImage.\\nDESIRED: %v\\nACTUAL: %v\", desired.SourceImage, actual.SourceImage)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"SourceImage\",\n\t\t})\n\t}\n\tif compareImageSourceImageEncryptionKey(c, desired.SourceImageEncryptionKey, actual.SourceImageEncryptionKey) {\n\t\tc.Config.Logger.Infof(\"Detected diff in SourceImageEncryptionKey.\\nDESIRED: %v\\nACTUAL: %v\", desired.SourceImageEncryptionKey, actual.SourceImageEncryptionKey)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"SourceImageEncryptionKey\",\n\t\t})\n\t}\n\tif !dcl.IsZeroValue(desired.SourceImageId) && !dcl.StringCanonicalize(desired.SourceImageId, actual.SourceImageId) {\n\t\tc.Config.Logger.Infof(\"Detected diff in SourceImageId.\\nDESIRED: %v\\nACTUAL: %v\", desired.SourceImageId, actual.SourceImageId)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"SourceImageId\",\n\t\t})\n\t}\n\tif !dcl.IsZeroValue(desired.SourceSnapshot) && !dcl.StringCanonicalize(desired.SourceSnapshot, actual.SourceSnapshot) {\n\t\tc.Config.Logger.Infof(\"Detected diff in SourceSnapshot.\\nDESIRED: %v\\nACTUAL: %v\", desired.SourceSnapshot, actual.SourceSnapshot)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"SourceSnapshot\",\n\t\t})\n\t}\n\tif compareImageSourceSnapshotEncryptionKey(c, desired.SourceSnapshotEncryptionKey, actual.SourceSnapshotEncryptionKey) {\n\t\tc.Config.Logger.Infof(\"Detected diff in SourceSnapshotEncryptionKey.\\nDESIRED: %v\\nACTUAL: %v\", desired.SourceSnapshotEncryptionKey, actual.SourceSnapshotEncryptionKey)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"SourceSnapshotEncryptionKey\",\n\t\t})\n\t}\n\tif 
!dcl.IsZeroValue(desired.SourceSnapshotId) && !dcl.StringCanonicalize(desired.SourceSnapshotId, actual.SourceSnapshotId) {\n\t\tc.Config.Logger.Infof(\"Detected diff in SourceSnapshotId.\\nDESIRED: %v\\nACTUAL: %v\", desired.SourceSnapshotId, actual.SourceSnapshotId)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"SourceSnapshotId\",\n\t\t})\n\t}\n\tif !reflect.DeepEqual(desired.SourceType, actual.SourceType) {\n\t\tc.Config.Logger.Infof(\"Detected diff in SourceType.\\nDESIRED: %v\\nACTUAL: %v\", desired.SourceType, actual.SourceType)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"SourceType\",\n\t\t})\n\t}\n\tif !dcl.StringSliceEquals(desired.StorageLocation, actual.StorageLocation) {\n\t\tc.Config.Logger.Infof(\"Detected diff in StorageLocation.\\nDESIRED: %v\\nACTUAL: %v\", desired.StorageLocation, actual.StorageLocation)\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tRequiresRecreate: true,\n\t\t\tFieldName: \"StorageLocation\",\n\t\t})\n\t}\n\tif compareImageDeprecated(c, desired.Deprecated, actual.Deprecated) {\n\t\tc.Config.Logger.Infof(\"Detected diff in Deprecated.\\nDESIRED: %v\\nACTUAL: %v\", desired.Deprecated, actual.Deprecated)\n\n\t\tdiffs = append(diffs, imageDiff{\n\t\t\tUpdateOp: &updateImageDeprecateOperation{},\n\t\t\tFieldName: \"Deprecated\",\n\t\t})\n\n\t}\n\t// We need to ensure that this list does not contain identical operations *most of the time*.\n\t// There may be some cases where we will need multiple copies of the same operation - for instance,\n\t// if a resource has multiple prerequisite-containing fields. For now, we don't know of any\n\t// such examples and so we deduplicate unconditionally.\n\n\t// The best way for us to do this is to iterate through the list\n\t// and remove any copies of operations which are identical to a previous operation.\n\t// This is O(n^2) in the number of operations, but n will always be very small,\n\t// even 10 would be an extremely high number.\n\tvar opTypes []string\n\tvar deduped []imageDiff\n\tfor _, d := range diffs {\n\t\t// Two operations are considered identical if they have the same type.\n\t\t// The type of an operation is derived from the name of the update method.\n\t\tif !dcl.StringSliceContains(fmt.Sprintf(\"%T\", d.UpdateOp), opTypes) {\n\t\t\tdeduped = append(deduped, d)\n\t\t\topTypes = append(opTypes, fmt.Sprintf(\"%T\", d.UpdateOp))\n\t\t} else {\n\t\t\tc.Config.Logger.Infof(\"Omitting planned operation of type %T since once is already scheduled.\", d.UpdateOp)\n\t\t}\n\t}\n\n\treturn deduped, nil\n}", "func TestNoUpdate(t *testing.T) {\n\tlocLoadFile, err := NewIpLocDictFile(\"testdata/iplocation.txt\", 1000000)\n\tif err != nil {\n\t\tt.Errorf(\"should return nil but return error[%s]\", err.Error())\n\t\treturn\n\t}\n\n\t//load ip location file\n\t_, err = locLoadFile.CheckAndLoad(\"\")\n\tif err != nil {\n\t\tt.Errorf(\"should return nil but return error[%s]\", err.Error())\n\t\treturn\n\t}\n\n\tversion := locLoadFile.version\n\t_, err = locLoadFile.CheckAndLoad(version)\n\tif err != ErrNoNeedUpdate {\n\t\tt.Errorf(\"should return ErrNoNeedUpdate but return error[%s]\", err.Error())\n\t\treturn\n\t}\n\n\tversion = version + \"1\"\n\t_, err = locLoadFile.CheckAndLoad(version)\n\tif err != nil {\n\t\tt.Errorf(\"should return nil but return error[%s]\", err.Error())\n\t\treturn\n\t}\n}", "func TestApply(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"hash error\", func(t *testing.T) {\n\t\ttask := fetch.Fetch{\n\t\t\tSource: 
\"https://github.com/asteris-llc/converge/releases/download/0.2.0/converge_0.2.0_darwin_amd64.tar.gz\",\n\t\t\tDestination: \"/tmp/converge.tar.gz\",\n\t\t\tHashType: \"invalid\",\n\t\t\tHash: \"notarealhashbutstringnonetheless\",\n\t\t}\n\t\tdefer os.Remove(task.Destination)\n\n\t\tstatus, err := task.Apply(context.Background())\n\n\t\tassert.EqualError(t, err, fmt.Sprintf(\"will not attempt file fetch: unsupported hashType %q\", task.HashType))\n\t\tassert.True(t, status.HasChanges())\n\t})\n\n\tt.Run(\"source error\", func(t *testing.T) {\n\t\tm := &MockDiff{}\n\t\ttask := fetch.Fetch{\n\t\t\tSource: \":test\",\n\t\t\tDestination: \"/tmp/fetch_test.txt\",\n\t\t\tForce: true,\n\t\t}\n\t\tdefer os.Remove(task.Destination)\n\n\t\tstat := resource.NewStatus()\n\t\tstat.RaiseLevel(resource.StatusWillChange)\n\t\tm.On(\"DiffFile\", nil, stat).Return(stat, nil)\n\n\t\tstatus, err := task.Apply(context.Background())\n\n\t\tassert.EqualError(t, err, fmt.Sprintf(\"could not parse source: parse %s: missing protocol scheme\", task.Source))\n\t\tassert.False(t, status.HasChanges())\n\t})\n\n\tt.Run(\"failed to fetch\", func(t *testing.T) {\n\t\tm := &MockDiff{}\n\t\ttask := fetch.Fetch{\n\t\t\tSource: \"\",\n\t\t\tDestination: \"/tmp/fetch_test.txt\",\n\t\t\tForce: true,\n\t\t}\n\t\tdefer os.Remove(task.Destination)\n\n\t\tstat := resource.NewStatus()\n\t\tm.On(\"DiffFile\", nil, stat).Return(stat, nil)\n\n\t\tstatus, err := task.Apply(context.Background())\n\n\t\tassert.EqualError(t, err, \"failed to fetch: source path must be a file\")\n\t\tassert.False(t, status.HasChanges())\n\t})\n\n\tt.Run(\"with checksum\", func(t *testing.T) {\n\t\tt.Run(\"file exists\", func(t *testing.T) {\n\t\t\tsrc, err := ioutil.TempFile(\"\", \"fetch_test.txt\")\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer os.Remove(src.Name())\n\n\t\t\tdest, err := ioutil.TempFile(\"\", \"fetch_test.txt\")\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer os.Remove(dest.Name())\n\n\t\t\thash, err := getHash(src.Name(), string(fetch.HashMD5))\n\t\t\trequire.NoError(t, err)\n\n\t\t\tm := &MockDiff{}\n\t\t\ttask := fetch.Fetch{\n\t\t\t\tSource: src.Name(),\n\t\t\t\tDestination: dest.Name(),\n\t\t\t\tHashType: string(fetch.HashMD5),\n\t\t\t\tHash: hex.EncodeToString(hash.Sum(nil)),\n\t\t\t}\n\t\t\tdefer os.Remove(task.Destination)\n\n\t\t\tstat := resource.NewStatus()\n\t\t\tm.On(\"DiffFile\", nil, stat).Return(stat, nil)\n\n\t\t\tstatus, err := task.Apply(context.Background())\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Contains(t, status.Messages(), \"file exists\")\n\t\t\tassert.False(t, status.HasChanges())\n\t\t})\n\n\t\tt.Run(\"force=true\", func(t *testing.T) {\n\t\t\tsrc, err := ioutil.TempFile(\"\", \"fetch_test.txt\")\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer os.Remove(src.Name())\n\n\t\t\tdest, err := ioutil.TempFile(\"\", \"fetch_test.txt\")\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer os.Remove(dest.Name())\n\n\t\t\thash, err := getHash(src.Name(), string(fetch.HashMD5))\n\t\t\trequire.NoError(t, err)\n\n\t\t\tm := &MockDiff{}\n\t\t\ttask := fetch.Fetch{\n\t\t\t\tSource: src.Name(),\n\t\t\t\tDestination: dest.Name(),\n\t\t\t\tHashType: string(fetch.HashMD5),\n\t\t\t\tHash: \"notarealhashbutstringnonetheless\",\n\t\t\t\tForce: true,\n\t\t\t}\n\t\t\tdefer os.Remove(task.Destination)\n\n\t\t\tstat := resource.NewStatus()\n\t\t\tm.On(\"DiffFile\", hash, stat).Return(stat, nil)\n\n\t\t\tstatus, err := task.Apply(context.Background())\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Contains(t, status.Messages(), \"fetched 
successfully\")\n\t\t\tassert.Equal(t, hex.EncodeToString(hash.Sum(nil)), status.Diffs()[\"checksum\"].Original())\n\t\t\tassert.Equal(t, task.Hash, status.Diffs()[\"checksum\"].Current())\n\t\t\tassert.True(t, status.HasChanges())\n\t\t})\n\n\t\tt.Run(\"force=false\", func(t *testing.T) {\n\t\t\tsrc, err := ioutil.TempFile(\"\", \"fetch_test.txt\")\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer os.Remove(src.Name())\n\n\t\t\tdest, err := ioutil.TempFile(\"\", \"fetch_test.txt\")\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer os.Remove(dest.Name())\n\n\t\t\thash, err := getHash(src.Name(), string(fetch.HashMD5))\n\t\t\trequire.NoError(t, err)\n\n\t\t\tm := &MockDiff{}\n\t\t\ttask := fetch.Fetch{\n\t\t\t\tSource: src.Name(),\n\t\t\t\tDestination: dest.Name(),\n\t\t\t\tHashType: string(fetch.HashMD5),\n\t\t\t\tHash: \"notarealhashbutstringnonetheless\",\n\t\t\t}\n\t\t\tdefer os.Remove(task.Destination)\n\n\t\t\tstat := resource.NewStatus()\n\t\t\tm.On(\"DiffFile\", hash, stat).Return(stat, nil)\n\n\t\t\tstatus, err := task.Apply(context.Background())\n\n\t\t\tassert.EqualError(t, err, \"will not attempt fetch: checksum mismatch\")\n\t\t\tassert.Contains(t, status.Messages(), \"checksum mismatch, use the \\\"force\\\" option to replace\")\n\t\t\tassert.True(t, status.HasChanges())\n\t\t})\n\t})\n\n\tt.Run(\"no checksum\", func(t *testing.T) {\n\t\tt.Run(\"force=true\", func(t *testing.T) {\n\t\t\tsrc, err := ioutil.TempFile(\"\", \"fetch_test.txt\")\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer os.Remove(src.Name())\n\n\t\t\tdest, err := ioutil.TempFile(\"\", \"fetch_test.txt\")\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer os.Remove(dest.Name())\n\n\t\t\tm := &MockDiff{}\n\t\t\ttask := fetch.Fetch{\n\t\t\t\tSource: src.Name(),\n\t\t\t\tDestination: dest.Name(),\n\t\t\t\tForce: true,\n\t\t\t}\n\t\t\tdefer os.Remove(task.Destination)\n\n\t\t\tstat := resource.NewStatus()\n\t\t\tm.On(\"DiffFile\", nil, stat).Return(stat, nil)\n\n\t\t\tstatus, err := task.Apply(context.Background())\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Contains(t, status.Messages(), \"fetched successfully\")\n\t\t\tassert.Equal(t, \"<force fetch>\", status.Diffs()[\"destination\"].Original())\n\t\t\tassert.Equal(t, task.Destination, status.Diffs()[\"destination\"].Current())\n\t\t\tassert.True(t, status.HasChanges())\n\t\t})\n\n\t\tt.Run(\"force=false\", func(t *testing.T) {\n\t\t\tsrc, err := ioutil.TempFile(\"\", \"fetch_test.txt\")\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer os.Remove(src.Name())\n\n\t\t\tdest, err := ioutil.TempFile(\"\", \"fetch_test.txt\")\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer os.Remove(dest.Name())\n\n\t\t\tm := &MockDiff{}\n\t\t\ttask := fetch.Fetch{\n\t\t\t\tSource: src.Name(),\n\t\t\t\tDestination: dest.Name(),\n\t\t\t}\n\t\t\tdefer os.Remove(task.Destination)\n\n\t\t\tstat := resource.NewStatus()\n\t\t\tm.On(\"DiffFile\", nil, stat).Return(stat, nil)\n\n\t\t\tstatus, err := task.Apply(context.Background())\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Contains(t, status.Messages(), \"file exists\")\n\t\t\tassert.False(t, status.HasChanges())\n\t\t})\n\t})\n\n\tt.Run(\"fetch new file\", func(t *testing.T) {\n\t\tt.Run(\"with checksum\", func(t *testing.T) {\n\t\t\tsrc, err := ioutil.TempFile(\"\", \"fetch_test.txt\")\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer os.Remove(src.Name())\n\n\t\t\thash, err := getHash(src.Name(), string(fetch.HashMD5))\n\t\t\trequire.NoError(t, err)\n\n\t\t\tm := &MockDiff{}\n\t\t\ttask := fetch.Fetch{\n\t\t\t\tSource: src.Name(),\n\t\t\t\tDestination: 
\"/tmp/fetch_test2.txt\",\n\t\t\t\tHashType: string(fetch.HashMD5),\n\t\t\t\tHash: hex.EncodeToString(hash.Sum(nil)),\n\t\t\t}\n\t\t\tdefer os.Remove(task.Destination)\n\n\t\t\tstat := resource.NewStatus()\n\t\t\tm.On(\"DiffFile\", hash, stat).Return(stat, nil)\n\n\t\t\tstatus, err := task.Apply(context.Background())\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Contains(t, status.Messages(), \"fetched successfully\")\n\t\t\tassert.Equal(t, \"<absent>\", status.Diffs()[\"destination\"].Original())\n\t\t\tassert.Equal(t, task.Destination, status.Diffs()[\"destination\"].Current())\n\t\t\tassert.True(t, status.HasChanges())\n\t\t})\n\n\t\tt.Run(\"no checksum\", func(t *testing.T) {\n\t\t\tsrc, err := ioutil.TempFile(\"\", \"fetch_test.txt\")\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer os.Remove(src.Name())\n\n\t\t\tm := &MockDiff{}\n\t\t\ttask := fetch.Fetch{\n\t\t\t\tSource: src.Name(),\n\t\t\t\tDestination: \"/tmp/fetch_test2.txt\",\n\t\t\t}\n\t\t\tdefer os.Remove(task.Destination)\n\n\t\t\tstat := resource.NewStatus()\n\t\t\tm.On(\"DiffFile\", nil, stat).Return(stat, nil)\n\n\t\t\tstatus, err := task.Apply(context.Background())\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Contains(t, status.Messages(), \"fetched successfully\")\n\t\t\tassert.Equal(t, \"<absent>\", status.Diffs()[\"destination\"].Original())\n\t\t\tassert.Equal(t, task.Destination, status.Diffs()[\"destination\"].Current())\n\t\t\tassert.True(t, status.HasChanges())\n\t\t})\n\t})\n\n\tt.Run(\"unarchive\", func(t *testing.T) {\n\t\tt.Run(\"dest=file\", func(t *testing.T) {\n\t\t\tsrc, err := ioutil.TempFile(\"\", \"fetch_test.txt\")\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer os.Remove(src.Name())\n\n\t\t\tdest, err := ioutil.TempFile(\"\", \"fetch_test2.txt\")\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer os.Remove(dest.Name())\n\n\t\t\tm := &MockDiff{}\n\t\t\ttask := fetch.Fetch{\n\t\t\t\tSource: src.Name(),\n\t\t\t\tDestination: dest.Name(),\n\t\t\t\tUnarchive: true,\n\t\t\t}\n\t\t\tdefer os.Remove(task.Destination)\n\n\t\t\tstat := resource.NewStatus()\n\t\t\tm.On(\"DiffFile\", nil, stat).Return(stat, nil)\n\n\t\t\tstatus, err := task.Apply(context.Background())\n\n\t\t\tassert.EqualError(t, err, fmt.Sprintf(\"invalid destination %q for unarchiving, must be directory\", task.Destination))\n\t\t\tassert.True(t, status.HasChanges())\n\t\t})\n\n\t\tt.Run(\"dest=dir\", func(t *testing.T) {\n\t\t\tt.Run(\"dest not exist\", func(t *testing.T) {\n\t\t\t\tsrc, err := ioutil.TempFile(\"\", \"fetch_test.txt\")\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tdefer os.Remove(src.Name())\n\n\t\t\t\tm := &MockDiff{}\n\t\t\t\ttask := fetch.Fetch{\n\t\t\t\t\tSource: src.Name(),\n\t\t\t\t\tDestination: \"/tmp/fetch_test12345678\",\n\t\t\t\t\tUnarchive: true,\n\t\t\t\t}\n\t\t\t\tdefer os.RemoveAll(task.Destination)\n\n\t\t\t\tstat := resource.NewStatus()\n\t\t\t\tm.On(\"DiffFile\", nil, stat).Return(stat, nil)\n\n\t\t\t\tstatus, err := task.Apply(context.Background())\n\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Contains(t, status.Messages(), \"fetched successfully\")\n\t\t\t\tassert.Equal(t, \"<absent>\", status.Diffs()[\"destination\"].Original())\n\t\t\t\tassert.Equal(t, task.Destination, status.Diffs()[\"destination\"].Current())\n\t\t\t\tassert.True(t, status.HasChanges())\n\t\t\t})\n\n\t\t\tt.Run(\"dest exists\", func(t *testing.T) {\n\t\t\t\tsrc, err := ioutil.TempFile(\"\", \"fetch_test.txt\")\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tdefer os.Remove(src.Name())\n\n\t\t\t\tdest, err := ioutil.TempDir(\"\", 
\"fetch_test\")\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tdefer os.Remove(dest)\n\n\t\t\t\tm := &MockDiff{}\n\t\t\t\ttask := fetch.Fetch{\n\t\t\t\t\tSource: src.Name(),\n\t\t\t\t\tDestination: dest,\n\t\t\t\t\tUnarchive: true,\n\t\t\t\t}\n\t\t\t\tdefer os.RemoveAll(task.Destination)\n\n\t\t\t\tstat := resource.NewStatus()\n\t\t\t\tm.On(\"DiffFile\", nil, stat).Return(stat, nil)\n\n\t\t\t\tstatus, err := task.Apply(context.Background())\n\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Contains(t, status.Messages(), \"fetched successfully\")\n\t\t\t\tassert.Equal(t, \"<absent>\", status.Diffs()[\"destination\"].Original())\n\t\t\t\tassert.Equal(t, task.Destination, status.Diffs()[\"destination\"].Current())\n\t\t\t\tassert.True(t, status.HasChanges())\n\t\t\t})\n\t\t})\n\t})\n\n\tt.Run(\"context\", func(t *testing.T) {\n\t\ttask := fetch.Fetch{}\n\t\tctx, cancel := context.WithCancel(context.Background())\n\t\tcancel()\n\n\t\tstatus, err := task.Apply(ctx)\n\n\t\tassert.EqualError(t, err, \"context canceled\")\n\t\tassert.Nil(t, status)\n\t})\n}", "func FeedSetFetchSuccess(f *Feed) EntityUpdate {\n\tif f.failingSince == nil {\n\t\treturn noopEntityUpdate(f)\n\t}\n\tnewF := *f\n\tnewF.failingSince = nil\n\treturn newF.update()\n}", "func (sub *Sub) fetchMissingObjects(srpcClient *srpc.Client, image *image.Image,\n\tfreeSpace *uint64, pushComputedFiles bool) (\n\tbool, subStatus) {\n\tif image == nil {\n\t\treturn false, statusImageNotReady\n\t}\n\tlogger := sub.herd.logger\n\tsubObj := lib.Sub{\n\t\tHostname: sub.mdb.Hostname,\n\t\tClient: srpcClient,\n\t\tFileSystem: sub.fileSystem,\n\t\tComputedInodes: sub.computedInodes,\n\t\tObjectCache: sub.objectCache,\n\t\tObjectGetter: sub.herd.objectServer}\n\tobjectsToFetch, objectsToPush := lib.BuildMissingLists(subObj, image,\n\t\tpushComputedFiles, false, logger)\n\tif objectsToPush == nil {\n\t\treturn false, statusMissingComputedFile\n\t}\n\tvar returnAvailable bool = true\n\tvar returnStatus subStatus = statusSynced\n\tif len(objectsToFetch) > 0 {\n\t\tif !sub.checkForEnoughSpace(freeSpace, objectsToFetch) {\n\t\t\treturn false, statusNotEnoughFreeSpace\n\t\t}\n\t\tlogger.Printf(\"Calling %s:Subd.Fetch() for: %d objects\\n\",\n\t\t\tsub, len(objectsToFetch))\n\t\terr := client.Fetch(srpcClient, sub.herd.imageManager.String(),\n\t\t\tobjectcache.ObjectMapToCache(objectsToFetch))\n\t\tif err != nil {\n\t\t\tsrpcClient.Close()\n\t\t\tlogger.Printf(\"Error calling %s:Subd.Fetch(): %s\\n\", sub, err)\n\t\t\tif err == srpc.ErrorAccessToMethodDenied {\n\t\t\t\treturn false, statusFetchDenied\n\t\t\t}\n\t\t\treturn false, statusFailedToFetch\n\t\t}\n\t\treturnAvailable = false\n\t\treturnStatus = statusFetching\n\t}\n\tif len(objectsToPush) > 0 {\n\t\tsub.herd.cpuSharer.GrabSemaphore(sub.herd.pushSemaphore)\n\t\tdefer func() { <-sub.herd.pushSemaphore }()\n\t\tsub.status = statusPushing\n\t\terr := lib.PushObjects(subObj, objectsToPush, logger)\n\t\tif err != nil {\n\t\t\tif err == srpc.ErrorAccessToMethodDenied {\n\t\t\t\treturn false, statusPushDenied\n\t\t\t}\n\t\t\tif err == lib.ErrorFailedToGetObject {\n\t\t\t\treturn false, statusFailedToGetObject\n\t\t\t}\n\t\t\treturn false, statusFailedToPush\n\t\t}\n\t\tif returnAvailable {\n\t\t\t// Update local copy of objectcache, since there will not be\n\t\t\t// another Poll() before the update computation.\n\t\t\tfor hashVal := range objectsToPush {\n\t\t\t\tsub.objectCache = append(sub.objectCache, hashVal)\n\t\t\t}\n\t\t}\n\t}\n\treturn returnAvailable, returnStatus\n}", "func 
TestGetStatusByIPAddressAtTimestamp1(t *testing.T) {\n\n\tbefore(t, dbStorage, dbSchema)\n\n\tprivateIPs := []string{\"44.33.22.11\"}\n\tpublicIPs := []string{\"88.77.66.55\"} // nolint\n\thostnames := []string{\"yahoo.com\"} // nolint\n\ttimestamp, _ := time.Parse(time.RFC3339, \"2019-08-09T08:29:35+00:00\")\n\n\tfakeCloudAssetChange := newFakeCloudAssetChange(privateIPs, publicIPs, hostnames, timestamp, `arn`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tipAddress := \"88.77.66.55\" // nolint\n\tat, _ := time.Parse(time.RFC3339, \"2019-08-10T08:29:35+00:00\")\n\tnetworkChangeEvents, err := dbStorage.FetchByIP(ctx, at, ipAddress)\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tassert.Equal(t, 1, len(networkChangeEvents))\n\n\texpected := []domain.CloudAssetDetails{\n\t\tdomain.CloudAssetDetails{nil, []string{\"88.77.66.55\"}, []string{\"yahoo.com\"}, \"rtype\", \"aid\", \"region\", \"arn\", nil, domain.AccountOwner{}}, // nolint\n\t}\n\n\tassertArrayEqualIgnoreOrder(t, expected, networkChangeEvents)\n\n}", "func (o *ProdutoVM) GetRefImagemOk() (*int64, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.RefImagem.Get(), o.RefImagem.IsSet()\n}", "func (f *SourceFetcher) Fetch(url string, namespace string) (*info.ImageProperties, bool, error) {\n\tc := make(chan FetchResult)\n\tgo f.uniqueFetchSource(c, url, namespace)\n\tr := <-c\n\treturn r.ImageDetails, r.Downloaded, r.Error\n}", "func FeedSetFetchFailed(t time.Time) func(f *Feed) EntityUpdate {\n\treturn func(f *Feed) EntityUpdate {\n\t\tif f.failingSince != nil {\n\t\t\treturn noopEntityUpdate(f)\n\t\t}\n\n\t\tnewF := *f\n\t\tnewF.failingSince = &t\n\t\treturn newF.update()\n\t}\n}", "func (i *Image) resolveStale() error {\n\tif !IsRestoringEnabled() {\n\t\treturn nil\n\t}\n\n\tif i.volatile {\n\t\treturn nil\n\t}\n\tif i.screen {\n\t\treturn nil\n\t}\n\tif !i.stale {\n\t\treturn nil\n\t}\n\treturn i.readPixelsFromGPU()\n}", "func ttrPullImage(ctx context.Context, client apiclient.APIClient, image string) error {\n\trc, err := client.ImagePull(ctx, ttrImageName(image), types.ImagePullOptions{RegistryAuth: \"{}\"})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif rc != nil {\n\t\tbody, err := readAllAndClose(rc)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !strings.Contains(body, \"Status: Downloaded newer image\") {\n\t\t\treturn errors.New(\"image pull not successful\")\n\t\t}\n\t}\n\treturn nil\n}", "func shouldAttemptUpdate(ai time.Duration, now, lastAccessed time.Time, rf randFunc) bool {\n\treturn xFetch(now, lastAccessed.Add(ai), (ai / accessUpdateDeltaFactor), 1, rf)\n}", "func (sub *Sub) checkForUnsafeChange(request subproto.UpdateRequest) bool {\n\tif sub.requiredImage.Filter == nil {\n\t\treturn false // Sparse image: no deletions.\n\t}\n\tif len(sub.requiredImage.FileSystem.InodeTable) <\n\t\tlen(sub.fileSystem.InodeTable)>>1 {\n\t\treturn true\n\t}\n\tif len(request.PathsToDelete) > len(sub.fileSystem.InodeTable)>>1 {\n\t\treturn true\n\t}\n\treturn false\n}", "func (o *LocalDatabaseProvider) GetUpdatedOk() (*time.Time, bool) {\n\tif o == nil || o.Updated == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Updated, true\n}", "func (f *fetcher) fetchImage(img string, asc string, discover bool) (string, error) {\n\tif f.withDeps && !discover {\n\t\treturn \"\", fmt.Errorf(\"cannot fetch image's dependencies with discovery disabled\")\n\t}\n\thash, err := f.fetchSingleImage(img, asc, discover)\n\tif err != 
nil {\n\t\treturn \"\", err\n\t}\n\tif f.withDeps {\n\t\terr = f.fetchImageDeps(hash)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn hash, nil\n}", "func TestUpdateStatusFalse(t *testing.T) {\n\t// Override checkStatus with our fake version\n\tstatusCheckStatus = fakeCheckStatus\n\tdefer func() {\n\t\tstatusCheckStatus = origCheckStatus\n\t}()\n\n\t// Run the msi installer with this status bypass to make status return dalse\n\tmsiItem.DisplayName = statusNoActionNoError\n\t// Run Update\n\tactualOutput := Install(msiItem, \"update\", \"https://example.com\", \"testdata/\", checkOnlyMode)\n\t// Check the result\n\texpectedOutput := \"Item not needed\"\n\tif have, want := actualOutput, expectedOutput; have != want {\n\t\tt.Errorf(\"\\n-----\\nhave\\n%s\\nwant\\n%s\\n-----\", have, want)\n\t}\n\n}", "func UpdateLocalInfoCR(c client.Client, localInfo aciv1.SnatLocalInfo) (reconcile.Result, error) {\n\n\terr := c.Update(context.TODO(), &localInfo)\n\tif err != nil {\n\t\tlog.Error(err, \"failed to update a snat locainfo cr\")\n\t\treturn reconcile.Result{}, err\n\t}\n\tlog.Info(\"Updated localinfo object\", \"SnatLocalInfo: \", localInfo)\n\treturn reconcile.Result{}, nil\n}", "func (imageService Service) ImageStatus(Id string) (Status string, err error) {\n\n\n\turl := strings.TrimSuffix(imageService.URL, \"/\") + \"/images/\" + Id\n\tvar headers http.Header\n\theaders, err = misc.GetHeader(url, imageService.TokenID, imageService.Client)\n\tif err != nil {\n\t\treturn \"\", err\n\t} else {\n\t\tfor header, value := range headers {\n\t\t\t//log.Printf (\"header '%s'='%s'\", header, value[0])\n\t\t\tif strings.ToLower(header) == \"x-image-meta-status\" {\n\t\t\t\tStatus = value[0]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\treturn Status, nil\n}", "func (f *Frontend) fetchImage(i *img.Image) (*img.Image, error) {\n\tvar err error\n\n\t// go through image proxy to resize and cache the image\n\tkey := hmacKey(i.ID)\n\tu := fmt.Sprintf(\"%v/image/225x,s%v/%v\", f.Host, key, i.ID)\n\tfmt.Println(u)\n\n\tresp, err := f.Images.Client.Get(u)\n\tif err != nil {\n\t\treturn i, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbdy, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn i, err\n\t}\n\n\ti.Base64 = base64.StdEncoding.EncodeToString(bdy)\n\treturn i, err\n}", "func TestDaemon_SyncStatus(t *testing.T) {\n\td, start, clean, _, _, _ := mockDaemon(t)\n\tstart()\n\tdefer clean()\n\tw := newWait(t)\n\n\tctx := context.Background()\n\t// Perform a release\n\tid := updateImage(ctx, d, t)\n\n\t// Get the commit id\n\tstat := w.ForJobSucceeded(d, id)\n\n\t// Note: I can't test for an expected number of commits > 0\n\t// because I can't control how fast the sync loop updates the cluster\n\n\t// Once sync'ed to the cluster, it should empty\n\tw.ForSyncStatus(d, stat.Result.Revision, 0)\n}", "func (mr *MockAPIMockRecorder) UpdateImageStatus(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"UpdateImageStatus\", reflect.TypeOf((*MockAPI)(nil).UpdateImageStatus), arg0, arg1, arg2, arg3)\n}", "func (o *V1DownloadSummary) GetResourceUpdatedOnOk() (*time.Time, bool) {\n\tif o == nil || o.ResourceUpdatedOn == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ResourceUpdatedOn, true\n}", "func TestRunPrepareLocal(t *testing.T) {\n\tnotAvailableMsg := \"not available in local store\"\n\tfoundMsg := \"using image in local store\"\n\n\tctx := newRktRunCtx()\n\tdefer ctx.cleanup()\n\n\tcmds := 
[]string{\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --local --mds-register=false docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify run --local --mds-register=false docker://busybox:latest\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare --local docker://busybox\", ctx.cmd()),\n\t\tfmt.Sprintf(\"%s --insecure-skip-verify prepare --local docker://busybox:latest\", ctx.cmd()),\n\t}\n\n\t// 1. Try run/prepare with the image not available in the store, should get $notAvailableMsg.\n\tfor _, cmd := range cmds {\n\t\tt.Logf(\"Running test %v\", cmd)\n\n\t\tchild, err := gexpect.Spawn(cmd)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Cannot exec rkt: %v\", err)\n\t\t}\n\t\tif err := expectWithOutput(child, notAvailableMsg); err != nil {\n\t\t\tt.Fatalf(\"%q should be found\", notAvailableMsg)\n\t\t}\n\t\tchild.Wait()\n\t}\n\n\t// 2. Fetch the image\n\timportImageAndFetchHash(t, ctx, \"docker://busybox\")\n\timportImageAndFetchHash(t, ctx, \"docker://busybox:latest\")\n\n\t// 3. Try run/prepare with the image available in the store, should get $foundMsg.\n\tfor _, cmd := range cmds {\n\t\tt.Logf(\"Running test %v\", cmd)\n\n\t\tchild, err := gexpect.Spawn(cmd)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Cannot exec rkt: %v\", err)\n\t\t}\n\t\tif err := expectWithOutput(child, foundMsg); err != nil {\n\t\t\tt.Fatalf(\"%q should be found\", foundMsg)\n\t\t}\n\t\tif err := child.Wait(); err != nil {\n\t\t\tt.Fatalf(\"rkt didn't terminate correctly: %v\", err)\n\t\t}\n\t}\n}", "func isImagePushingToECRInProgress(s string) bool {\n\treturn strings.Contains(s, \"denied: Your authorization token has expired. Reauthenticate and try again.\") ||\n\t\tstrings.Contains(s, \"no basic auth credentials\")\n}", "func TestGetStatusByHostnameAtTimestamp1(t *testing.T) {\n\n\tbefore(t, dbStorage, dbSchema)\n\n\tprivateIPs := []string{\"44.33.22.11\"}\n\tpublicIPs := []string{\"88.77.66.55\"}\n\thostnames := []string{\"yahoo.com\"} // nolint\n\ttimestamp, _ := time.Parse(time.RFC3339, \"2019-08-09T08:29:35+00:00\")\n\n\tfakeCloudAssetChange := newFakeCloudAssetChange(privateIPs, publicIPs, hostnames, timestamp, `arn`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\thostname := \"yahoo.com\" // nolint\n\tat, _ := time.Parse(time.RFC3339, \"2019-08-10T08:29:35+00:00\")\n\tnetworkChangeEvents, err := dbStorage.FetchByHostname(ctx, at, hostname)\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tassert.Equal(t, 1, len(networkChangeEvents))\n\n\texpected := []domain.CloudAssetDetails{\n\t\tdomain.CloudAssetDetails{ //nolint\n\t\t\tnil,\n\t\t\t[]string{\"88.77.66.55\"},\n\t\t\t[]string{\"yahoo.com\"},\n\t\t\t\"rtype\",\n\t\t\t\"aid\",\n\t\t\t\"region\",\n\t\t\t\"arn\",\n\t\t\tnil,\n\t\t\tdomain.AccountOwner{},\n\t\t},\n\t}\n\n\tassertArrayEqualIgnoreOrder(t, expected, networkChangeEvents)\n\n}", "func (au *autoupdateFixt) PreTest(ctx context.Context, s *testing.FixtTestState) {\n\ts.Log(\"Running PreTest to make sure the image is the right one\")\n\n\t// Check the image version.\n\tif version, err := ImageVersion(ctx, s.DUT(), s.RPCHint()); err != nil {\n\t\ts.Fatal(\"Failed to read DUT image version before the test: \", err)\n\t} else if version != au.originalVersion {\n\t\t// We should not run the test in the wrong version.\n\t\ts.Fatalf(\"The DUT image version before the test is not the original one; got %s, want %s\", version, au.originalVersion)\n\t} else {\n\t\ts.Logf(\"The 
image version in the DUT is %s\", version)\n\t}\n}", "func (suite *APIImageInspectSuite) TestImageInspectNotFound(c *check.C) {\n\tresp, err := request.Get(\"/images/\" + \"TestImageInspectNotFound\" + \"/json\")\n\tc.Assert(err, check.IsNil)\n\tCheckRespStatus(c, resp, 404)\n}", "func TestGetStatusByIPAddressAtTimestamp2(t *testing.T) {\n\n\tbefore(t, dbStorage, dbSchema)\n\n\tprivateIPs := []string{\"44.33.22.11\"}\n\tpublicIPs := []string{\"88.77.66.55\"}\n\thostnames := []string{\"yahoo.com\"} // nolint\n\ttimestamp, _ := time.Parse(time.RFC3339, \"2019-08-09T08:29:35+00:00\")\n\n\tfakeCloudAssetChange := newFakeCloudAssetChange(privateIPs, publicIPs, hostnames, timestamp, `arn`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\t// just reuse the existing struct\n\tfakeCloudAssetChange.ARN = \"arn2\"\n\ttimestamp2, _ := time.Parse(time.RFC3339, \"2019-08-11T08:29:35+00:00\") // August 11\n\tfakeCloudAssetChange.ChangeTime = timestamp2\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tipAddress := \"88.77.66.55\"\n\tat, _ := time.Parse(time.RFC3339, \"2019-08-10T08:29:35+00:00\") // query is for status on August 10\n\tnetworkChangeEvents, err := dbStorage.FetchByIP(ctx, at, ipAddress)\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tassert.Equal(t, 1, len(networkChangeEvents))\n\n\texpected := []domain.CloudAssetDetails{\n\t\tdomain.CloudAssetDetails{nil, []string{\"88.77.66.55\"}, []string{\"yahoo.com\"}, \"rtype\", \"aid\", \"region\", \"arn\", nil, domain.AccountOwner{}}, // nolint\n\t}\n\n\tassertArrayEqualIgnoreOrder(t, expected, networkChangeEvents)\n\n}", "func TestExpectedImgRef(t *testing.T) {\n\n\tv, isSet := os.LookupEnv(\"DOCKERHUB_PROXY\")\n\tif isSet {\n\t\tdefer os.Setenv(\"DOCKERHUB_PROXY\", v)\n\t}\n\n\tos.Unsetenv(\"DOCKERHUB_PROXY\")\n\tassert.Equal(t,\n\t\t\"index.docker.io/library/hello-world@sha256:ebf526c198a14fa138634b9746c50ec38077ec9b3986227e79eb837d26f59dc6\",\n\t\tCompleteImageRef(\"library/hello-world@sha256:ebf526c198a14fa138634b9746c50ec38077ec9b3986227e79eb837d26f59dc6\"))\n\n\tos.Setenv(\"DOCKERHUB_PROXY\", \"my-dockerhub-proxy.tld/dockerhub-proxy\")\n\tassert.Equal(t,\n\t\t\"my-dockerhub-proxy.tld/dockerhub-proxy/library/hello-world@sha256:ebf526c198a14fa138634b9746c50ec38077ec9b3986227e79eb837d26f59dc6\",\n\t\tCompleteImageRef(\"library/hello-world@sha256:ebf526c198a14fa138634b9746c50ec38077ec9b3986227e79eb837d26f59dc6\"))\n\tos.Unsetenv(\"DOCKERHUB_PROXY\")\n}", "func (ss *Sources) localFetch(spec v1.SourceSpec) (string, error) {\n\tp := ss.repoPath(spec)\n\terr := os.MkdirAll(p, 0750)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t//TODO prevent target directory from accumulating unused files\n\t// remove all files before copy\n\t// or\n\t// walk target dir and diff with source dir\n\n\t// Copy local dir to repo path.\n\t// Ignore .git directory.\n\terr = otia10copy.Copy(spec.URL, p, otia10copy.Options{Skip: func(src string) bool {\n\t\treturn filepath.Base(src) == \".git\"\n\t}})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"fetch: %w\", err)\n\t}\n\n\th, err := ss.hashAll(spec.URL) // TODO use hashAll(p) when dir is properly synced (see previous to do)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\ts := hex.EncodeToString(h.Sum(nil))\n\n\treturn s, err\n}", "func fetch(hash string, endpoint string, original []byte, ruid string, tuid string) error {\n\tctx, sp := 
spancontext.StartSpan(context.Background(), \"upload-and-sync.fetch\")\n\tdefer sp.Finish()\n\n\tlog.Info(\"http get request\", \"tuid\", tuid, \"ruid\", ruid, \"endpoint\", endpoint, \"hash\", hash)\n\n\tvar tn time.Time\n\treqUri := endpoint + \"/bzz:/\" + hash + \"/\"\n\treq, _ := http.NewRequest(\"GET\", reqUri, nil)\n\n\topentracing.GlobalTracer().Inject(\n\t\tsp.Context(),\n\t\topentracing.HTTPHeaders,\n\t\topentracing.HTTPHeadersCarrier(req.Header))\n\n\ttrace := client.GetClientTrace(commandName+\" - http get\", commandName, ruid, &tn)\n\n\treq = req.WithContext(httptrace.WithClientTrace(ctx, trace))\n\ttransport := http.DefaultTransport\n\n\t//transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}\n\n\ttn = time.Now()\n\tres, err := transport.RoundTrip(req)\n\tif err != nil {\n\t\tlog.Error(err.Error(), \"ruid\", ruid)\n\t\treturn err\n\t}\n\tlog.Info(\"http get response\", \"tuid\", tuid, \"ruid\", ruid, \"endpoint\", endpoint, \"hash\", hash, \"code\", res.StatusCode, \"len\", res.ContentLength)\n\n\tif res.StatusCode != 200 {\n\t\terr := fmt.Errorf(\"expected status code %d, got %v\", 200, res.StatusCode)\n\t\tlog.Warn(err.Error(), \"ruid\", ruid)\n\t\treturn err\n\t}\n\n\tdefer res.Body.Close()\n\n\trdigest, err := digest(res.Body)\n\tif err != nil {\n\t\tlog.Warn(err.Error(), \"ruid\", ruid)\n\t\treturn err\n\t}\n\n\tif !bytes.Equal(rdigest, original) {\n\t\terr := fmt.Errorf(\"downloaded imported file md5=%x is not the same as the generated one=%x\", rdigest, original)\n\t\tlog.Warn(err.Error(), \"ruid\", ruid)\n\t\treturn err\n\t}\n\n\tlog.Trace(\"downloaded file matches random file\", \"ruid\", ruid, \"len\", res.ContentLength)\n\n\treturn nil\n}", "func (s *DockerHubPullSuite) TestPullNonExistingImage(c *check.C) {\n\ttestRequires(c, DaemonIsLinux)\n\tfor _, e := range []struct {\n\t\tImage string\n\t\tAlias string\n\t}{\n\t\t{\"library/asdfasdf:foobar\", \"asdfasdf:foobar\"},\n\t\t{\"library/asdfasdf:foobar\", \"library/asdfasdf:foobar\"},\n\t\t{\"library/asdfasdf:latest\", \"asdfasdf\"},\n\t\t{\"library/asdfasdf:latest\", \"asdfasdf:latest\"},\n\t\t{\"library/asdfasdf:latest\", \"library/asdfasdf\"},\n\t\t{\"library/asdfasdf:latest\", \"library/asdfasdf:latest\"},\n\t} {\n\t\tout, err := s.CmdWithError(\"pull\", e.Alias)\n\t\tc.Assert(err, checker.NotNil, check.Commentf(\"expected non-zero exit status when pulling non-existing image: %s\", out))\n\t\tc.Assert(out, checker.Contains, fmt.Sprintf(\"Error: image %s not found\", e.Image), check.Commentf(\"expected image not found error messages\"))\n\t}\n}", "func (mr *MockUpstreamIntfMockRecorder) CachedRemoteDigestOfLocalHeight() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CachedRemoteDigestOfLocalHeight\", reflect.TypeOf((*MockUpstreamIntf)(nil).CachedRemoteDigestOfLocalHeight))\n}", "func TestGetStatusByHostnameAtTimestamp2(t *testing.T) {\n\n\tbefore(t, dbStorage, dbSchema)\n\n\tprivateIPs := []string{\"44.33.22.11\"}\n\tpublicIPs := []string{\"88.77.66.55\"} // nolint\n\thostnames := []string{\"yahoo.com\"} // nolint\n\ttimestamp, _ := time.Parse(time.RFC3339, \"2019-08-09T08:29:35+00:00\")\n\n\tfakeCloudAssetChange := newFakeCloudAssetChange(privateIPs, publicIPs, hostnames, timestamp, `arn`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\t// just reuse the existing struct\n\tfakeCloudAssetChange.ARN = \"arn2\"\n\ttimestamp2, _ := time.Parse(time.RFC3339, 
\"2019-08-11T08:29:35+00:00\") // August 11\n\tfakeCloudAssetChange.ChangeTime = timestamp2\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\thostname := \"yahoo.com\" // nolint\n\tat, _ := time.Parse(time.RFC3339, \"2019-08-10T08:29:35+00:00\") // query is for status on August 10\n\tnetworkChangeEvents, err := dbStorage.FetchByHostname(ctx, at, hostname)\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tassert.Equal(t, 1, len(networkChangeEvents))\n\n\texpected := []domain.CloudAssetDetails{{nil, []string{\"88.77.66.55\"}, []string{\"yahoo.com\"}, \"rtype\", \"aid\", \"region\", \"arn\", nil, domain.AccountOwner{}}} // nolint\n\n\tassertArrayEqualIgnoreOrder(t, expected, networkChangeEvents)\n\n}", "func TestRemote(t *testing.T) {\n\trnd, err := random.Image(1024, 3)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ts, err := registry.TLS(\"gcr.io\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ttr := s.Client().Transport\n\n\tsrc := \"gcr.io/test/compressed\"\n\tref, err := name.ParseReference(src)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := remote.Write(ref, rnd, remote.WithTransport(tr)); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\timg, err := remote.Image(ref, remote.WithTransport(tr))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := validate.Image(img); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcf, err := img.ConfigFile()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tm, err := img.Manifest()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tlayer, err := img.LayerByDiffID(cf.RootFS.DiffIDs[0])\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\td, err := layer.Digest()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif diff := cmp.Diff(d, m.Layers[0].Digest); diff != \"\" {\n\t\tt.Errorf(\"mismatched digest: %v\", diff)\n\t}\n}", "func (o *ImageImportManifest) HasLocalImageId() bool {\n\tif o != nil && o.LocalImageId != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (c *IloClient) GetRemoteImageStatusDell() (ImageStatusDell, error) {\n\turl := c.Hostname + \"/redfish/v1/Managers/iDRAC.Embedded.1/VirtualMedia/CD\"\n\n\tresp, _, _, err := queryData(c, \"GET\", url, nil)\n\tif err != nil {\n\t\treturn ImageStatusDell{}, err\n\t}\n\n\tvar x ImageStatusDell\n\n\tjson.Unmarshal(resp, &x)\n\n\treturn x, nil\n}", "func (r *MockRepoManager) mockUpdate() {\n\tr.mtx.Lock()\n\tdefer r.mtx.Unlock()\n\tr.updateCount++\n}", "func (f FetchStruct) FetchFromRemote() *model.FetchResult {\n\trepo := f.Repo\n\tremoteBranch := f.RemoteBranch\n\tremoteName := f.RemoteName\n\n\tlocalRefSpec := \"+refs/heads/\" + remoteBranch\n\ttargetRefPsec := \"refs/remotes/\" + remoteName + \"/\" + remoteBranch\n\ttargetRemote, _ := repo.Remotes.Lookup(remoteName)\n\n\tif targetRemote == nil {\n\t\tlogger.Log(\"Target remote is unavailable\", global.StatusError)\n\t\treturn &model.FetchResult{\n\t\t\tStatus: global.FetchFromRemoteError,\n\t\t\tFetchedItems: nil,\n\t\t}\n\t}\n\n\tvar remoteCallbackObject RemoteCallbackInterface\n\tremoteCallbackObject = &RemoteCallbackStruct{\n\t\tRepoName: f.RepoName,\n\t\tUserName: f.UserName,\n\t\tPassword: f.Password,\n\t\tSSHKeyPath: f.SSHKeyPath,\n\t\tAuthOption: f.AuthOption,\n\t}\n\n\tfetchOption := &git2go.FetchOptions{\n\t\tRemoteCallbacks: remoteCallbackObject.RemoteCallbackSelector(),\n\t}\n\tlogger.Log(fmt.Sprintf(\"Fetching changes from -> %s - %s\", remoteName, localRefSpec+\":\"+targetRefPsec), global.StatusInfo)\n\terr := targetRemote.Fetch([]string{localRefSpec + \":\" + targetRefPsec}, fetchOption, 
\"\")\n\n\tremoteRef, remoteRefErr := repo.References.Lookup(targetRefPsec)\n\tif remoteRefErr == nil {\n\t\tremoteCommit, _ := repo.AnnotatedCommitFromRef(remoteRef)\n\t\tif remoteCommit != nil {\n\t\t\tmergeAnalysis, _, mergeErr := repo.MergeAnalysis([]*git2go.AnnotatedCommit{remoteCommit})\n\t\t\tif mergeErr != nil {\n\t\t\t\tlogger.Log(\"Fetch failed - \"+mergeErr.Error(), global.StatusError)\n\t\t\t\treturn &model.FetchResult{\n\t\t\t\t\tStatus: global.FetchFromRemoteError,\n\t\t\t\t\tFetchedItems: nil,\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif mergeAnalysis&git2go.MergeAnalysisUpToDate != 0 {\n\t\t\t\t\tlogger.Log(\"No new changes to fetch from remote\", global.StatusWarning)\n\t\t\t\t\tmsg := \"No new changes to fetch from remote\"\n\t\t\t\t\treturn &model.FetchResult{\n\t\t\t\t\t\tStatus: global.FetchNoNewChanges,\n\t\t\t\t\t\tFetchedItems: []*string{&msg},\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlogger.Log(\"Fetch failed - \"+err.Error(), global.StatusError)\n\t\t\treturn &model.FetchResult{\n\t\t\t\tStatus: global.FetchFromRemoteError,\n\t\t\t\tFetchedItems: nil,\n\t\t\t}\n\t\t} else {\n\t\t\tmsg := \"Changes fetched from remote \" + remoteName\n\t\t\tlogger.Log(msg, global.StatusInfo)\n\t\t\treturn &model.FetchResult{\n\t\t\t\tStatus: global.FetchFromRemoteSuccess,\n\t\t\t\tFetchedItems: []*string{&msg},\n\t\t\t}\n\t\t}\n\t} else {\n\t\tlogger.Log(\"Fetch failed - \"+remoteRefErr.Error(), global.StatusError)\n\t\treturn &model.FetchResult{\n\t\t\tStatus: global.FetchFromRemoteError,\n\t\t\tFetchedItems: nil,\n\t\t}\n\t}\n}", "func (ts *TeamStatus) UpdateDesiredComponentImageCreatedTime(compName, image string, desiredImageTime DesiredImageTime) {\n\tif ts.DesiredComponentImageCreatedTime == nil {\n\t\tts.DesiredComponentImageCreatedTime = make(map[string]map[string]DesiredImageTime)\n\t}\n\n\tif _, ok := ts.DesiredComponentImageCreatedTime[compName]; !ok {\n\t\tts.DesiredComponentImageCreatedTime[compName] = map[string]DesiredImageTime{\n\t\t\timage: desiredImageTime,\n\t\t}\n\t\treturn\n\t}\n\n\tdescCreatedTime := SortByCreatedTimeDESC(ts.DesiredComponentImageCreatedTime[compName])\n\tif strings.EqualFold(descCreatedTime[0].Image, image) &&\n\t\tdescCreatedTime[0].ImageTime.IsImageMissing == desiredImageTime.IsImageMissing {\n\t\treturn\n\t}\n\n\tts.DesiredComponentImageCreatedTime[compName][image] = desiredImageTime\n}", "func Fetch(fileUrl string, destFile string) {\n\n\treferenceFileIndex, checksumLookup, fileSize, _ := fetchIndex(\"somewhere\")\n\n\tblockCount := fileSize / BLOCK_SIZE\n\tif fileSize%BLOCK_SIZE != 0 {\n\t\tblockCount++\n\t}\n\n\tfs := &gosync.BasicSummary{\n\t\tChecksumIndex: referenceFileIndex,\n\t\tChecksumLookup: checksumLookup,\n\t\tBlockCount: uint(blockCount),\n\t\tBlockSize: uint(BLOCK_SIZE),\n\t\tFileSize: fileSize,\n\t}\n\n\trsyncObject, err := gosync.MakeRSync(\n\t\tdestFile,\n\t\tfileUrl,\n\t\tdestFile,\n\t\tfs,\n\t)\n\n\terr = rsyncObject.Patch()\n\n\tif err != nil {\n\t\tfmt.Printf(\"Error: %v\\n\", err)\n\t\treturn\n\t}\n\n\terr = rsyncObject.Close()\n\n\tif err != nil {\n\t\tfmt.Printf(\"Error: %v\\n\", err)\n\t\treturn\n\t}\n\n}", "func VerifyIntegrity(dc *dockerclient.Client, notaryServerURL, imageRef string) bool {\n\n\tif notaryServerURL == \"\" {\n\t\tlog.Println(\"Notary URL is not specified in flavor.\")\n\t\treturn false\n\t}\n\n\t// Kubelet passes along image references as sha sums\n\t// we need to convert these back to readable names to proceed further\n\tif strings.HasPrefix(imageRef, imageNameShaPrefix) 
{\n\t\timage, err := getImageName(dc, imageRef)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error retrieving the image name and tag.\", err)\n\t\t\treturn false\n\t\t}\n\t\timageRef = image\n\t}\n\n\t// Make sense of the image reference\n\tregistryAddr, imageName, tag, err := util.GetRegistryAddr(imageRef)\n\tif err != nil {\n\t\tlog.Println(\"Failed in parsing Registry Address from Image reference.\", err, imageRef)\n\t\treturn false\n\t}\n\n\tfinalImageRef := \"\"\n\n\t// Handling use case where the tag name is specified with the sha256sum,\n\t// with this the tag parsed by GetRegistryAddr is blank,\n\t// we pass it as is in the form: registry:[port]/imagename@sha256:shasum\n\tif strings.Contains(imageRef, imageTagShaSeparator) {\n\t\tfinalImageRef = registryAddr + \"/\" + imageName + imageTagShaSeparator + strings.Split(imageRef, imageTagShaSeparator)[1]\n\t} else {\n\t\tfinalImageRef = registryAddr + \"/\" + imageName + \":\" + tag\n\t}\n\n\ttrustPullCmd := dockerContentTrustServer + notaryServerURL + \";\" + dockerPullCmd + \" \" + finalImageRef\n\tlog.Println(\"Docker trusted pull command: \", trustPullCmd)\n\n\ttrustPullCmdOut, trustPullCmdErr := exec.Command(\"bash\", \"-c\", trustPullCmd).Output()\n\tlog.Println(\"Trusted pull returned: \", string(trustPullCmdOut))\n\n\t// Was there an error? if yes, then assume not trusted and don't allow\n\tif trustPullCmdErr != nil {\n\t\tlog.Println(\"Trust Inspect returned error: \", trustPullCmdErr.Error())\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (v *IBM) SyncImage(config *types.Config, target lepton.Provider, image string) error {\n\tlog.Warn(\"not yet implemented\")\n\treturn nil\n}", "func prepull(ctx context.Context, req types.FunctionDeployment, client *containerd.Client, alwaysPull bool) (containerd.Image, error) {\n\tstart := time.Now()\n\tr, err := reference.ParseNormalizedNamed(req.Image)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\timgRef := reference.TagNameOnly(r).String()\n\n\tsnapshotter := \"\"\n\tif val, ok := os.LookupEnv(\"snapshotter\"); ok {\n\t\tsnapshotter = val\n\t}\n\n\timage, err := service.PrepareImage(ctx, client, imgRef, snapshotter, alwaysPull)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to pull image %s\", imgRef)\n\t}\n\n\tsize, _ := image.Size(ctx)\n\tlog.Printf(\"Image for: %s size: %d, took: %fs\\n\", image.Name(), size, time.Since(start).Seconds())\n\n\treturn image, nil\n}", "func (c *imageLRUCache) UpdateImage(refOrID string) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\timg, err := c.imageService.GetImage(refOrID)\n\tif err != nil {\n\t\tlogrus.Warnf(\"error getting image: %v\", err)\n\t\treturn\n\t}\n\n\tif e, ok := c.images[img.ID()]; ok {\n\t\tc.evictList.MoveToFront(e)\n\t\tlogrus.Infof(\"Updated image %s, %d/%d (%.3f)\", img.ID(), c.level, c.capacity, c.percent())\n\t\treturn\n\t}\n\tlogrus.Infof(\"Image %s is not in cache\", img.ID())\n}", "func checkImage(image liferay.Image) {\n\texists := docker.CheckDockerImageExists(image.GetFullyQualifiedName())\n\n\tif exists == false {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"image\": image.GetFullyQualifiedName(),\n\t\t}).Warn(\"Image has NOT been pulled from Docker Hub\")\n\t\treturn\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"image\": image.GetFullyQualifiedName(),\n\t}).Info(\"Image has been pulled from Docker Hub\")\n}", "func createImageMirrorForInternalImages(prefix string, ref reference.DockerImageReference, mirrored bool) ([]string, error) {\n\tsource := ref.Exact()\n\n\tinitialDefaults := 
k8simage.GetOriginalImageConfigs()\n\texceptions := image.Exceptions.List()\n\tdefaults := map[k8simage.ImageID]k8simage.Config{}\n\nimageLoop:\n\tfor i, config := range initialDefaults {\n\t\tfor _, exception := range exceptions {\n\t\t\tif strings.Contains(config.GetE2EImage(), exception) {\n\t\t\t\tcontinue imageLoop\n\t\t\t}\n\t\t}\n\t\tdefaults[i] = config\n\t}\n\n\tupdated := k8simage.GetMappedImageConfigs(defaults, ref.Exact())\n\topenshiftDefaults := image.OriginalImages()\n\topenshiftUpdated := image.GetMappedImages(openshiftDefaults, imagesetup.DefaultTestImageMirrorLocation)\n\n\t// if we've mirrored, then the source is going to be our repo, not upstream's\n\tif mirrored {\n\t\tbaseRef, err := reference.Parse(imagesetup.DefaultTestImageMirrorLocation)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid default mirror location: %v\", err)\n\t\t}\n\n\t\t// calculate the mapping of upstream images by setting defaults to baseRef\n\t\tcovered := sets.NewString()\n\t\tfor i, config := range updated {\n\t\t\tdefaultConfig := defaults[i]\n\t\t\tpullSpec := config.GetE2EImage()\n\t\t\tif pullSpec == defaultConfig.GetE2EImage() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif covered.Has(pullSpec) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcovered.Insert(pullSpec)\n\t\t\te2eRef, err := reference.Parse(pullSpec)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid test image: %s: %v\", pullSpec, err)\n\t\t\t}\n\t\t\tif len(e2eRef.Tag) == 0 {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid test image: %s: no tag\", pullSpec)\n\t\t\t}\n\t\t\tconfig.SetRegistry(baseRef.Registry)\n\t\t\tconfig.SetName(baseRef.RepositoryName())\n\t\t\tconfig.SetVersion(e2eRef.Tag)\n\t\t\tdefaults[i] = config\n\t\t}\n\n\t\t// calculate the mapping for openshift images by populating openshiftUpdated\n\t\topenshiftUpdated = make(map[string]string)\n\t\tsourceMappings := image.GetMappedImages(openshiftDefaults, imagesetup.DefaultTestImageMirrorLocation)\n\t\ttargetMappings := image.GetMappedImages(openshiftDefaults, source)\n\n\t\tfor from, to := range targetMappings {\n\t\t\tif from == to {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif covered.Has(to) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcovered.Insert(to)\n\t\t\tfrom := sourceMappings[from]\n\t\t\topenshiftUpdated[from] = to\n\t\t}\n\t}\n\n\tcovered := sets.NewString()\n\tvar lines []string\n\tfor i := range updated {\n\t\ta, b := defaults[i], updated[i]\n\t\tfrom, to := a.GetE2EImage(), b.GetE2EImage()\n\t\tif from == to {\n\t\t\tcontinue\n\t\t}\n\t\tif covered.Has(from) {\n\t\t\tcontinue\n\t\t}\n\t\tcovered.Insert(from)\n\t\tlines = append(lines, fmt.Sprintf(\"%s %s%s\", from, prefix, to))\n\t}\n\n\tfor from, to := range openshiftUpdated {\n\t\tif from == to {\n\t\t\tcontinue\n\t\t}\n\t\tif covered.Has(from) {\n\t\t\tcontinue\n\t\t}\n\t\tcovered.Insert(from)\n\t\tlines = append(lines, fmt.Sprintf(\"%s %s%s\", from, prefix, to))\n\t}\n\n\tsort.Strings(lines)\n\treturn lines, nil\n}", "func TestGetStatusByIPAddressAtTimestamp4(t *testing.T) {\n\n\tbefore(t, dbStorage, dbSchema)\n\n\tprivateIPs := []string{\"44.33.22.11\"}\n\tpublicIPs := []string{\"88.77.66.55\"}\n\thostnames := []string{\"yahoo.com\"} // nolint\n\ttimestamp, _ := time.Parse(time.RFC3339, \"2019-08-09T08:29:35+00:00\")\n\n\tfakeCloudAssetChange := newFakeCloudAssetChange(privateIPs, publicIPs, hostnames, timestamp, `arn`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\ttimestamp2, _ := time.Parse(time.RFC3339, 
\"2019-08-12T08:29:35+00:00\") // August 12\n\thostnames2 := []string{\"blarg.com\"}\n\tfakeCloudAssetChange2 := newFakeCloudAssetChange(privateIPs, publicIPs, hostnames2, timestamp2, `arn2`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange2); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tipAddress := \"88.77.66.55\"\n\tat, _ := time.Parse(time.RFC3339, \"2019-08-11T08:29:35+00:00\") // query is for status on August 11\n\tnetworkChangeEvents, err := dbStorage.FetchByIP(ctx, at, ipAddress)\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tassert.Equal(t, 1, len(networkChangeEvents))\n\n\texpected := []domain.CloudAssetDetails{\n\t\tdomain.CloudAssetDetails{nil, []string{\"88.77.66.55\"}, []string{\"yahoo.com\"}, \"rtype\", \"aid\", \"region\", \"arn\", nil, domain.AccountOwner{}}, // nolint\n\t}\n\n\tassertArrayEqualIgnoreOrder(t, expected, networkChangeEvents)\n\n}", "func (f *fetcher) fetchSingleImage(img string, asc string, discover bool) (string, error) {\n\tvar (\n\t\tascFile *os.File\n\t\terr error\n\t\tlatest bool\n\t)\n\tif asc != \"\" && f.ks != nil {\n\t\tascFile, err = os.Open(asc)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"unable to open signature file: %v\", err)\n\t\t}\n\t\tdefer ascFile.Close()\n\t}\n\n\tu, err := url.Parse(img)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"not a valid image reference (%s)\", img)\n\t}\n\n\t// if img refers to a local file, ensure the scheme is file:// and make the url path absolute\n\t_, err = os.Stat(u.Path)\n\tif err == nil {\n\t\tu.Path, err = filepath.Abs(u.Path)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"unable to get abs path: %v\", err)\n\t\t}\n\t\tu.Scheme = \"file\"\n\t} else if !os.IsNotExist(err) {\n\t\treturn \"\", fmt.Errorf(\"unable to access %q: %v\", img, err)\n\t}\n\n\tif discover && u.Scheme == \"\" {\n\t\tif app := newDiscoveryApp(img); app != nil {\n\t\t\tvar discoveryError error\n\t\t\tif !f.local {\n\t\t\t\tstderr(\"rkt: searching for app image %s\", img)\n\t\t\t\tep, err := discoverApp(app, true)\n\t\t\t\tif err != nil {\n\t\t\t\t\tdiscoveryError = err\n\t\t\t\t} else {\n\t\t\t\t\t// No specified version label, mark it as latest\n\t\t\t\t\tif _, ok := app.Labels[\"version\"]; !ok {\n\t\t\t\t\t\tlatest = true\n\t\t\t\t\t}\n\t\t\t\t\treturn f.fetchImageFromEndpoints(app.Name.String(), ep, ascFile, latest)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif discoveryError != nil {\n\t\t\t\tstderr(\"discovery failed for %q: %v. 
Trying to find image in the store.\", img, discoveryError)\n\t\t\t}\n\t\t\tif f.local || discoveryError != nil {\n\t\t\t\treturn f.fetchImageFromStore(img)\n\t\t\t}\n\t\t}\n\t}\n\n\tswitch u.Scheme {\n\tcase \"http\", \"https\", \"file\":\n\tcase \"docker\":\n\t\tdockerURL := common.ParseDockerURL(path.Join(u.Host, u.Path))\n\t\tif dockerURL.Tag == \"latest\" {\n\t\t\tlatest = true\n\t\t}\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"rkt only supports http, https, docker or file URLs (%s)\", img)\n\t}\n\treturn f.fetchImageFromURL(u.String(), u.Scheme, ascFile, latest)\n}", "func (f FetchStruct) windowsFetch() *model.FetchResult {\n\tvar args []string\n\n\tremoteName := f.RemoteName\n\trepoPath := f.RepoPath\n\tbranch := f.RemoteBranch\n\n\tif remoteName == \"\" && branch == \"\" {\n\t\targs = []string{\"fetch\"}\n\t} else {\n\t\tbranchReference := branch + \":\" + branch\n\t\targs = []string{\"fetch\", remoteName, branchReference}\n\t}\n\tcmd := utils.GetGitClient(repoPath, args)\n\tcmdStr, cmdErr := cmd.Output()\n\n\tif cmdErr != nil {\n\t\tlogger.Log(fmt.Sprintf(\"Fetch failed -> %s\", cmdErr.Error()), global.StatusError)\n\n\t\treturn &model.FetchResult{\n\t\t\tStatus: global.FetchFromRemoteError,\n\t\t\tFetchedItems: nil,\n\t\t}\n\t} else {\n\t\tlogger.Log(fmt.Sprintf(\"Changes fetched from remote - %s\\n%s\", remoteName, cmdStr), global.StatusInfo)\n\n\t\tmsg := fmt.Sprintf(\"Changes fetched from remote %v\", remoteName)\n\t\treturn &model.FetchResult{\n\t\t\tStatus: \"CHANGES FETCHED FROM REMOTE\",\n\t\t\tFetchedItems: []*string{&msg},\n\t\t}\n\t}\n}", "func FetchRemoteFile() {\n\n}", "func inspectImage(appCfg config.App) {\n\timage, err := serviceRuntime.InspectImage(appCfg.Version())\n\tif err != nil {\n\t\tlog.Println(\"error inspecting image\", appCfg.Version())\n\t\treturn\n\t}\n\n\tif utils.StripSHA(image.ID) != appCfg.VersionID() {\n\t\tlog.Printf(\"warning: %s image ID does not match config\", appCfg.Name())\n\t}\n}", "func inspectImage(appCfg config.App) {\n\timage, err := serviceRuntime.InspectImage(appCfg.Version())\n\tif err != nil {\n\t\tlog.Println(\"error inspecting image\", appCfg.Version())\n\t\treturn\n\t}\n\n\tif utils.StripSHA(image.ID) != appCfg.VersionID() {\n\t\tlog.Printf(\"warning: %s image ID does not match config\", appCfg.Name())\n\t}\n}", "func (m *MockUpstreamIntf) CachedRemoteDigestOfLocalHeight() blockdigest.Digest {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CachedRemoteDigestOfLocalHeight\")\n\tret0, _ := ret[0].(blockdigest.Digest)\n\treturn ret0\n}", "func (suite *TestManagerSuite) TestManagerUpdateStatus() {\n\tl, err := suite.m.GetBy(\"d1000\", \"ruuid\", []string{v1.MimeTypeNativeReport})\n\trequire.NoError(suite.T(), err)\n\trequire.Equal(suite.T(), 1, len(l))\n\n\toldSt := l[0].Status\n\n\terr = suite.m.UpdateStatus(\"tid001\", job.SuccessStatus.String(), 10000)\n\trequire.NoError(suite.T(), err)\n\n\tl, err = suite.m.GetBy(\"d1000\", \"ruuid\", []string{v1.MimeTypeNativeReport})\n\trequire.NoError(suite.T(), err)\n\trequire.Equal(suite.T(), 1, len(l))\n\n\tassert.NotEqual(suite.T(), oldSt, l[0].Status)\n\tassert.Equal(suite.T(), job.SuccessStatus.String(), l[0].Status)\n}", "func isStatusCached(localStore *sync.Map, name, status string) bool {\n\tif v, ok := localStore.Load(name); ok && v.(string) == status {\n\t\treturn true\n\t}\n\n\tlocalStore.Store(name, status)\n\treturn false\n}", "func TestGetStatusByIPAddressAtTimestamp3(t *testing.T) {\n\n\tbefore(t, dbStorage, dbSchema)\n\n\tprivateIPs := []string{\"44.33.22.11\"}\n\tpublicIPs := 
[]string{\"88.77.66.55\"}\n\thostnames := []string{\"yahoo.com\"} // nolint\n\ttimestamp, _ := time.Parse(time.RFC3339, \"2019-08-09T08:29:35+00:00\")\n\n\tfakeCloudAssetChange := newFakeCloudAssetChange(privateIPs, publicIPs, hostnames, timestamp, `arn`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\ttimestamp2, _ := time.Parse(time.RFC3339, \"2019-08-11T08:29:35+00:00\") // August 11\n\thostnames2 := []string{\"blarg.com\"}\n\tfakeCloudAssetChange2 := newFakeCloudAssetChange(privateIPs, publicIPs, hostnames2, timestamp2, `arn2`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange2); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tipAddress := \"88.77.66.55\"\n\tat, _ := time.Parse(time.RFC3339, \"2019-08-12T08:29:35+00:00\") // query is for status on August 12\n\tnetworkChangeEvents, err := dbStorage.FetchByIP(ctx, at, ipAddress)\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tassert.Equal(t, 2, len(networkChangeEvents))\n\n\texpected := []domain.CloudAssetDetails{\n\t\tdomain.CloudAssetDetails{nil, []string{\"88.77.66.55\"}, []string{\"yahoo.com\"}, \"rtype\", \"aid\", \"region\", \"arn\", nil, domain.AccountOwner{}}, // nolint\n\t\tdomain.CloudAssetDetails{nil, []string{\"88.77.66.55\"}, []string{\"blarg.com\"}, \"rtype\", \"aid\", \"region\", \"arn2\", nil, domain.AccountOwner{}}, // nolint\n\t}\n\n\tassertArrayEqualIgnoreOrder(t, expected, networkChangeEvents)\n\n}", "func (t *IPDCChaincode) invoke_update_status(stub shim.ChaincodeStubInterface, args []string, map_specification map[string]interface{}) pb.Response {\r\n\r\n\tfmt.Println(\"***********Entering invoke_update_status***********\")\r\n\r\n\tif len(args) < 2 {\r\n\r\n\t\tfmt.Println(\"Error: Incorrect number of arguments\")\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_update_status***********\")\r\n\r\n\t\treturn shim.Error(\"Error: Incorrect number of arguments\")\r\n\t}\r\n\r\n\tvar record_specification map[string]interface{}\r\n\r\n\tvar err error\r\n\r\n\terr = json.Unmarshal([]byte(args[0]), &record_specification)\r\n\r\n\tif err != nil {\r\n\r\n\t\tfmt.Println(\"Error in format of record.\")\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_update_status***********\")\r\n\r\n\t\treturn shim.Error(\"Error in format of record.\")\r\n\t}\r\n\r\n\tadditional_json, ok := map_specification[\"additional_json\"]\r\n\r\n\tif ok {\r\n\r\n\t\tadditional_json_data, ok1 := additional_json.(map[string]interface{})\r\n\r\n\t\tif ok1 {\r\n\r\n\t\t\tfor spec, _ := range additional_json_data {\r\n\r\n\t\t\t\trecord_specification[spec] = additional_json_data[spec]\r\n\t\t\t}\r\n\t\t} else {\r\n\t\t\tfmt.Println(\"Invalid additional JSON fields in specification\")\r\n\r\n\t\t\tfmt.Println(\"***********Exiting invoke_update_status***********\")\r\n\r\n\t\t\treturn shim.Error(\"Invalid additional JSON fields in specification\")\r\n\t\t}\r\n\t}\r\n\r\n\tvar keys_map interface{}\r\n\r\n\tvar specs map[string]interface{}\r\n\r\n\tkeys_map, error_keys_map := t.get_keys_map(stub, record_specification)\r\n\r\n\tif error_keys_map != nil {\r\n\r\n\t\tfmt.Println(error_keys_map.Error())\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_update_status***********\")\r\n\r\n\t\treturn shim.Error(error_keys_map.Error())\r\n\t}\r\n\r\n\tspecs, ok = keys_map.(map[string]interface{})\r\n\r\n\tif !ok {\r\n\r\n\t\tfmt.Println(\"Invalid keys_map specification.\")\r\n\r\n\t\tfmt.Println(\"***********Exiting 
invoke_update_status***********\")\r\n\r\n\t\treturn shim.Error(\"Invalid keys_map specification.\")\r\n\t}\r\n\r\n\tif specs[\"primary_key\"] == nil {\r\n\r\n\t\tfmt.Println(\"There is no primary key specification.\")\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_update_status***********\")\r\n\r\n\t\treturn shim.Error(\"Error : There is no primary key specification.\")\r\n\t}\r\n\r\n\tvar pk_spec []interface{}\r\n\r\n\tpk_spec, ok = specs[\"primary_key\"].([]interface{})\r\n\r\n\tif !ok {\r\n\r\n\t\tfmt.Println(\"Error in Primary key specification.\")\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_update_status***********\")\r\n\r\n\t\treturn shim.Error(\"Error in Primary key specification.\")\r\n\t}\r\n\r\n\tkey, err_key := t.createInterfacePrimaryKey(record_specification, pk_spec)\r\n\r\n\tif err_key != nil {\r\n\r\n\t\tfmt.Println(err_key.Error())\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_update_status***********\")\r\n\r\n\t\treturn shim.Error(err_key.Error())\r\n\r\n\t}\r\n\r\n\tvar valAsBytes []byte\r\n\r\n\tvalAsBytes, err = stub.GetState(key)\r\n\r\n\tif err != nil {\r\n\r\n\t\tfmt.Println(\"Error: Failed to get state for primary key. \" + err.Error())\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_update_status***********\")\r\n\r\n\t\treturn shim.Error(\"Error: Failed to get state for primary key. \" + err.Error())\r\n\r\n\t} else if valAsBytes == nil {\r\n\r\n\t\tfmt.Println(\"Error: No value for key : \" + key)\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_update_status***********\")\r\n\r\n\t\treturn shim.Error(\"Error: No value for primary key.\")\r\n\r\n\t}\r\n\r\n\terr = json.Unmarshal([]byte(valAsBytes), &record_specification)\r\n\r\n\tif err != nil {\r\n\r\n\t\tfmt.Println(\"Error in format of Blockchain record\")\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_update_status***********\")\r\n\r\n\t\treturn shim.Error(\"Error in format of Blockchain record\")\r\n\r\n\t}\r\n\r\n\terr_del := t.delete_composite_keys(stub, specs, record_specification, key)\r\n\r\n\tif err_del != nil {\r\n\r\n\t\tfmt.Println(\"Error while deleting composite keys: \" + err_del.Error())\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_update_status***********\")\r\n\r\n\t\treturn shim.Error(\"Error while deleting composite keys: \" + err_del.Error())\r\n\r\n\t}\r\n\r\n\tvar to_be_updated_map map[string]interface{}\r\n\r\n\terr = json.Unmarshal([]byte(args[1]), &to_be_updated_map)\r\n\r\n\tif err != nil {\r\n\r\n\t\tfmt.Println(\"Error in format of update map\")\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_update_status***********\")\r\n\r\n\t\treturn shim.Error(\"Error in format of update map\")\r\n\r\n\t}\r\n\r\n\tfor spec, spec_val := range to_be_updated_map {\r\n\r\n\t\tvar spec_val_string, spec_ok = spec_val.(string)\r\n\r\n\t\tif !spec_ok {\r\n\r\n\t\t\tfmt.Println(\"Unable to parse value of status update\")\r\n\r\n\t\t\tfmt.Println(\"***********Exiting invoke_update_status***********\")\r\n\r\n\t\t\treturn shim.Error(\"Unable to parse value of status update\")\r\n\r\n\t\t}\r\n\r\n\t\tvar val_check, val_err = t.updatestatusvaliditycheck(spec, spec_val_string, map_specification)\r\n\r\n\t\tif val_check != 0 {\r\n\r\n\t\t\tfmt.Println(val_err.Error())\r\n\r\n\t\t\tfmt.Println(\"***********Exiting invoke_update_status***********\")\r\n\r\n\t\t\treturn shim.Error(val_err.Error())\r\n\t\t}\r\n\r\n\t\trecord_specification[spec] = spec_val_string\r\n\t}\r\n\r\n\tvar concatenated_record_json []byte\r\n\r\n\tconcatenated_record_json, err = 
json.Marshal(record_specification)\r\n\r\n\tif err != nil {\r\n\r\n\t\tfmt.Println(\"Error: Unable to Marshal Concatenated Record to JSON \" + err.Error())\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_update_status***********\")\r\n\r\n\t\treturn shim.Error(\"Error: Unable to Marshal Concatenated Record to JSON \" + err.Error())\r\n\t}\r\n\r\n\terr = stub.PutState(key, []byte(concatenated_record_json))\r\n\r\n\tif err != nil {\r\n\r\n\t\tfmt.Println(\"Failed to put state : \" + err.Error())\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_update_status***********\")\r\n\r\n\t\treturn shim.Error(\"Failed to put state : \" + err.Error())\r\n\t}\r\n\r\n\terr = t.create_composite_keys(stub, specs, record_specification, key)\r\n\r\n\tif err != nil {\r\n\r\n\t\tfmt.Println(\"Received error while creating composite keys\" + err.Error())\r\n\r\n\t\tfmt.Println(\"***********Exiting invoke_update_status***********\")\r\n\r\n\t\treturn shim.Error(\"Received error while creating composite keys\" + err.Error())\r\n\t}\r\n\r\n\tfmt.Println(\"***********Exiting invoke_update_status***********\")\r\n\r\n\treturn shim.Success(nil)\r\n\r\n}", "func (r *MockRepoManager) assertUpdate() {\n\tr.mtx.RLock()\n\tdefer r.mtx.RUnlock()\n\tassert.Equal(r.t, 0, r.updateCount)\n}", "func TestGetStatusByHostnameAtTimestamp3(t *testing.T) {\n\n\tbefore(t, dbStorage, dbSchema)\n\n\tprivateIPs := []string{\"44.33.22.11\"}\n\tpublicIPs := []string{\"88.77.66.55\"} // nolint\n\thostnames := []string{\"yahoo.com\"} // nolint\n\ttimestamp, _ := time.Parse(time.RFC3339, \"2019-08-09T08:29:35+00:00\")\n\n\tfakeCloudAssetChange := newFakeCloudAssetChange(privateIPs, publicIPs, hostnames, timestamp, `arn`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\ttimestamp2, _ := time.Parse(time.RFC3339, \"2019-08-11T08:29:35+00:00\") // August 11\n\tprivateIPs2 := []string{\"4.3.2.1\"}\n\tpublicIPs2 := []string{\"8.7.6.5\"}\n\tfakeCloudAssetChange2 := newFakeCloudAssetChange(privateIPs2, publicIPs2, hostnames, timestamp2, `arn2`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange2); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\thostname := \"yahoo.com\" // nolint\n\tat, _ := time.Parse(time.RFC3339, \"2019-08-12T08:29:35+00:00\") // query is for status on August 12\n\tnetworkChangeEvents, err := dbStorage.FetchByHostname(ctx, at, hostname)\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tassert.Equal(t, 2, len(networkChangeEvents))\n\n\texpected := []domain.CloudAssetDetails{\n\t\tdomain.CloudAssetDetails{nil, []string{\"88.77.66.55\"}, []string{\"yahoo.com\"}, \"rtype\", \"aid\", \"region\", \"arn\", nil, domain.AccountOwner{}}, // nolint\n\t\tdomain.CloudAssetDetails{nil, []string{\"8.7.6.5\"}, []string{\"yahoo.com\"}, \"rtype\", \"aid\", \"region\", \"arn2\", nil, domain.AccountOwner{}}, // nolint\n\t}\n\n\tassertArrayEqualIgnoreOrder(t, expected, networkChangeEvents)\n\n}", "func calculateChanges(x, y imageRepos) imageRepos {\n\treturn nil\n}", "func (_m *MockRepository) UpdateCCLFFileImportStatus(ctx context.Context, fileID uint, importStatus string) error {\n\tret := _m.Called(ctx, fileID, importStatus)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, uint, string) error); ok {\n\t\tr0 = rf(ctx, fileID, importStatus)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func handleModify(ctx *verifierContext, config *types.VerifyImageConfig,\n\tstatus 
*types.VerifyImageStatus) {\n\n\t// Note no comparison on version\n\tchanged := false\n\n\tlog.Functionf(\"handleModify(%s) for %s, config.RefCount: %d, \"+\n\t\t\"status.RefCount: %d\",\n\t\tstatus.ImageSha256, config.Name, config.RefCount,\n\t\tstatus.RefCount)\n\n\t// Always update RefCount and Expired\n\tif status.RefCount != config.RefCount {\n\t\tlog.Functionf(\"handleModify RefCount change %s from %d to %d\",\n\t\t\tconfig.Name, status.RefCount, config.RefCount)\n\t\tstatus.RefCount = config.RefCount\n\t\tchanged = true\n\t}\n\tif status.Expired != config.Expired {\n\t\tlog.Functionf(\"handleModify Expired change %s from %t to %t\",\n\t\t\tconfig.Name, status.Expired, config.Expired)\n\t\tstatus.Expired = config.Expired\n\t\tchanged = true\n\t}\n\n\tif changed {\n\t\tpublishVerifyImageStatus(ctx, status)\n\t}\n\tlog.Functionf(\"handleModify done for %s. Status.RefCount=%d, Config.RefCount:%d\",\n\t\tconfig.Name, status.RefCount, config.RefCount)\n}", "func TestCreateRetryConflictTagDiff(t *testing.T) {\n\tfirstGet := true\n\tfirstUpdate := true\n\trestInstance := &REST{\n\t\tstrategy: NewStrategy(registryhostname.TestingRegistryHostnameRetriever(nil, \"\", testDefaultRegistryURL)),\n\t\timageRegistry: &fakeImageRegistry{\n\t\t\tcreateImage: func(ctx context.Context, image *imageapi.Image) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\timageStreamRegistry: &fakeImageStreamRegistry{\n\t\t\tgetImageStream: func(ctx context.Context, id string, options *metav1.GetOptions) (*imageapi.ImageStream, error) {\n\t\t\t\t// For the first get, return a stream with a latest tag pointing to \"original\"\n\t\t\t\tif firstGet {\n\t\t\t\t\tfirstGet = false\n\t\t\t\t\tstream := validImageStream()\n\t\t\t\t\tstream.Status = imageapi.ImageStreamStatus{\n\t\t\t\t\t\tTags: map[string]imageapi.TagEventList{\n\t\t\t\t\t\t\t\"latest\": {Items: []imageapi.TagEvent{{DockerImageReference: \"localhost:5000/someproject/somerepo:original\"}}},\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t\treturn stream, nil\n\t\t\t\t}\n\t\t\t\t// For subsequent gets, return a stream with the latest tag changed to \"newer\"\n\t\t\t\tstream := validImageStream()\n\t\t\t\tstream.Status = imageapi.ImageStreamStatus{\n\t\t\t\t\tTags: map[string]imageapi.TagEventList{\n\t\t\t\t\t\t\"latest\": {Items: []imageapi.TagEvent{{DockerImageReference: \"localhost:5000/someproject/somerepo:newer\"}}},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\treturn stream, nil\n\t\t\t},\n\t\t\tupdateImageStreamStatus: func(ctx context.Context, repo *imageapi.ImageStream) (*imageapi.ImageStream, error) {\n\t\t\t\t// For the first update, return a conflict so that the stream\n\t\t\t\t// get/compare is retried.\n\t\t\t\tif firstUpdate {\n\t\t\t\t\tfirstUpdate = false\n\t\t\t\t\treturn nil, errors.NewConflict(imagegroup.Resource(\"imagestreams\"), repo.Name, fmt.Errorf(\"resource modified\"))\n\t\t\t\t}\n\t\t\t\treturn repo, nil\n\t\t\t},\n\t\t},\n\t}\n\tobj, err := restInstance.Create(apirequest.NewDefaultContext(), validNewMappingWithName(), rest.ValidateAllObjectFunc, false)\n\tif err == nil {\n\t\tt.Fatalf(\"expected an error\")\n\t}\n\tif !errors.IsConflict(err) {\n\t\tt.Errorf(\"expected a conflict error, got %v\", err)\n\t}\n\tif obj != nil {\n\t\tt.Fatalf(\"expected a nil result\")\n\t}\n}", "func testFetchURL() {\n\tconst url string = \"http://gopl.io\"\n\tfmt.Printf(\"\\nbytes of url (%s): %d\\n\", url, len(myFetchURL(url)))\n}", "func TestGetStatusByHostnameAtTimestamp4(t *testing.T) {\n\n\tbefore(t, dbStorage, dbSchema)\n\n\tprivateIPs := 
[]string{\"44.33.22.11\"}\n\tpublicIPs := []string{\"88.77.66.55\"} // nolint\n\thostnames := []string{\"yahoo.com\"} // nolint\n\ttimestamp, _ := time.Parse(time.RFC3339, \"2019-08-09T08:29:35+00:00\")\n\n\tfakeCloudAssetChange := newFakeCloudAssetChange(privateIPs, publicIPs, hostnames, timestamp, `arn`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\ttimestamp2, _ := time.Parse(time.RFC3339, \"2019-08-12T08:29:35+00:00\") // August 12\n\tprivateIPs2 := []string{\"4.3.2.1\"}\n\tpublicIPs2 := []string{\"8.7.6.5\"}\n\tfakeCloudAssetChange2 := newFakeCloudAssetChange(privateIPs2, publicIPs2, hostnames, timestamp2, `arn2`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange2); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\thostname := \"yahoo.com\" // nolint\n\tat, _ := time.Parse(time.RFC3339, \"2019-08-11T08:29:35+00:00\") // query is for status on August 11\n\tnetworkChangeEvents, err := dbStorage.FetchByHostname(ctx, at, hostname)\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tassert.Equal(t, 1, len(networkChangeEvents))\n\n\texpected := []domain.CloudAssetDetails{\n\t\tdomain.CloudAssetDetails{nil, []string{\"88.77.66.55\"}, []string{\"yahoo.com\"}, \"rtype\", \"aid\", \"region\", \"arn\", nil, domain.AccountOwner{}}, // nolint\n\t}\n\n\tassertArrayEqualIgnoreOrder(t, expected, networkChangeEvents)\n\n}", "func TestUpdatingCheck(t *testing.T) {\n\tr := fstest.NewRun(t)\n\tfilePath := \"sub dir/local test\"\n\tr.WriteFile(filePath, \"content\", time.Now())\n\n\tfd, err := file.Open(path.Join(r.LocalName, filePath))\n\tif err != nil {\n\t\tt.Fatalf(\"failed opening file %q: %v\", filePath, err)\n\t}\n\tdefer func() {\n\t\trequire.NoError(t, fd.Close())\n\t}()\n\n\tfi, err := fd.Stat()\n\trequire.NoError(t, err)\n\to := &Object{size: fi.Size(), modTime: fi.ModTime(), fs: &Fs{}}\n\twrappedFd := readers.NewLimitedReadCloser(fd, -1)\n\thash, err := hash.NewMultiHasherTypes(hash.Supported())\n\trequire.NoError(t, err)\n\tin := localOpenFile{\n\t\to: o,\n\t\tin: wrappedFd,\n\t\thash: hash,\n\t\tfd: fd,\n\t}\n\n\tbuf := make([]byte, 1)\n\t_, err = in.Read(buf)\n\trequire.NoError(t, err)\n\n\tr.WriteFile(filePath, \"content updated\", time.Now())\n\t_, err = in.Read(buf)\n\trequire.Errorf(t, err, \"can't copy - source file is being updated\")\n\n\t// turn the checking off and try again\n\tin.o.fs.opt.NoCheckUpdated = true\n\n\tr.WriteFile(filePath, \"content updated\", time.Now())\n\t_, err = in.Read(buf)\n\trequire.NoError(t, err)\n\n}", "func updateImage(w http.ResponseWriter, req *http.Request) {\n\n\t// Manage Cors\n\tsetCors(&w)\n\tif req.Method == \"OPTIONS\" {\n\t\treturn\n\t}\n\n\t// Authenticate user\n\tclaims, err := authRequest(req)\n\tif err != nil {\n\t\tlogger.Error(\"Unauthorized request to upload sending 401: %v\", err)\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\tw.Write([]byte(\"401 - Unauthorized request, ensure you sign in and obtain the jwt auth token\"))\n\t\treturn\n\t}\n\n\tvars := mux.Vars(req)\n\t// validate url parameters and retrieve imageMeta\n\timageMeta, err := validateVars(vars)\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), \"404 - Not found\") {\n\t\t\tlogger.Error(\"image data does not exist sending 404: %v\", err)\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\tw.Write([]byte(\"404 - Not found, no image with that information available\"))\n\t\t\treturn\n\t\t}\n\t\tlogger.Error(\"Failed to validate vars 
sending 400: %v\", err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(\"400 - Bad request unable to parse url parameters\"))\n\t\treturn\n\t}\n\n\t// Ensure there is no uid miss match\n\tuidVal, err := strconv.Atoi(vars[\"uid\"])\n\tif uidVal != int(imageMeta.Uid) {\n\t\tlogger.Error(\"uid miss match when attempting to modify image sending 400\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(\"400 - Uid mismatch ensure you are using the correct image reference\"))\n\t\treturn\n\t}\n\n\t// Ensure user has access permissions\n\tif claims.Uid != int(imageMeta.Uid) {\n\t\tlogger.Error(\"unauthorized user attempting to modify image\")\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\tw.Write([]byte(\"401 - Unauthorized, you do not have permissions to modify this image\"))\n\t\treturn\n\t}\n\n\t// decode json message into string map\n\t// string map must be used to account for empty values\n\tvar newParams map[string]string\n\tdecoder := json.NewDecoder(req.Body)\n\terr = decoder.Decode(&newParams)\n\tif err != nil {\n\t\tlogger.Error(\"failed to demarshal json body sending 400: %v\", err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(\"400 - unable to parse json, check your request\"))\n\t\treturn\n\t}\n\n\t// if request specified a new title that is at least one character update meta\n\tif title, ok := newParams[\"title\"]; ok && len(title) > 0 {\n\t\tfileExt := strings.Split(imageMeta.Encoding, \"/\")[1]\n\n\t\t// Manually assign extension even if one is already there\n\t\timageMeta.Title = fmt.Sprintf(\"%s.%s\", strings.Split(title, \".\")[0], fileExt)\n\t}\n\n\t// if request specified a new shareable value that is valid update meta\n\tif shareable, ok := newParams[\"shareable\"]; ok {\n\t\tif shareable == \"true\" {\n\t\t\timageMeta.Shareable = true\n\t\t} else if shareable == \"false\" {\n\t\t\timageMeta.Shareable = false\n\t\t}\n\t}\n\n\terr = UpdateImageData(imageMeta)\n\tif err != nil {\n\t\tlogger.Error(\"failed to update database with new meta sending 500: %v\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(\"500 - Failed to update database, try again later\"))\n\t\treturn\n\t}\n\n\t// marshal data into json to prep the query response\n\tjs, err := json.Marshal(imageMeta)\n\tif err != nil {\n\t\tlogger.Error(\"Failed to marshal image meta sending 500: %v\", err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(\"500 - failed to marshal response, try again later\"))\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write(js)\n\tlogger.Info(\"Successfully returned image meta request for UID: %v\", claims.Uid)\n\n\treturn\n\n}", "func (o *Ga4ghTumourboard) GetUpdatedOk() (string, bool) {\n\tif o == nil || o.Updated == nil {\n\t\tvar ret string\n\t\treturn ret, false\n\t}\n\treturn *o.Updated, true\n}", "func (o *PcloudImagesGetallNotFound) IsSuccess() bool {\n\treturn false\n}" ]
[ "0.70878696", "0.63165396", "0.6095992", "0.6031656", "0.6025517", "0.55363476", "0.5509565", "0.548924", "0.54534054", "0.54503196", "0.5307194", "0.52465767", "0.5211706", "0.5209486", "0.5193103", "0.5190094", "0.5184434", "0.5166309", "0.5161391", "0.51610816", "0.51217157", "0.5098703", "0.5058024", "0.50310284", "0.50279105", "0.50172865", "0.50029945", "0.5002695", "0.4967577", "0.49502578", "0.49329108", "0.49166209", "0.49125555", "0.4903887", "0.4901571", "0.4894706", "0.48873013", "0.4886608", "0.4873878", "0.48735604", "0.48704007", "0.48593864", "0.4857604", "0.48388982", "0.48372453", "0.48344374", "0.48214442", "0.4817391", "0.48132354", "0.4812025", "0.48039237", "0.47981772", "0.47974676", "0.47840047", "0.47732455", "0.47520864", "0.47459373", "0.47409952", "0.4711697", "0.47009692", "0.46965414", "0.46734232", "0.46690193", "0.46611986", "0.4645123", "0.46447375", "0.46417344", "0.46408132", "0.46405336", "0.46334022", "0.46291387", "0.46228647", "0.46228456", "0.4619646", "0.46128333", "0.4609358", "0.46061194", "0.45953697", "0.45928618", "0.4587627", "0.45775616", "0.45746368", "0.45746368", "0.45635203", "0.4560441", "0.4550859", "0.4550563", "0.4541793", "0.4535259", "0.45351163", "0.45343724", "0.45341265", "0.4533968", "0.45289147", "0.45279598", "0.45233023", "0.45202515", "0.4516111", "0.45101115", "0.45100045" ]
0.69185567
1
NewProtocol creates a new protocol
func NewProtocol( store s.Store, epochCtx *epochctx.EpochCtx, rewardingConfig indexprotocol.Rewarding, gravityChainCfg indexprotocol.GravityChain, ) *Protocol { return &Protocol{ Store: store, RewardConfig: rewardingConfig, epochCtx: epochCtx, gravityChainCfg: gravityChainCfg, } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewProtocol(bc blockchain.Blockchain) *Protocol { return &Protocol{bc} }", "func NewProtocol(action string, hID int64, note string) Protocol {\n\treturn Protocol{\n\t\tTime: time.Now(),\n\t\tAction: action,\n\t\tHeroID: hID,\n\t\tNote: note,\n\t}\n}", "func (s *Service) NewProtocol(n *onet.TreeNodeInstance, conf *onet.GenericConfig) (onet.ProtocolInstance, error) {\n\tswitch n.ProtocolName() {\n\tcase protoName:\n\t\tsuite := pairing.NewSuiteBn256()\n\t\treturn simpleblscosi.NewProtocol(n, s.vf, n.Tree().ID, s.atomicCoinReserved, s.coinToAtomic, s.distances, suite)\n\tcase \"Propagate\":\n\t\treturn s.mypi(n)\n\tdefault:\n\t\treturn nil, errors.New(\"This protocol does not exist\")\n\t}\n}", "func NewProtocol() stack.NetworkProtocol {\r\n\treturn &protocol{}\r\n}", "func NewProtocol(code byte) Protocol {\n\tvar p Protocol = Protocol{\n\t\tCommand: code,\n\t\tHeader: &bytes.Buffer{},\n\t\tEncoded: nil,\n\t\tMeta: NewProtoMeta(),\n\t\tId: NewUUID(),\n\t}\n\treturn p\n}", "func NewProtocol(depositGas DepositGas, cfg genesis.Staking) (*Protocol, error) {\n\th := hash.Hash160b([]byte(protocolID))\n\taddr, err := address.FromBytes(h[:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tminStakeAmount, ok := new(big.Int).SetString(cfg.MinStakeAmount, 10)\n\tif !ok {\n\t\treturn nil, ErrInvalidAmount\n\t}\n\n\tregFee, ok := new(big.Int).SetString(cfg.RegistrationConsts.Fee, 10)\n\tif !ok {\n\t\treturn nil, ErrInvalidAmount\n\t}\n\n\tminSelfStake, ok := new(big.Int).SetString(cfg.RegistrationConsts.MinSelfStake, 10)\n\tif !ok {\n\t\treturn nil, ErrInvalidAmount\n\t}\n\n\treturn &Protocol{\n\t\taddr: addr,\n\t\tconfig: Configuration{\n\t\t\tVoteWeightCalConsts: cfg.VoteWeightCalConsts,\n\t\t\tRegistrationConsts: RegistrationConsts{\n\t\t\t\tFee: regFee,\n\t\t\t\tMinSelfStake: minSelfStake,\n\t\t\t},\n\t\t\tWithdrawWaitingPeriod: cfg.WithdrawWaitingPeriod,\n\t\t\tMinStakeAmount: minStakeAmount,\n\t\t\tBootstrapCandidates: cfg.BootstrapCandidates,\n\t\t},\n\t\tdepositGas: depositGas,\n\t}, nil\n}", "func NewProtocol(rw io.ReadWriteCloser) *Protocol {\n\treturn &Protocol{rw, &chanList{chans: map[byte]chan []byte{}}}\n}", "func NewProtocol(protocol string) (Protocol, error) {\n\tswitch strings.ToLower(protocol) {\n\tcase TCP.String():\n\t\treturn TCP, nil\n\tcase HTTP.String():\n\t\treturn HTTP, nil\n\tcase HTTPS.String():\n\t\treturn HTTPS, nil\n\t}\n\treturn 0, fmt.Errorf(\"protocol %s not support\", protocol)\n}", "func NewProtocol(idx *indexservice.Indexer, cfg indexprotocol.HermesConfig) *Protocol {\n\treturn &Protocol{\n\t\tindexer: idx,\n\t\thermesConfig: cfg,\n\t}\n}", "func NewProtocol(depositGas DepositGas, cfg genesis.Staking, candBucketsIndexer *CandidatesBucketsIndexer, reviseHeights ...uint64) (*Protocol, error) {\n\th := hash.Hash160b([]byte(protocolID))\n\taddr, err := address.FromBytes(h[:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tminStakeAmount, ok := new(big.Int).SetString(cfg.MinStakeAmount, 10)\n\tif !ok {\n\t\treturn nil, ErrInvalidAmount\n\t}\n\n\tregFee, ok := new(big.Int).SetString(cfg.RegistrationConsts.Fee, 10)\n\tif !ok {\n\t\treturn nil, ErrInvalidAmount\n\t}\n\n\tminSelfStake, ok := new(big.Int).SetString(cfg.RegistrationConsts.MinSelfStake, 10)\n\tif !ok {\n\t\treturn nil, ErrInvalidAmount\n\t}\n\n\t// new vote reviser, revise ate greenland\n\tvoteReviser := NewVoteReviser(cfg.VoteWeightCalConsts, reviseHeights...)\n\n\treturn &Protocol{\n\t\taddr: addr,\n\t\tconfig: Configuration{\n\t\t\tVoteWeightCalConsts: 
cfg.VoteWeightCalConsts,\n\t\t\tRegistrationConsts: RegistrationConsts{\n\t\t\t\tFee: regFee,\n\t\t\t\tMinSelfStake: minSelfStake,\n\t\t\t},\n\t\t\tWithdrawWaitingPeriod: cfg.WithdrawWaitingPeriod,\n\t\t\tMinStakeAmount: minStakeAmount,\n\t\t\tBootstrapCandidates: cfg.BootstrapCandidates,\n\t\t},\n\t\tdepositGas: depositGas,\n\t\tcandBucketsIndexer: candBucketsIndexer,\n\t\tvoteReviser: voteReviser,\n\t}, nil\n}", "func NewProtocol() protocol.Protocol {\n\ts := &socket{\n\t\tpipes: make(map[uint32]*pipe),\n\t\tsurveys: make(map[uint32]*context),\n\t\tctxs: make(map[*context]struct{}),\n\t\tsendQLen: defaultQLen,\n\t\tnextID: uint32(time.Now().UnixNano()), // quasi-random\n\t}\n\ts.master = &context{\n\t\ts: s,\n\t\trecvq: make(chan *protocol.Message, defaultQLen),\n\t\tcloseq: make(chan struct{}),\n\t\trecvQLen: defaultQLen,\n\t\tsurvExpire: defaultSurveyTime,\n\t}\n\treturn s\n}", "func NewProtocol() protocol.Protocol {\n\ts := &socket{\n\t\tProtocol: xpub.NewProtocol(),\n\t}\n\treturn s\n}", "func NewProtocol(responses chan *Response) *Protocol {\n\tp := &Protocol{\n\t\tHostname: \"mailhog.example\",\n\t\tIdent: \"MailHog\",\n\t\tRevision: \"IMAP4rev1\",\n\t\tState: INVALID,\n\t\tMaximumLineLength: 8000,\n\t\tResponses: responses,\n\n\t\tCapabilities: map[string]Capability{\n\t\t\t\"IMAP4rev1\": &GenericCapability{name: \"IMAP4rev1\"},\n\t\t\t\"STARTTLS\": &GenericCapability{name: \"STARTTLS\", skipIfTLS: true},\n\t\t\t\"LOGINDISABLED\": &GenericCapability{name: \"IMAP4rev1\"},\n\t\t\t\"AUTH=PLAIN\": &GenericCapability{name: \"AUTH=PLAIN\", requireTLS: true},\n\t\t},\n\t}\n\treturn p\n}", "func New(store dapp.Store) *Protocol {\n\treturn &Protocol{store: store}\n}", "func (s *ServiceState) NewProtocol(tn *onet.TreeNodeInstance, conf *onet.GenericConfig) (onet.ProtocolInstance, error) {\n\n\tpi, err := dissent_protocol.NewDissentProtocol(tn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\twrapper := pi.(*dissent_protocol.DissentProtocol)\n\ts.setConfigToDissentProtocol(wrapper)\n\ts.DissentProtocol = wrapper\n\n\treturn pi, nil\n}", "func (o *Overlay) CreateProtocol(name string, t *Tree, sid ServiceID) (ProtocolInstance, error) {\n\tio := o.protoIO.getByName(name)\n\ttni := o.NewTreeNodeInstanceFromService(t, t.Root, ProtocolNameToID(name), sid, io)\n\tpi, err := o.server.protocolInstantiate(tni.token.ProtoID, tni)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = o.RegisterProtocolInstance(pi); err != nil {\n\t\treturn nil, err\n\t}\n\tgo pi.Dispatch()\n\treturn pi, err\n}", "func NewProtocol(conn *net.TCPConn) (Protocol, error) {\r\n\tr := &protocol{}\r\n\r\n\tr.conn = NewSocket(conn)\r\n\tr.chunkStreams = map[int]*ChunkStream{}\r\n\tr.buffer = NewRtmpBuffer(r.conn)\r\n\tr.handshake = &Handshake{}\r\n\r\n\tr.inChunkSize = RTMP_DEFAULT_CHUNK_SIZE\r\n\tr.outChunkSize = r.inChunkSize\r\n\tr.outHeaderFmt0 = NewRtmpStream(make([]byte, RTMP_MAX_FMT0_HEADER_SIZE))\r\n\tr.outHeaderFmt3 = NewRtmpStream(make([]byte, RTMP_MAX_FMT3_HEADER_SIZE))\r\n\r\n\t// r.msg_in_lock = &sync.Mutex{}\r\n\t// r.msg_out_lock = &sync.Mutex{}\r\n\tr.closeChan = make(chan struct{})\r\n\tr.msg_in_queue = make(chan *Message, RTMP_MSG_CHANNEL_BUFFER)\r\n\tr.msg_out_queue = make(chan *Message, RTMP_MSG_CHANNEL_BUFFER)\r\n\r\n\trand.Seed(time.Now().UnixNano())\r\n\r\n\treturn r, nil\r\n}", "func NewProtocol(conn io.ReadWriteCloser, handler ProtocolHandler, done <-chan struct{}) (*Protocol, error) {\n\tif conn == nil {\n\t\treturn nil, fmt.Errorf(\"Cannot create protocol in nil connection\")\n\t} else if handler 
== nil {\n\t\treturn nil, fmt.Errorf(\"Cannot create protocol with nil handler\")\n\t} else if done == nil {\n\t\treturn nil, fmt.Errorf(\"Cannot create protocol with nil done channel\")\n\t}\n\tmessages := make(chan Message)\n\tproto := &Protocol{\n\t\tr: gob.NewDecoder(conn),\n\t\tw: gob.NewEncoder(conn),\n\t\trecieved: messages,\n\t\tdone: done,\n\t\thandler: handler,\n\t}\n\tgo func() {\n\t\tdefer close(proto.recieved)\n\t\tfor {\n\t\t\tvar message Message\n\t\t\tselect {\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tif err := proto.r.Decode(&message); err == nil {\n\t\t\t\t\tproto.recieved <- message\n\t\t\t\t} else if err == io.EOF {\n\t\t\t\t\tlog.Println(\"Disconnected: EOF\")\n\t\t\t\t\treturn\n\t\t\t\t} else {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\tgo func() {\n\t\tfor msg := range proto.recieved {\n\t\t\tswitch msg.Type {\n\t\t\tcase QUIT:\n\t\t\t\tproto.handler.HandleQuit()\n\t\t\tcase START_DECK:\n\t\t\t\tproto.handler.HandleStartDeck(msg.Deck, msg.Value)\n\t\t\tcase END_DECK:\n\t\t\t\tproto.handler.HandleEndDeck(msg.Deck)\n\t\t\tcase DECRYPT_CARD:\n\t\t\t\tproto.handler.HandleDecryptCard(msg.Index)\n\t\t\tcase ONE_CIPHER_CARD:\n\t\t\t\tproto.handler.HandleDecryptedCard(msg.Index, msg.Value)\n\t\t\tcase APP_MESSAGE:\n\t\t\t\tproto.handler.HandleAppMessage(msg.Data)\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t}()\n\treturn proto, nil\n}", "func MakeProtocol(fluidBackup *FluidBackup, port int) *Protocol {\n\tthis := new(Protocol)\n\tthis.port = port\n\n\tthis.rpc = rpc.NewServer()\n\tthis.rpc.Register(this)\n\tl, e := net.Listen(\"tcp\", fmt.Sprintf(\":%d\", port))\n\tthis.l = l\n\tif e != nil {\n\t\tLog.Error.Printf(\"Error while initializing RPC handler: %s\", e.Error())\n\t\treturn nil\n\t}\n\tgo func() {\n\t\tfor !fluidBackup.Stopping() {\n\t\t\tconn, _ := this.l.Accept()\n\n\t\t\tif conn != nil {\n\t\t\t\tthis.rpc.ServeConn(conn)\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn this\n}", "func NewProtocol() protocol.Protocol {\n\ts := &socket{\n\t\tcloseQ: make(chan struct{}),\n\t\tnoPeerQ: make(chan struct{}),\n\t\tsendQ: make(chan *protocol.Message, defaultQLen),\n\t\tsendQLen: defaultQLen,\n\t\tpipes: make(map[uint32]*pipe),\n\t}\n\ts.cv = sync.NewCond(s)\n\tgo s.sender()\n\treturn s\n}", "func NewSetupProtocol(rwc io.ReadWriteCloser) *Protocol {\n\treturn &Protocol{rwc}\n}", "func (proto *Protocol) CREATE(command *Command) {\n\n}", "func New(ctx context.Context, opts ...Option) (*Protocol, error) {\n\tt := &Protocol{}\n\tt.incoming = make(chan pubsub.Message)\n\tif err := t.applyOptions(opts...); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif t.client == nil {\n\t\t// Auth to pubsub.\n\t\tclient, err := pubsub.NewClient(ctx, t.projectID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t// Success.\n\t\tt.client = client\n\t}\n\n\tif t.connectionsBySubscription == nil {\n\t\tt.connectionsBySubscription = make(map[string]*internal.Connection)\n\t}\n\n\tif t.connectionsByTopic == nil {\n\t\tt.connectionsByTopic = make(map[string]*internal.Connection)\n\t}\n\treturn t, nil\n}", "func NewProtocol(numCandidateDelegates, numDelegates, numSubEpochs uint64, opts ...Option) *Protocol {\n\tif numCandidateDelegates < numDelegates {\n\t\tnumCandidateDelegates = numDelegates\n\t}\n\tp := &Protocol{\n\t\tnumCandidateDelegates: numCandidateDelegates,\n\t\tnumDelegates: numDelegates,\n\t\tnumSubEpochs: numSubEpochs,\n\t}\n\tfor _, opt := range opts {\n\t\tif err := opt(p); err != nil {\n\t\t\tlog.S().Panicf(\"Failed to execute epoch protocol creation option %p: 
%v\", opt, err)\n\t\t}\n\t}\n\treturn p\n}", "func New(opts ...ProtocolOption) *Protocol {\n\tp := &Protocol{\n\t\tpingTimeout: 3 * time.Second,\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(p)\n\t}\n\n\treturn p\n}", "func CreateProtocolInstance(action ActionProps, filter FilteringProps) *ProtocolInstance {\r\n\treturn &ProtocolInstance{\r\n\t\tAction: action,\r\n\t\tFilter: filter,\r\n\t}\r\n}", "func NewJSONProtocol() *JSONProtocol {\n\treturn &JSONProtocol{\n\t\ttypes: make(map[string]reflect.Type),\n\t\tnames: make(map[reflect.Type]string),\n\t}\n}", "func NewProtocolf(action string, hID int64, note string, a ...interface{}) Protocol {\n\treturn NewProtocol(action, hID, fmt.Sprintf(note, a...))\n}", "func newProtobuf(typeName string) *protobuf {\n\treturn &protobuf{\n\t\tprotobufFieldSequence: newProtobufFieldSequence(false),\n\t\tTypeName: typeName,\n\t}\n}", "func (s *Service) NewDefaultProtocol(n *onet.TreeNodeInstance) (onet.ProtocolInstance, error) {\n\tsuite := pairing.NewSuiteBn256()\n\treturn simpleblscosi.NewProtocol(n, s.vf, n.Tree().ID, s.atomicCoinReserved, s.coinToAtomic, s.distances, suite)\n}", "func newProtocolACKN(conn tcp.Connection, bodyLength uint32) (tcp.ProtocolMessage, error) {\n\tif bodyLength != 20 {\n\t\treturn nil, fmt.Errorf(\"protocol error: Corrupt message (ACKN size %d != 20)\", bodyLength)\n\t}\n\n\tmessage := make([]byte, 20)\n\tif _, err := conn.Read(message); err != nil {\n\t\treturn nil, err\n\t}\n\n\tnonce := string(message[:16])\n\tsequence := binary.BigEndian.Uint32(message[16:])\n\treturn &protocolACKN{ctx: conn.Context(), nonce: &nonce, sequence: sequence}, nil\n}", "func NewDefaultProtocol(n *onet.TreeNodeInstance) (onet.ProtocolInstance, error) {\n\tvf := func(a, b []byte) bool { return true }\n\treturn NewBlsCosi(n, vf, DefaultSubProtocolName, pairing.NewSuiteBn256())\n}", "func (b *Builder) RegisterProtocol(rp *rolldpos.Protocol) *Builder {\n\tb.rp = rp\n\treturn b\n}", "func NewProtocolException(name string, message string, params ...interface{}) error {\n\terr := ProtocolException{\n\t\tName: name,\n\t\tMessage: fmt.Sprintf(message, params...),\n\t\tStack: stack.Callers(1),\n\t}\n\treturn err\n}", "func NewICEProtocol(raw string) (ICEProtocol, error) {\n\tswitch {\n\tcase strings.EqualFold(iceProtocolUDPStr, raw):\n\t\treturn ICEProtocolUDP, nil\n\tcase strings.EqualFold(iceProtocolTCPStr, raw):\n\t\treturn ICEProtocolTCP, nil\n\tdefault:\n\t\treturn ICEProtocol(Unknown), fmt.Errorf(\"%w: %s\", errICEProtocolUnknown, raw)\n\t}\n}", "func NewProxyProtocol(name string) (deployment *appsv1.Deployment, svc *corev1.Service) {\n\tdeployment = &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: pointer.Int32Ptr(1),\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\"app\": name,\n\t\t\t\t},\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"app\": name,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: name,\n\t\t\t\t\t\t\tImage: \"quay.io/prometherion/proxy-protocol-app:latest\",\n\t\t\t\t\t\t\tPorts: []corev1.ContainerPort{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"http\",\n\t\t\t\t\t\t\t\t\tContainerPort: 8080,\n\t\t\t\t\t\t\t\t\tProtocol: 
corev1.ProtocolTCP,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tsvc = &corev1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t},\n\t\tSpec: corev1.ServiceSpec{\n\t\t\tPorts: []corev1.ServicePort{\n\t\t\t\t{\n\t\t\t\t\tPort: 8080,\n\t\t\t\t\tTargetPort: intstr.FromString(\"http\"),\n\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\tName: \"http\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tSelector: map[string]string{\n\t\t\t\t\"app\": name,\n\t\t\t},\n\t\t\tType: corev1.ServiceTypeClusterIP,\n\t\t},\n\t}\n\treturn\n}", "func RegisterProtocol(messageProtocolID uint16, p Protocol) {\n\tprotocolRegistry[messageProtocolID] = p\n}", "func (s *WebsocketServer) addProtocol(proto string, payloadType int, serializer serialize.Serializer) error {\n\tif payloadType != websocket.TextMessage && payloadType != websocket.BinaryMessage {\n\t\treturn fmt.Errorf(\"invalid payload type: %d\", payloadType)\n\t}\n\tif _, ok := s.protocols[proto]; ok {\n\t\treturn errors.New(\"protocol already registered: \" + proto)\n\t}\n\ts.protocols[proto] = protocol{payloadType, serializer}\n\ts.Upgrader.Subprotocols = append(s.Upgrader.Subprotocols, proto)\n\treturn nil\n}", "func (o *Overlay) StartProtocol(name string, t *Tree, sid ServiceID) (ProtocolInstance, error) {\n\tpi, err := o.CreateProtocol(name, t, sid)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgo func() {\n\t\terr := pi.Start()\n\t\tif err != nil {\n\t\t\tlog.Error(\"Error while starting:\", err)\n\t\t}\n\t}()\n\treturn pi, err\n}", "func NewTBinaryProtocol(t TTransport, cfg *TConfiguration) TProtocol {\n\tp := &tBinaryProtocol{cfg: cfg.NonNil()}\n\tp.TExtraTransport = NewTExtraTransport(t, p.cfg)\n\treturn p\n}", "func newConstProto() *Instruction {\n\treturn &Instruction{\n\t\tType: ConstProtoInst,\n\t\tName: \"ConstProto\",\n\t}\n}", "func RegisterProtocol(name string, v interface{}) {\n\troot.Protocols[name] = v\n}", "func (p *Protocol) Protocol() noise.Protocol {\n\treturn noise.Protocol{\n\t\tBind: p.Bind,\n\t\tOnPeerConnected: p.OnPeerConnected,\n\t\tOnPingFailed: p.OnPingFailed,\n\t\tOnMessageSent: p.OnMessageSent,\n\t\tOnMessageRecv: p.OnMessageRecv,\n\t}\n}", "func NewTBinaryProtocolFactory(cfg *TConfiguration) TProtocolFactory {\n\treturn &tProtocolFactory{cfg, NewTBinaryProtocol}\n}", "func (*GenericFramework) NewMessage(ctx *MessageContext) {}", "func NewProtocols(in *yaml.Node, context *compiler.Context) (*Protocols, error) {\n\terrors := make([]error, 0)\n\tx := &Protocols{}\n\tm, ok := compiler.UnpackMap(in)\n\tif !ok {\n\t\tmessage := fmt.Sprintf(\"has unexpected value: %+v (%T)\", in, in)\n\t\terrors = append(errors, compiler.NewError(context, message))\n\t} else {\n\t\tallowedKeys := []string{\"resumable\", \"simple\"}\n\t\tvar allowedPatterns []*regexp.Regexp\n\t\tinvalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)\n\t\tif len(invalidKeys) > 0 {\n\t\t\tmessage := fmt.Sprintf(\"has invalid %s: %+v\", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, \", \"))\n\t\t\terrors = append(errors, compiler.NewError(context, message))\n\t\t}\n\t\t// Simple simple = 1;\n\t\tv1 := compiler.MapValueForKey(m, \"simple\")\n\t\tif v1 != nil {\n\t\t\tvar err error\n\t\t\tx.Simple, err = NewSimple(v1, compiler.NewContext(\"simple\", v1, context))\n\t\t\tif err != nil {\n\t\t\t\terrors = append(errors, err)\n\t\t\t}\n\t\t}\n\t\t// Resumable resumable = 2;\n\t\tv2 := compiler.MapValueForKey(m, \"resumable\")\n\t\tif v2 != nil {\n\t\t\tvar err 
error\n\t\t\tx.Resumable, err = NewResumable(v2, compiler.NewContext(\"resumable\", v2, context))\n\t\t\tif err != nil {\n\t\t\t\terrors = append(errors, err)\n\t\t\t}\n\t\t}\n\t}\n\treturn x, compiler.NewErrorGroupOrNil(errors)\n}", "func NewProtocolServer(r io.Reader, w io.Writer, s Store) *ProtocolServer {\n\treturn &ProtocolServer{\n\t\tp: NewProtocol(r, w),\n\t\tstore: s,\n\t}\n}", "func NewProtocolReader(data []byte) *ProtocolReader {\n\treturn &ProtocolReader{\n\t\tbuf: data,\n\t\toff: 0,\n\t\tError: nil,\n\t}\n}", "func ProtocolFromString(protocol string) Protocol {\n\treturn Protocol(strings.ToLower(protocol))\n}", "func newPeer(conn *peerConn, ps []Protocol) Peer {\n\tp := &peer{\n\t\tconn: conn,\n\t\tid: conn.id,\n\t\trw: conn.rw,\n\t\tlogger: conn.logger,\n\t\tps: ps,\n\t\tclose: make(chan struct{}),\n\t\tquit: make(chan struct{}),\n\t\tpsCh: make(chan MessageReader),\n\t}\n\tnow := time.Now()\n\tp.lastTime = now.Unix()\n\treturn p\n}", "func NewRTGProtocol(n int, q, p []uint64, sigma float64) *RTGProtocol {\n\trtg := new(RTGProtocol)\n\trtg.ringQModCount = len(q)\n\trtg.ringPModulusBigint = big.NewInt(1)\n\tfor _, pi := range p {\n\t\trtg.ringPModulusBigint.Mul(rtg.ringPModulusBigint, new(big.Int).SetUint64(pi))\n\t}\n\trtg.alpha = len(p)\n\tif rtg.alpha != 0 {\n\t\trtg.beta = int(math.Ceil(float64(len(q)) / float64(len(p))))\n\t} else {\n\t\trtg.beta = 1\n\t}\n\tvar err error\n\trtg.ringQP, err = ring.NewRing(n, append(q, p...))\n\tif err != nil {\n\t\tpanic(err) // TODO error\n\t}\n\n\tprng, err := utils.NewPRNG()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\trtg.gaussianSampler = ring.NewGaussianSampler(prng)\n\trtg.sigma = sigma\n\n\trtg.tmpPoly = [2]*ring.Poly{rtg.ringQP.NewPoly(), rtg.ringQP.NewPoly()}\n\n\treturn rtg\n}", "func newFindNodeProtocol(service service.Service, rt RoutingTable) *findNodeProtocol {\n\n\tp := &findNodeProtocol{\n\t\trt: rt,\n\t\tpending: make(map[crypto.UUID]chan findNodeResults),\n\t\tingressChannel: service.RegisterDirectProtocol(protocol),\n\t\tservice: service,\n\t}\n\n\tif srv, ok := service.(localService); ok {\n\t\tp.log = srv.LocalNode().Log\n\t} else {\n\t\tp.log = log.AppLog\n\t}\n\n\tgo p.readLoop()\n\n\treturn p\n}", "func (c *S2SRegistrant) NewRequestProtocol() S2SRequestProtocol {\n\treturn S2SRequestProtocol{clients: c, CallOption: c.CallOption}\n}", "func NewUtilityProtocol(server *nex.Server) *UtilityProtocol {\n\tutilityProtocol := &UtilityProtocol{server: server}\n\n\tutilityProtocol.Setup()\n\n\treturn utilityProtocol\n}", "func (p OpenFlow10Protocol) NewHello(versionBitmap uint32) goloxi.Message {\n\treturn of10.NewHello()\n}", "func GetProtocol(name string) *Protocol {\n\treturn objc_getProtocol(name)\n}", "func init() {\n\txprotocol.RegisterProtocol(ProtocolName, &boltv2Protocol{})\n}", "func NewPermissionedProtocol(crypto crypto.ContextFactory, keychain keychain.KeyChain,\n\tsc smartcontractengine.SCContextFactory, broadcastAnswer bool) Protocol {\n\n\tp := permissionedProtocol{\n\t\trequests: sync.Map{}, //make(map[string]*request),\n\t\tcrypto: crypto,\n\t\tkeychain: keychain,\n\t\tsc: sc,\n\t\tbroadcastAnswer: broadcastAnswer,\n\t\tdeleteStaleReqCh: make(chan string, 1),\n\t}\n\n\tgo deleteNoneCompleteRequests(&p.requests, p.deleteStaleReqCh, context.TODO())\n\n\treturn &p\n}", "func NewConn(protoName string, target string, id int) (*Conn, error) {\n if target == \"\" {\n return nil, fmt.Errorf(\"No target given\")\n }\n\n c := &Conn {\n\n }\n\n if proto, exists := protocols[protoName]; !exists {\n return nil, 
fmt.Errorf(\"protocol not registered: %v\", protoName)\n } else {\n c.proto = proto\n }\n\n if ipAddr, err := net.ResolveIPAddr(c.proto.resolveStr, target); err != nil {\n return nil, fmt.Errorf(\"net.ResolveIPAddr %v:%v: %v\", c.proto.resolveStr, target, err)\n } else {\n // unprivileged icmp mode uses SOCK_DGRAM\n c.targetAddr = &net.UDPAddr{IP: ipAddr.IP, Zone: ipAddr.Zone}\n }\n\n if icmpConn, err := icmp.ListenPacket(c.proto.listenStr, \"\"); err != nil {\n return nil, fmt.Errorf(\"icmp.ListenPacket %v: %v\", c.proto.listenStr, err)\n } else {\n c.icmpConn = icmpConn\n }\n\n // store local address\n if _, id, err := decodeAddr(c.icmpConn.LocalAddr()); err != nil {\n return nil, fmt.Errorf(\"Unkonwn icmpConn.LocalAddr(): %v\", err)\n } else {\n c.id = id\n }\n\n return c, nil\n}", "func (c *InputService22ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (h *BasicHost) RegisterProtocol(\n\tpid common.Pid,\n\thandler ProtocolHandler,\n\tadapters ...ProtocolAdapter,\n) {\n\th.host.SetStreamHandler(pid.ProtocolID(), func(stream net.Stream) {\n\t\tdefer stream.Reset()\n\t\tmsg, err := common.ReadMessage(stream)\n\t\tif err != nil {\n\t\t\tlog.Println(\"failed to read message from stream :\", err)\n\t\t\treturn\n\t\t}\n\t\tgo handler.Handle(adapters...)(msg)\n\t})\n}", "func (c *InputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func NewRequestWithProtocol(protocol string, db *gorm.DB) (*Request, error) {\n\tvar (\n\t\treq = &Request{\n\t\t\tProtocol: protocol,\n\t\t}\n\t\terr error\n\t)\n\terr = db.Create(req).Error\n\treturn req, err\n}", "func (c *InputService12ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func NewAggregationProtocol(n *onet.TreeNodeInstance) (onet.ProtocolInstance, error) {\n\t//initialize the local sum to 0 and channel\n\tst := &AggregationProtocol{\n\t\tTreeNodeInstance: n,\n\t\tFeedback: make(chan []*big.Int),\n\t\tSum: make([]*big.Int, 0),\n\t}\n\n\t//register the channel for announce\n\terr := st.RegisterChannel(&st.AnnounceChannel)\n\tif err != nil {\n\t\treturn nil, errors.New(\"couldn't register Announce data channel: \" + err.Error())\n\t}\n\n\t//register the channel for child response\n\terr = st.RegisterChannel(&st.ChildDataChannel)\n\tif err != nil {\n\t\treturn nil, errors.New(\"couldn't register Child Response channel\" + err.Error())\n\t}\n\n\treturn st, nil\n}", "func (c *InputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func NewPermuteProtocol(params *bfv.Parameters) (refreshProtocol *PermuteProtocol) {\n\n\tcontext := newDbfvContext(params)\n\n\trefreshProtocol = new(PermuteProtocol)\n\trefreshProtocol.context = context\n\trefreshProtocol.tmp1 = context.ringQP.NewPoly()\n\trefreshProtocol.tmp2 = context.ringQP.NewPoly()\n\trefreshProtocol.hP = 
context.ringP.NewPoly()\n\n\trefreshProtocol.baseconverter = ring.NewFastBasisExtender(context.ringQ, context.ringP)\n\n\tvar m, pos, index1, index2 int\n\n\tindexMatrix := make([]uint64, params.N())\n\n\tlogN := params.LogN()\n\n\trowSize := params.N() >> 1\n\tm = (params.N() << 1)\n\tpos = 1\n\n\tfor i := 0; i < rowSize; i++ {\n\n\t\tindex1 = (pos - 1) >> 1\n\t\tindex2 = (m - pos - 1) >> 1\n\n\t\tindexMatrix[i] = utils.BitReverse64(uint64(index1), uint64(logN))\n\t\tindexMatrix[i|rowSize] = utils.BitReverse64(uint64(index2), uint64(logN))\n\n\t\tpos *= bfv.GaloisGen\n\t\tpos &= (m - 1)\n\t}\n\n\trefreshProtocol.indexMatrix = indexMatrix\n\trefreshProtocol.scaler = ring.NewRNSScaler(params.T(), context.ringQ)\n\n\tprng, err := utils.NewPRNG()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\trefreshProtocol.gaussianSampler = ring.NewGaussianSampler(prng)\n\trefreshProtocol.sigma = params.Sigma()\n\trefreshProtocol.uniformSampler = ring.NewUniformSampler(prng, context.ringT)\n\n\treturn\n}", "func (c *InputService13ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *Client) RunProtocol(ctx context.Context, proto *p2p.Protocol) error {\n\ttopicobj := pss.BytesToTopic([]byte(fmt.Sprintf(\"%s:%d\", proto.Name, proto.Version)))\n\ttopichex := topicobj.String()\n\tmsgC := make(chan pss.APIMsg)\n\tc.peerPool[topicobj] = make(map[string]*pssRPCRW)\n\tsub, err := c.rpc.Subscribe(ctx, \"pss\", msgC, \"receive\", topichex, false, false)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"pss event subscription failed: %v\", err)\n\t}\n\tc.subs = append(c.subs, sub)\n\terr = c.rpc.Call(nil, \"pss_addHandshake\", topichex)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"pss handshake activation failed: %v\", err)\n\t}\n\n\t// dispatch incoming messages\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase msg := <-msgC:\n\t\t\t\t// we only allow sym msgs here\n\t\t\t\tif msg.Asymmetric {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// we get passed the symkeyid\n\t\t\t\t// need the symkey itself to resolve to peer's pubkey\n\t\t\t\tvar pubkeyid string\n\t\t\t\terr = c.rpc.Call(&pubkeyid, \"pss_getHandshakePublicKey\", msg.Key)\n\t\t\t\tif err != nil || pubkeyid == \"\" {\n\t\t\t\t\tlog.Trace(\"proto err or no pubkey\", \"err\", err, \"symkeyid\", msg.Key)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// if we don't have the peer on this protocol already, create it\n\t\t\t\t// this is more or less the same as AddPssPeer, less the handshake initiation\n\t\t\t\tif c.peerPool[topicobj][pubkeyid] == nil {\n\t\t\t\t\tvar addrhex string\n\t\t\t\t\terr := c.rpc.Call(&addrhex, \"pss_getAddress\", topichex, false, msg.Key)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Trace(err.Error())\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\taddrbytes, err := hexutil.Decode(addrhex)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Trace(err.Error())\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\taddr := pss.PssAddress(addrbytes)\n\t\t\t\t\trw, err := c.newpssRPCRW(pubkeyid, addr, topicobj)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tc.peerPool[topicobj][pubkeyid] = rw\n\t\t\t\t\tp := p2p.NewPeer(enode.ID{}, fmt.Sprintf(\"%v\", addr), []p2p.Cap{})\n\t\t\t\t\tgo proto.Run(p, c.peerPool[topicobj][pubkeyid])\n\t\t\t\t}\n\t\t\t\tgo func() {\n\t\t\t\t\tc.peerPool[topicobj][pubkeyid].msgC <- msg.Msg\n\t\t\t\t}()\n\t\t\tcase <-c.quitC:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tc.protos[topicobj] = proto\n\treturn nil\n}", "func 
execNewConn(_ int, p *gop.Context) {\n\targs := p.GetArgs(1)\n\tret := textproto.NewConn(args[0].(io.ReadWriteCloser))\n\tp.Ret(1, ret)\n}", "func New(device, addr string, port int) (*Transport, error) {\n return &Transport{\n device: device,\n addr: addr,\n port: port,\n sessions: make(map[string]*ssh.Session, 800),\n clients: make(map[string]*ssh.Client, 100),\n }, nil\n}", "func (c *InputService18ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService1ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService12ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService4ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService16ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func New(c *drycc.Client, appID string, name string, port int, protocol string) error {\n\tu := fmt.Sprintf(\"/v2/apps/%s/gateways/\", appID)\n\n\treq := api.GatewayCreateRequest{Name: name, Port: port, Protocol: protocol}\n\n\tbody, err := json.Marshal(req)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tres, reqErr := c.Request(\"POST\", u, body)\n\tif reqErr != nil && !drycc.IsErrAPIMismatch(reqErr) {\n\t\treturn reqErr\n\t}\n\tdefer res.Body.Close()\n\n\treturn reqErr\n}", "func (c *OutputService1ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService1ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService4ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService4ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func InstallProtocol(scheme string, service 
common.GitUploadPackService) {\n\tif service == nil {\n\t\tpanic(\"nil service\")\n\t}\n\n\tKnownProtocols[scheme] = service\n}", "func (t Type) New() (Packet, error) {\r\n\tswitch t {\r\n\tcase CONNECT:\r\n\t\treturn NewConnectPacket(), nil\r\n\tcase CONNACK:\r\n\t\treturn NewConnackPacket(), nil\r\n\tcase PUBLISH:\r\n\t\treturn NewPublishPacket(), nil\r\n\tcase PUBACK:\r\n\t\treturn NewPubackPacket(), nil\r\n\tcase PUBREC:\r\n\t\treturn NewPubrecPacket(), nil\r\n\tcase PUBREL:\r\n\t\treturn NewPubrelPacket(), nil\r\n\tcase PUBCOMP:\r\n\t\treturn NewPubcompPacket(), nil\r\n\tcase SUBSCRIBE:\r\n\t\treturn NewSubscribePacket(), nil\r\n\tcase SUBACK:\r\n\t\treturn NewSubackPacket(), nil\r\n\tcase UNSUBSCRIBE:\r\n\t\treturn NewUnsubscribePacket(), nil\r\n\tcase UNSUBACK:\r\n\t\treturn NewUnsubackPacket(), nil\r\n\tcase PINGREQ:\r\n\t\treturn NewPingreqPacket(), nil\r\n\tcase PINGRESP:\r\n\t\treturn NewPingrespPacket(), nil\r\n\tcase DISCONNECT:\r\n\t\treturn NewDisconnectPacket(), nil\r\n\t}\r\n\r\n\treturn nil, fmt.Errorf(\"[Unknown] invalid packet type %d\", t)\r\n}", "func (broadcast *Broadcast) UpgradeProtocol(ctx context.Context, creator, link, reason string,\n\tprivKeyHex string, seq int64) (*model.BroadcastResponse, error) {\n\tmsg := model.UpgradeProtocolMsg{\n\t\tCreator: creator,\n\t\tLink: link,\n\t\tReason: reason,\n\t}\n\treturn broadcast.broadcastTransaction(ctx, msg, privKeyHex, seq, \"\", false)\n}", "func (c *InputService11ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService2ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func NewClient(protocol Protocol, pool Pool) (Client, error) {\n\tfactory, ok := clients[protocol]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"client for protocol '%v' does not exist\", protocol)\n\t}\n\n\treturn factory(pool)\n}", "func (c *OutputService13ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func NewSession(proto string) (*Session, error) {\n\truntime.LockOSThread() // without unlock to exit the Go thread\n\n\tcli, err := lib.NewClient()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"occamy-lib: new client error: %w\", err)\n\t}\n\n\ts := &Session{client: cli}\n\ts.client.InitLogLevel(config.Runtime.Mode)\n\terr = s.client.LoadProtocolPlugin(proto)\n\tif err != nil {\n\t\ts.close()\n\t\treturn nil, fmt.Errorf(\"occamy-lib: load protocol plugin failed: %w\", err)\n\t}\n\ts.ID = s.client.ID\n\treturn s, nil\n}", "func (c *OutputService9ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func newMsg(t string) ([]byte, error) {\n\tswitch t {\n\tcase \"version\":\n\t\treturn newVersion()\n\tcase \"verack\":\n\t\treturn newVerack()\n\tcase \"getheaders\":\n\t\treturn newHeadersReq()\n\tcase \"getaddr\":\n\t\treturn newGetAddr()\n\n\tdefault:\n\t\treturn nil, errors.New(\"Unknown message type\")\n\t}\n}", "func NewPlugin(proto, path string, params ...string) *Plugin {\n\tif proto != \"unix\" && proto != \"tcp\" {\n\t\tpanic(\"Invalid protocol. 
Specify 'unix' or 'tcp'.\")\n\t}\n\tp := &Plugin{\n\t\texe: path,\n\t\tproto: proto,\n\t\tparams: params,\n\t\tinitTimeout: 2 * time.Second,\n\t\texitTimeout: 2 * time.Second,\n\t\thandler: NewDefaultErrorHandler(),\n\t\tmeta: meta(\"pingo\" + randstr(5)),\n\t\tobjsCh: make(chan *objects),\n\t\tconnCh: make(chan *conn),\n\t\tkillCh: make(chan *waiter),\n\t\texitCh: make(chan struct{}),\n\t}\n\treturn p\n}" ]
[ "0.82395333", "0.7932014", "0.7890012", "0.78431666", "0.76969916", "0.75500846", "0.7542913", "0.7482777", "0.74068224", "0.7114767", "0.7084508", "0.70643973", "0.7062169", "0.70373213", "0.694992", "0.6846281", "0.67778176", "0.6698597", "0.6665437", "0.6652466", "0.6492112", "0.64686596", "0.63982326", "0.63627225", "0.6258515", "0.612024", "0.60490876", "0.60185134", "0.5984407", "0.59439355", "0.5844237", "0.58059895", "0.5777244", "0.5757636", "0.5747449", "0.57371205", "0.5705863", "0.5674001", "0.5669954", "0.56649214", "0.56258166", "0.5595089", "0.55733144", "0.5565897", "0.5558684", "0.55369854", "0.552888", "0.5528008", "0.54860795", "0.54849744", "0.54829496", "0.5462213", "0.5456224", "0.5452232", "0.5422412", "0.5349711", "0.53491706", "0.5347251", "0.5336185", "0.53151655", "0.53137195", "0.52829635", "0.52829635", "0.52797025", "0.5278541", "0.5259796", "0.525754", "0.525754", "0.52544874", "0.5252915", "0.52504385", "0.5249443", "0.52378356", "0.52354723", "0.522032", "0.52173805", "0.5215064", "0.5215064", "0.52085614", "0.5206283", "0.5206072", "0.519529", "0.519529", "0.51856923", "0.51856923", "0.51794153", "0.51794153", "0.51733595", "0.51733595", "0.5164603", "0.5158439", "0.5155867", "0.51544243", "0.5150881", "0.5120463", "0.5115024", "0.5106583", "0.5105095", "0.51034236", "0.51008457" ]
0.7420354
8
Initialize initializes rewards protocol
func (p *Protocol) Initialize(context.Context, *sql.Tx, *indexprotocol.Genesis) error { return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (agent *Agent) init(state_size int, action_size int, dqn bool) Agent {\n\tagent.learning_rate = 0.95\n\tagent.action_size = action_size\n\tagent.state_size = state_size\n\tagent.epsilon = 1.0\n\tagent.epsilon_min = 0.01\n\tagent.epsilon_decay = 0.995\n\tagent.gamma = 0.95\n\tagent.HISTORY_LENGTH = 2000\n\tagent.memory = make([]history, agent.HISTORY_LENGTH)\n\tagent.model = *neural.NewNetwork(state_size, []int{state_size, state_size, action_size})\n\tagent.model.RandomizeSynapses()\n\tagent.target_model = *neural.NewNetwork(state_size, []int{state_size, state_size, action_size})\n\tagent.target_model.RandomizeSynapses()\n\tagent.DQN = dqn\n\n\treturn *agent\n}", "func (x *fastReflection_MsgWithdrawDelegatorReward) New() protoreflect.Message {\n\treturn new(fastReflection_MsgWithdrawDelegatorReward)\n}", "func (x *fastReflection_ValidatorOutstandingRewardsRecord) New() protoreflect.Message {\n\treturn new(fastReflection_ValidatorOutstandingRewardsRecord)\n}", "func (x *fastReflection_MsgWithdrawDelegatorRewardResponse) New() protoreflect.Message {\n\treturn new(fastReflection_MsgWithdrawDelegatorRewardResponse)\n}", "func (res *respondent) Initialize() {\n\tres.socketType = utils.Respondent\n}", "func init() {\n\tgo webhook.ProcessRouteStatus(controller)\n}", "func (c *Eth) Initialize(con util.XapiClient) {\n c.con = con\n}", "func (room *GameRoom) init() {\n\tif room.whiteList == nil {\n\t\troom.whiteList = make(map[string]*User)\n\t\troom.blackList = make(map[string]*User)\n\t\troom.inEffectCard = []*Card{}\n\t\troom.turnPlayers = []*User{}\n\t\troom.Turn = 0\n\t\troom.Clock = 11\n\t\troom.IsRouletteTurn = true\n\t\troom.IsRouletteTurn = false\n\t\troom.Level = 1\n\t}\n}", "func init() {\n\tnetwork.RegisterMessages(\n\t\t&CreateGenesisBlock{}, &CreateGenesisBlockResponse{},\n\t\t&AddTxRequest{}, &AddTxResponse{},\n\t)\n}", "func (r *Ricochet) Init() {\n\tr.newconns = make(chan *OpenConnection)\n\tr.networkResolver = utils.NetworkResolver{}\n\tr.rni = new(utils.RicochetNetwork)\n}", "func (_m *IReplyType) Initialize(ctx context.T, result agentcontracts.DocumentResult, replyUUID uuid.UUID) {\n\t_m.Called(ctx, result, replyUUID)\n}", "func init() {\n\tnetwork.RegisterMessage(AnnounceAggregation{})\n\tnetwork.RegisterMessage(ReplySumCipherBytes{})\n\tonet.GlobalProtocolRegister(AggregationProtocolName, NewAggregationProtocol)\n}", "func Init(capacity int) Relay {\n\treturn Relay{\n\t\trmap: make(map[string]*list.List),\n\t\trwmutex: &sync.RWMutex{},\n\t\tcapacity: capacity,\n\t}\n}", "func (pm *personManagement) Init(stub shim.ChaincodeStubInterface) peer.Response {\n\tpm.actions = map[string]PersonAction{\n\t\t\"addPerson\": pm.AddPerson,\n\t}\n\n\tfmt.Println(\"Chaincode has been initialized\")\n\tfmt.Println(\"Following actions are available\")\n\tfor action := range pm.actions {\n\t\tfmt.Printf(\"\\t\\t%s\\n\", action)\n\t}\n\treturn shim.Success(nil)\n}", "func (c *Credits) Init() {\n\tc.nextStateID = \"credits\"\n\tc.alfa = 0\n\tc.state = enter\n\tc.uicredits.Reset()\n}", "func (_Token *TokenTransactor) SetupRewards(opts *bind.TransactOpts, multiplier *big.Int, anualRewardRates []*big.Int, lowerBounds []*big.Int, upperBounds []*big.Int) (*types.Transaction, error) {\n\treturn _Token.contract.Transact(opts, \"setupRewards\", multiplier, anualRewardRates, lowerBounds, upperBounds)\n}", "func (g *Glutton) Init() (err error) {\n\n\tctx := context.Background()\n\tg.ctx, g.cancel = context.WithCancel(ctx)\n\n\tgluttonServerPort := uint(g.conf.GetInt(\"glutton_server\"))\n\n\t// 
Initiate the freki processor\n\tg.processor, err = freki.New(viper.GetString(\"interface\"), g.rules, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Initiating glutton server\n\tg.processor.AddServer(freki.NewUserConnServer(gluttonServerPort))\n\t// Initiating log producer\n\tif g.conf.GetBool(\"enableGollum\") {\n\t\tg.producer = producer.Init(g.id.String(), g.conf.GetString(\"gollumAddress\"))\n\t}\n\t// Initiating protocol handlers\n\tg.mapProtocolHandlers()\n\tg.registerHandlers()\n\n\terr = g.processor.Init()\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn nil\n}", "func (c *FwRouter) Initialize(con util.XapiClient) {\n\tc.con = con\n\tc.ns = namespace.New(singular, plural, con)\n}", "func (t *AnswerChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response {\n\tfmt.Println(\"Answer Store Channel Is Starting Up\")\n\tfuncName, args := stub.GetFunctionAndParameters()\n\tvar err error\n\ttxId := stub.GetTxID()\n\n\tfmt.Println(\" Init() is running\")\n\tfmt.Println(\" Transaction ID: \", txId)\n\tfmt.Println(\" GetFunctionAndParameters() function: \", funcName)\n\tfmt.Println(\" GetFunctionAndParameters() args count: \", len(args))\n\tfmt.Println(\" GetFunctionAndParameters() args found: \", args)\n\n\t// expecting 1 arg for instantiate or upgrade\n\tif len(args) == 2 {\n\t\tfmt.Println(\" GetFunctionAndParameters() : Number of arguments\", len(args))\n\t}\n\n\terr = stub.PutState(args[0], []byte(args[1]))\n\tif err != nil {\n\t\treturn shim.Error(err.Error()) //self-test fail\n\t}\n\n\tfmt.Println(\"Ready for action\") //self-test pass\n\treturn shim.Success(nil)\n}", "func (shuffle *Action) Initialize() {}", "func (_Contract *ContractTransactor) Initialize(opts *bind.TransactOpts, _governableContract common.Address, _proposalVerifier common.Address) (*types.Transaction, error) {\n\treturn _Contract.contract.Transact(opts, \"initialize\", _governableContract, _proposalVerifier)\n}", "func (a *acceptor) init() error {\n\tinstanceID, err := a.state.load()\n\tif err != nil {\n\t\tlNLErr(\"Load State fail, error: %v\", err)\n\t\treturn err\n\t}\n\n\tif instanceID == 0 {\n\t\tlPLGImp(a.conf.groupIdx, \"Empty database\")\n\t}\n\n\ta.setInstanceID(instanceID)\n\n\tlPLGImp(a.conf.groupIdx, \"OK\")\n\n\treturn nil\n}", "func (m *Mob) Initialize() {\n\tm.CurrentHP = m.MaxHP\n\n\t// initialize position data\n\tm.position = &context.Position{\n\t\tInitialX: int(m.SpawnX),\n\t\tInitialY: int(m.SpawnY),\n\t\tCurrentX: int(m.SpawnX),\n\t\tCurrentY: int(m.SpawnY),\n\t\tFinalX: int(m.SpawnX),\n\t\tFinalY: int(m.SpawnY),\n\t}\n\n\tm.SetState(MobStateFind)\n}", "func init() {\n\tactions = make(map[string]InitFunc)\n}", "func initReply(addr string, seq uint16, payload []byte) {\n\t// Read and insert new member to the memberlist\n\tvar member Member\n\tbuf := bytes.NewReader(payload)\n\terr := binary.Read(buf, binary.BigEndian, &member)\n\tprintError(err)\n\t// Update state of the new member\n\t// ...\n\tCurrentList.Insert(&member)\n\taddUpdate2Cache(&member, MemUpdateJoin)\n\n\t// Put the entire memberlist to the Init Reply's payload\n\tvar memBuffer bytes.Buffer // Temp buf to store member's binary value\n\tvar binBuffer bytes.Buffer\n\n\tfor i := 0; i < CurrentList.Size(); i += 1 {\n\t\tmember_, _ := CurrentList.RetrieveByIdx(i)\n\n\t\tbinary.Write(&memBuffer, binary.BigEndian, member_)\n\t\tbinBuffer.Write(memBuffer.Bytes())\n\t\tmemBuffer.Reset() // Clear buffer\n\t}\n\n\t// Send pigggback Init Reply\n\tackWithPayload(addr, seq, binBuffer.Bytes(), MemInitReply, 0x00)\n}", "func init() 
{\n\tcore.AddAcl(new(AclHmac))\n}", "func (c *contract) _init(ctx sdk.Context) error {\n\n\t// Save a key with the name \"name\" and string value \"Fun_Token\" in the state of the contract\n\tc.State.WriteStringByKey(ctx, \"name\", \"Fun_Token\")\n\n\t// Save a key with the name \"symbol\" and string value \"FUN\"\n\tc.State.WriteStringByKey(ctx, \"symbol\", \"FUN\")\n\n\t// Save a key with the name \"totalSupply\" and uint64 value \"FUN\" and return\n\treturn c.State.WriteUint64ByKey(ctx, \"totalSupply\", 1000000000)\n}", "func (_Token *TokenTransactorSession) SetupRewards(multiplier *big.Int, anualRewardRates []*big.Int, lowerBounds []*big.Int, upperBounds []*big.Int) (*types.Transaction, error) {\n\treturn _Token.Contract.SetupRewards(&_Token.TransactOpts, multiplier, anualRewardRates, lowerBounds, upperBounds)\n}", "func (p *Pocket2RM) Init() {\n\tif p.init {\n\t\treturn\n\t}\n\tp.Config = confer.NewConfig()\n\tp.Config.ReadPaths(ConfigFile)\n\tp.ConsumerKey = p.Config.GetString(\"consumer_key\")\n\n\tp.openDatabase()\n\t\n\tp.AccessToken = nil\n\taccessToken := &AccessToken{}\n\terr := LoadJSONFromFile(PocketAccessFile, accessToken)\n\tif err == nil {\n\t\tp.AccessToken = accessToken\n\t}\n\n\trmAccessToken := &RMToken{}\n\terr = LoadJSONFromFile(RMAccessFile, rmAccessToken)\n\tif err == nil {\n\t\tp.rmAccessToken = rmAccessToken\n\t}\n\t\n\tp.init = true\n}", "func init() {\n\tmc.senders = make(map[string]func(GoogleID, string) (bool, error))\n\tmc.inited = true\n}", "func (br *BGPReflector) Init() (err error) {\n\treturn nil\n}", "func (s *Session) Init() {\n\ts.missRatioMMA = 0\n\ts.r = nil\n}", "func (_Token *TokenSession) SetupRewards(multiplier *big.Int, anualRewardRates []*big.Int, lowerBounds []*big.Int, upperBounds []*big.Int) (*types.Transaction, error) {\n\treturn _Token.Contract.SetupRewards(&_Token.TransactOpts, multiplier, anualRewardRates, lowerBounds, upperBounds)\n}", "func init() {\n\tfframes = make(map[string]Frame)\n\tfmethods = make(map[string]func(string))\n}", "func (s *Session) Init() {\n\ts.setEthereumRPCPath()\n\ts.setPayloadRPCPath()\n}", "func (_DelegationController *DelegationControllerTransactorSession) Initialize(contractsAddress common.Address) (*types.Transaction, error) {\n\treturn _DelegationController.Contract.Initialize(&_DelegationController.TransactOpts, contractsAddress)\n}", "func (_Mcapscontroller *McapscontrollerTransactor) Initialize(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _Mcapscontroller.contract.Transact(opts, \"initialize\")\n}", "func InitiateActionBuffer(){\n\tActionBuffer=make([]Action,constants.MaxActions)\n}", "func (b *AnnealingEpsilonGreedy) Init(nArms int) {\n\tb.counts = make([]uint64, nArms, nArms)\n\tb.values = make([]float64, nArms, nArms)\n}", "func (this *HTTPBehavior) Initialize(dest string) error {\n\tthis.HTTPPostTemplate = this.Config.HTTPPostTemplate\n\tthis.firstEventTemplate = template.Must(template.New(\"first_event\").Parse(`{{.}}`))\n\tthis.subsequentEventTemplate = template.Must(template.New(\"subsequent_event\").Parse(\"\\n, {{.}}\"))\n\n\tthis.headers = make(map[string]string)\n\n\tthis.dest = dest\n\n\t/* add authorization token, if applicable */\n\tif this.Config.HTTPAuthorizationToken != nil {\n\t\tthis.headers[\"Authorization\"] = *this.Config.HTTPAuthorizationToken\n\t}\n\n\tthis.headers[\"Content-Type\"] = *this.Config.HTTPContentType\n\tif this.Config.CompressHTTPPayload {\n\t\tthis.headers[\"Content-Encoding\"] = \"gzip\"\n\t}\n\n\tthis.client = &http.Client{\n\t\tTransport: 
this.CreateTransport(),\n\t\tTimeout: 120 * time.Second, // default timeout is 2 minutes for the entire exchange\n\t}\n\n\treturn nil\n}", "func init() {\n\t// Initialization goes here\n}", "func (_Harberger *HarbergerTransactor) Initialize(opts *bind.TransactOpts, _currencyManager common.Address) (*types.Transaction, error) {\n\treturn _Harberger.contract.Transact(opts, \"initialize\", _currencyManager)\n}", "func (_DelegationController *DelegationControllerSession) Initialize(contractsAddress common.Address) (*types.Transaction, error) {\n\treturn _DelegationController.Contract.Initialize(&_DelegationController.TransactOpts, contractsAddress)\n}", "func (c *SimpleChaincode) init(stub shim.ChaincodeStubInterface, args []string) pb.Response{\n\tfmt.Println(\"DONE !!!\")\n\treturn shim.Success(nil)\n}", "func (_Mcapscontroller *McapscontrollerTransactorSession) Initialize() (*types.Transaction, error) {\n\treturn _Mcapscontroller.Contract.Initialize(&_Mcapscontroller.TransactOpts)\n}", "func (_Mcapscontroller *McapscontrollerSession) Initialize() (*types.Transaction, error) {\n\treturn _Mcapscontroller.Contract.Initialize(&_Mcapscontroller.TransactOpts)\n}", "func (a52 *A52_equation_generator) initialize(frame_number [21]int) {\n\n\tvanillaA52 := new(A52)\n\tvanillaA52.Session_key = a52.Session_key\n\tvanillaA52.Initialize(frame_number)\n\n\ta52.R1.Values = vanillaA52.R1.Register\n\ta52.R2.Values = vanillaA52.R2.Register\n\ta52.R3.Values = vanillaA52.R3.Register\n\n\ta52.R4.Register = vanillaA52.R4.Register\n}", "func (s *Speaker) Init() error { return nil }", "func init() {\n\tresetConnection()\n}", "func (_DelayedWithdrawal *DelayedWithdrawalTransactor) Initialize(opts *bind.TransactOpts, sender common.Address) (*types.Transaction, error) {\n\treturn _DelayedWithdrawal.contract.Transact(opts, \"initialize\", sender)\n}", "func (m *Mixtape) init() {\n\tm.Users = []User{}\n\tm.Playlists = []Playlist{}\n\tm.Songs = []Song{}\n}", "func (b *Bot) Init() (err error) {\n\t// set global default timezone\n\tif b.config.Timezone != \"\" {\n\t\ttime.Local, err = time.LoadLocation(b.config.Timezone)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tlog.Info(\"Connecting to slack...\")\n\tb.auth, err = b.slackClient.AuthTest()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"auth error\")\n\t}\n\tclient.AuthResponse = *b.auth\n\tclient.AllChannels, err = b.loadChannels()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error while fetching public channels\")\n\t}\n\n\terr = b.loadSlackData()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Infof(\"Loaded %d allowed users and %d channels\", len(b.allowedUsers), len(client.AllChannels))\n\tlog.Infof(\"Bot user: @%s with ID %s on workspace %s\", b.auth.User, b.auth.UserID, b.auth.URL)\n\n\tcommands := b.commands.GetCommandNames()\n\tlog.Infof(\"Initialized %d commands:\", len(commands))\n\tlog.Info(strings.Join(commands, \", \"))\n\n\treturn nil\n}", "func (tr *TestRecorder) init() {}", "func (_Dospayment *DospaymentTransactor) Initialize(opts *bind.TransactOpts, _bridgeAddr common.Address, _guardianFundsAddr common.Address, _tokenAddr common.Address) (*types.Transaction, error) {\n\treturn _Dospayment.contract.Transact(opts, \"initialize\", _bridgeAddr, _guardianFundsAddr, _tokenAddr)\n}", "func (t *Chaincode) Init(stub shim.ChaincodeStubInterface) peer.Response {\r\n\t// Get the args from the transaction proposal\r\n\targs := stub.GetStringArgs()\r\n\tif len(args) != 0 {\r\n\t\treturn shim.Error(\"Incorrect arguments. 
Constructor doesn't expect arguments!\")\r\n\t}\r\n\tlogger.Info(\"successfully initialized\")\r\n\treturn shim.Success(nil)\r\n}", "func (o *Gift) Initialize() {\n\to.giftBase.Initialize()\n\t// Add your own initializations here\n}", "func init() {\n\t// Bind native to golang bridge functions\n\tmcdrv.BindNativeToGoFunctions(\n\t\tHandleFoundPeer,\n\t\tReceiveFromPeer,\n\t)\n}", "func (_Contract *ContractTransactorSession) Initialize(_governableContract common.Address, _proposalVerifier common.Address) (*types.Transaction, error) {\n\treturn _Contract.Contract.Initialize(&_Contract.TransactOpts, _governableContract, _proposalVerifier)\n}", "func (c *PanoPoli) Initialize(i util.XapiClient) {\n c.Nat = &nat.PanoNat{}\n c.Nat.Initialize(i)\n\n c.PolicyBasedForwarding = &pbf.PanoPbf{}\n c.PolicyBasedForwarding.Initialize(i)\n\n c.Security = &security.PanoSecurity{}\n c.Security.Initialize(i)\n}", "func (_DelegationController *DelegationControllerTransactor) Initialize(opts *bind.TransactOpts, contractsAddress common.Address) (*types.Transaction, error) {\n\treturn _DelegationController.contract.Transact(opts, \"initialize\", contractsAddress)\n}", "func init() {\n\trClient = &client.RestClient{\n\t\tMessagesResourceURI: getMmsURI() + \"/messages\",\n\t\tHealthResoruceURI: getMmsURI() + \"/health\",\n\t\tHTTPClient: &http.Client{},\n\t}\n}", "func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response {\n\tfmt.Println(\"Marbles Is Starting Up\")\n\tfuncName, args := stub.GetFunctionAndParameters()\n\ttxId := stub.GetTxID()\n\n\tfmt.Println(\"Init() is running\")\n\tfmt.Println(\"Transaction ID:\", txId)\n\tfmt.Println(\" GetFunctionAndParameters() function:\", funcName)\n\tfmt.Println(\" GetFunctionAndParameters() args count:\", len(args))\n\tfmt.Println(\" GetFunctionAndParameters() args found:\", args)\n\n\tt.enroll_donor(stub, []string{\"d1\", \"김현욱\", \"010-1234-5678\"})\n\tt.enroll_npo(stub, []string{\"n1\",\"프리즈밍\"})\n\tt.enroll_npo(stub, []string{\"n2\",\"비영리스타트업\"})\n\tt.enroll_npo(stub, []string{\"n3\",\"서울시NPO지원센터\"})\n\tt.enroll_npo(stub, []string{\"n4\",\"아름다운가게\"})\n\tt.enroll_recipient(stub, []string{\"r1\",\"윤지성\",\"Permanent\"})\n\n\n\n\tfmt.Println(\"Ready for action\") //self-test pass\n\treturn shim.Success(nil)\n}", "func Initialize(ctx contract.Context, initState *InitializationState) error {\n\tctx.Logger().Info(\"DPOSv3 Initialize\")\n\n\t// set new State\n\tif err := saveState(ctx, initState.State); err != nil {\n\t\treturn err\n\t}\n\n\t// set new Candidates\n\tif err := saveCandidateList(ctx, initState.Candidates); err != nil {\n\t\treturn err\n\t}\n\n\t// set new Delegations\n\tfor _, delegation := range initState.Delegations {\n\t\tif err := SetDelegation(ctx, delegation); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// set new Statistics\n\tfor _, statistic := range initState.Statistics {\n\t\tif err := SetStatistic(ctx, statistic); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (cc *Game) Init(stub shim.ChaincodeStubInterface) peer.Response {\n\treturn shim.Success(nil)\n}", "func (_Univ2 *Univ2Transactor) Initialize(opts *bind.TransactOpts, _token0 common.Address, _token1 common.Address) (*types.Transaction, error) {\n\treturn _Univ2.contract.Transact(opts, \"initialize\", _token0, _token1)\n}", "func Init() (s Self) {\n\ts.Capitals = readJSON(capitalsEmbed)\n\treturn s\n}", "func (_AnchorChain *AnchorChainTransactor) Init(opts *bind.TransactOpts, hub common.Address) (*types.Transaction, error) {\n\treturn 
_AnchorChain.contract.Transact(opts, \"init\", hub)\n}", "func (s *Session) Initialize(options options.Options) {\n\ts.Options = options\n\ts.InitStateStoreOrFail(\"\")\n\ts.InitLogger()\n\ts.InitStats()\n\ts.InitThreads()\n\ts.Signatures = signatures.LoadSignatures()\n}", "func init() {\n\t//todo...\n}", "func (t *Transaction) Init() {\n\tt.ID = bson.NewObjectId()\n\tt.CreatedAt = time.Now().Unix()\n\tt.Status = \"received\"\n\tt.Attempts = 0\n}", "func init() {\n\txprotocol.RegisterProtocol(ProtocolName, &boltv2Protocol{})\n}", "func initialise() {\n\taction[0] = \"stone\"\n\taction[1] = \"paper\"\n\taction[2] = \"scissors\"\n\tcanDefeat[0] = 2\n\tcanDefeat[1] = 0\n\tcanDefeat[2] = 1\n\tp1TotalScore = 0\n\tp2TotalScore = 0\n\tp3TotalScore = 0\n\tp4TotalScore = 0\n\n}", "func (h *Handler) Initialize() {\n\tws.Initialize()\n\tgo ws.Instance.Execute()\n\n\th.Router = mux.NewRouter()\n}", "func initializeReply(buf []byte) *InitializeReply {\n\tv := new(InitializeReply)\n\tb := 1 // skip reply determinant\n\n\tb += 1 // padding\n\n\tv.Sequence = xgb.Get16(buf[b:])\n\tb += 2\n\n\tv.Length = xgb.Get32(buf[b:]) // 4-byte units\n\tb += 4\n\n\tv.MajorVersion = buf[b]\n\tb += 1\n\n\tv.MinorVersion = buf[b]\n\tb += 1\n\n\tb += 22 // padding\n\n\treturn v\n}", "func (ps *PrjnStru) Init(prjn emer.Prjn) {\n\tps.LeabraPrj = prjn.(LeabraPrjn)\n}", "func Make(peers []*labrpc.ClientEnd, me int,\n\tpersister *Persister, applyCh chan ApplyMsg) *Raft {\n\trf := &Raft{}\n\trf.peers = peers\n\trf.persister = persister\n\trf.me = me\n\t// initialize from state persisted before a crash\n\trf.readPersist(persister.ReadRaftState())\n\t/// Start as a follower.\n\trf.currentState = \"FOLLOWER\"\n\trf.commitIndex=1\n\trf.lastApplied=1 // Initializing this to 1 as we have added dummy 0th entry\n\trf.votedFor=-1\n\t//05/12\n\t//Let the leader start with 0. 
When a candidate transitions to the candidate state it increments this value.\n\trf.currentTerm=0\n\t//Initialize the log.\n\t//This is a dummy entry\n\trf.log = append(rf.log, LogEntry{LastLogTerm: 0})\n\trf.applyCh = applyCh\n\trf.debug(\"++++++++++++++++++++++++++Length of the log during initialization---> %d \\n\",len(rf.log))\n\trf.electionTimer = time.NewTimer((400 + time.Duration(rand.Intn(300))) * time.Millisecond)\n\t// Your initialization code here (2A, 2B, 2C).\n\tgo rf.conductElection()\n\t//Send heart beat to everybody else\n\treturn rf\n}", "func (_L2CrossDomainMessenger *L2CrossDomainMessengerTransactor) Initialize(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _L2CrossDomainMessenger.contract.Transact(opts, \"initialize\")\n}", "func init() {\n\tpolochon.RegisterModule(&Guessit{})\n}", "func (c *Client) init() {\n\tc.headers = make(chan *types.Header)\n\tc.blocks = make(chan *types.Block)\n\tc.transactions = make(chan *interfaces.TxWithBlock)\n\n\tc.shutdown = make(chan struct{})\n\tc.loopExit = make(chan struct{})\n\tc.errs = make(chan error, 1)\n\tc.sendErrorOnce = new(sync.Once)\n}", "func Init() (err error) {\n\n\tbot, er := newBot(\"config.json\")\n\tif er != nil {\n\t\terr = er\n\t\treturn\n\t}\n\n\ts, er := discordgo.New(\"Bot \" + bot.Token)\n\tif er != nil {\n\t\terr = er\n\t\treturn\n\t}\n\n\ts.AddHandler(bot.MessageHandler)\n\n\ter = s.Open()\n\tif er != nil {\n\t\terr = er\n\t\treturn\n\t}\n\treturn\n}", "func (s *Scope) Initialize() error {\n\tvar ok bool\n\tvar right, up, forward raytracing.Vector\n\n\tif s.Target != nil {\n\t\tforward, ok = s.Target.Subtract(s.Position).Normalize()\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"target and position are the same\")\n\t\t}\n\t\tvertical := raytracing.Vector{X: 0, Y: 1, Z: 0}\n\n\t\tif forward.IsVertical() {\n\t\t\tright = raytracing.Vector{X: 1, Y: 0, Z: 0}\n\t\t} else {\n\t\t\tright = forward.Cross(vertical)\n\t\t}\n\n\t\tup = right.Cross(forward)\n\n\t\tvar err error\n\t\tif up, err = up.Rotate(-s.Roll, forward); err != nil {\n\t\t\treturn fmt.Errorf(\"scope rotation failed: %s\", err)\n\t\t}\n\t\tright = forward.Cross(up)\n\n\t\ts.Up = &up\n\t\ts.Right = &right\n\t\ts.Forward = &forward\n\t}\n\n\tright, ok = s.Right.Normalize()\n\tif !ok {\n\t\treturn fmt.Errorf(\"vector 'right' is a zero vector\")\n\t}\n\tup, ok = s.Up.Normalize()\n\tif !ok {\n\t\treturn fmt.Errorf(\"vector 'up' is a zero vector\")\n\t}\n\tforward, ok = s.Forward.Normalize()\n\tif !ok {\n\t\treturn fmt.Errorf(\"vector 'forward' is a zero vector\")\n\t}\n\n\ts.Up = &up\n\ts.Right = &right\n\ts.Forward = &forward\n\n\treturn nil\n}", "func (_AnchorChain *AnchorChainTransactorSession) Init(hub common.Address) (*types.Transaction, error) {\n\treturn _AnchorChain.Contract.Init(&_AnchorChain.TransactOpts, hub)\n}", "func init() {\n\tcallbacks = make(map[ModuleType]*ConfigCallback, 8)\n\tmodules = make(map[string]ModuleType, 32)\n}", "func (x *fastReflection_ValidatorCurrentRewardsRecord) New() protoreflect.Message {\n\treturn new(fastReflection_ValidatorCurrentRewardsRecord)\n}", "func (s *DiscordState) Init() error {\n\ts.Session = new(discordgo.Session)\n\t\n\tfmt.Printf(\"\\nConnecting…\")\n\n\tdg, err := discordgo.New(Config.Username, Config.Password)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Open the websocket and begin listening.\n\tdg.Open()\n\n\t//Retrieve GuildID's from current User\n\t//need index of Guilds[] rather than UserGuilds[] (maybe)\n\tGuilds, err := dg.UserGuilds(0, \"\", \"\")\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\ts.Guilds = Guilds\n\n\ts.Session = dg\n\n\ts.User, _ = s.Session.User(\"@me\")\n\n\tfmt.Printf(\" PASSED!\\n\")\n\n\treturn nil\n}", "func (q *QLearning) Initialize() {\n\tq.learningRate = 0.7\n\tq.epsilon = 0.3\n\n\tq.actns = 2\n\tq.workdays = 5\n\n\tq.qt = make(QTable)\n}", "func (es *eeStack) init(caps int) {\r\n\t((*eeValues)(es)).init(caps)\r\n}", "func (p *OnPrem) Initialize() error {\n\treturn nil\n}", "func (p *OnPrem) Initialize() error {\n\treturn nil\n}", "func (m *Manager) init(ledgerIndex iotago.MilestoneIndex) error {\n\tm.currentLock.Lock()\n\tdefer m.currentLock.Unlock()\n\n\tcurrentProtoParams, err := m.storage.ProtocolParameters(ledgerIndex)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm.current = currentProtoParams\n\tm.loadPending(ledgerIndex)\n\n\treturn nil\n}", "func (_AnchorChain *AnchorChainSession) Init(hub common.Address) (*types.Transaction, error) {\n\treturn _AnchorChain.Contract.Init(&_AnchorChain.TransactOpts, hub)\n}", "func (s *Sim) Init() {\n\ts.System = make([]*poly.Chain,0)\n\ts.solver = new(solver.RK4)\n\ts.solver.Mods = make([]solver.Modifier,0)\n\ts.solverInit = false\n}", "func (a *GCRLoginAgent) init() {\n\tif a.In == nil {\n\t\ta.In = os.Stdin\n\t}\n\tif a.Out == nil {\n\t\ta.Out = os.Stdout\n\t}\n\tif a.OpenBrowser == nil {\n\t\ta.OpenBrowser = webbrowser.Open\n\t}\n}", "func (s *SigMentionHandler) Initialize(config *github.Config, features *features.Features) error {\n\treturn nil\n}", "func (d *Dispatcher) Initialize(ctx context.Context, ep *endpoint.Endpoint) error {\n\td.validators = ep.SenderValidators\n\n\tvar unicast, multicast ax.MessageTypeSet\n\n\tfor mt := range d.Routes {\n\t\tif mt.IsCommand() {\n\t\t\tunicast = unicast.Add(mt)\n\t\t} else if mt.IsEvent() {\n\t\t\tmulticast = multicast.Add(mt)\n\t\t} else {\n\t\t\tunicast = unicast.Add(mt)\n\t\t\tmulticast = multicast.Add(mt)\n\t\t}\n\t}\n\n\tif err := ep.InboundTransport.Subscribe(ctx, endpoint.OpSendUnicast, unicast); err != nil {\n\t\treturn err\n\t}\n\n\treturn ep.InboundTransport.Subscribe(ctx, endpoint.OpSendMulticast, multicast)\n}", "func init() {\n\tMoods = map[string]MoodState{\n\t\t\"neutral\": MoodStateNeutral,\n\t\t\"happy\": MoodStateHappy,\n\t\t\"sad\": MoodStateSad,\n\t\t\"angry\": MoodStateAngry,\n\t\t\"hopeful\": MoodStateHopeful,\n\t\t\"thrilled\": MoodStateThrilled,\n\t\t\"bored\": MoodStateBored,\n\t\t\"shy\": MoodStateShy,\n\t\t\"comical\": MoodStateComical,\n\t\t\"cloudnine\": MoodStateOnCloudnine,\n\t}\n}", "func (m *Method) Init() {\n\tm.Comments = make([]string, 0)\n\tm.Attributes = make([]*Attribute, 0)\n\tm.Parameters = make([]*Field, 0)\n}", "func Init(config *cfg.Config, logger log.Logger, conns proxy.AppConns) *RelayController {\n\tinitOnce.Do(func() {\n\t\ttemp := strings.Split(config.RPC.ListenAddress, \":\")\n\t\tlocalURL := \"http://127.0.0.1:\" + temp[len(temp)-1]\n\n\t\tgRelay = &RelayController{\n\t\t\tLocalURL: localURL,\n\t\t\tcurrentNodeAddress: getNodeAddress(config, \"\", \"\", 0),\n\t\t\tconfig: config,\n\t\t\tabciClient: conns,\n\t\t\tlogger: logger,\n\t\t}\n\n\t\tgRelay.init()\n\n\t\tlogger.Info(\"RELAY init\", \"gRelay\", gRelay)\n\t})\n\n\treturn gRelay\n}", "func Init(r *Rope) {\n\tr.Head = knot{\n\t\theight: 1,\n\t\tnexts: make([]skipknot, MaxHeight),\n\t}\n}", "func init() {\n\ttoken = nep17.Token{\n\t\tName: \"Awesome NEO Token\",\n\t\tSymbol: \"ANT\",\n\t\tDecimals: decimals,\n\t\tOwner: owner,\n\t\tTotalSupply: 11000000 * multiplier,\n\t\tCirculationKey: \"TokenCirculation\",\n\t}\n\tctx = storage.GetContext()\n}" ]
[ "0.5713748", "0.5581605", "0.5409013", "0.5395823", "0.53937817", "0.5388697", "0.53284436", "0.52667344", "0.52412826", "0.5227323", "0.51680535", "0.5149312", "0.5135256", "0.51343644", "0.51328707", "0.5130539", "0.51271385", "0.5116818", "0.5077328", "0.50768316", "0.5060023", "0.50181025", "0.5015243", "0.49986953", "0.4989429", "0.49848652", "0.49696404", "0.4960565", "0.49399245", "0.49396646", "0.49286196", "0.49180916", "0.4913114", "0.49080268", "0.49073434", "0.49069995", "0.48996338", "0.48968697", "0.4895493", "0.48920414", "0.48877805", "0.48834726", "0.48749095", "0.48707378", "0.48689625", "0.48609984", "0.48561773", "0.48527828", "0.48469824", "0.48454276", "0.48416796", "0.4841316", "0.4832003", "0.48292693", "0.4829012", "0.4828448", "0.4827994", "0.48233208", "0.48206344", "0.48188263", "0.4811837", "0.48110303", "0.4809164", "0.48005885", "0.4798902", "0.47988018", "0.47973764", "0.4792609", "0.47903165", "0.47827643", "0.47808963", "0.4778679", "0.47765082", "0.4774913", "0.4772507", "0.47684586", "0.47676775", "0.47621948", "0.47577402", "0.4752922", "0.4747479", "0.47472668", "0.4746665", "0.47464764", "0.4745233", "0.4744764", "0.47441277", "0.4739283", "0.4739283", "0.47284234", "0.4725654", "0.47226074", "0.4722066", "0.4713035", "0.4706366", "0.4706349", "0.47055537", "0.4702163", "0.47019276", "0.46933445" ]
0.48114556
61
getRewardHistory reads reward history
func (p *Protocol) getRewardHistory(actionHash string) ([]*RewardHistory, error) { db := p.Store.GetDB() getQuery := fmt.Sprintf(selectRewardHistory, RewardHistoryTableName) stmt, err := db.Prepare(getQuery) if err != nil { return nil, errors.Wrap(err, "failed to prepare get query") } defer stmt.Close() rows, err := stmt.Query(actionHash) if err != nil { return nil, errors.Wrap(err, "failed to execute get query") } var rewardHistory RewardHistory parsedRows, err := s.ParseSQLRows(rows, &rewardHistory) if err != nil { return nil, errors.Wrap(err, "failed to parse results") } if len(parsedRows) == 0 { return nil, indexprotocol.ErrNotExist } var rewardHistoryList []*RewardHistory for _, parsedRow := range parsedRows { rewards := parsedRow.(*RewardHistory) rewardHistoryList = append(rewardHistoryList, rewards) } return rewardHistoryList, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (as AccountStorage) GetRewardHistory(\n\tctx sdk.Context, me types.AccountKey, bucketSlot int64) (*RewardHistory, sdk.Error) {\n\tstore := ctx.KVStore(as.key)\n\trewardHistoryBytes := store.Get(getRewardHistoryKey(me, bucketSlot))\n\tif rewardHistoryBytes == nil {\n\t\treturn nil, nil\n\t}\n\thistory := new(RewardHistory)\n\tif err := as.cdc.UnmarshalJSON(rewardHistoryBytes, history); err != nil {\n\t\treturn nil, ErrFailedToUnmarshalRewardHistory(err)\n\t}\n\treturn history, nil\n}", "func (_Token *TokenSession) BaseRewardHistory(index *big.Int) (*big.Int, *big.Int, *big.Int, *big.Int, error) {\n\treturn _Token.Contract.BaseRewardHistory(&_Token.CallOpts, index)\n}", "func (_Token *TokenCallerSession) BaseRewardHistory(index *big.Int) (*big.Int, *big.Int, *big.Int, *big.Int, error) {\n\treturn _Token.Contract.BaseRewardHistory(&_Token.CallOpts, index)\n}", "func (_Token *TokenCaller) BaseRewardHistory(opts *bind.CallOpts, index *big.Int) (*big.Int, *big.Int, *big.Int, *big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t\tret1 = new(*big.Int)\n\t\tret2 = new(*big.Int)\n\t\tret3 = new(*big.Int)\n\t)\n\tout := &[]interface{}{\n\t\tret0,\n\t\tret1,\n\t\tret2,\n\t\tret3,\n\t}\n\terr := _Token.contract.Call(opts, out, \"baseRewardHistory\", index)\n\treturn *ret0, *ret1, *ret2, *ret3, err\n}", "func getHistory(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\ttype AuditHistory struct {\n\t\tTxId string `json:\"txId\"`\n\t\tValue Marble `json:\"value\"`\n\t}\n\tvar history []AuditHistory;\n\tvar marble Marble\n\n\tif len(args) != 1 {\n\t\treturn shim.Error(\"Incorrect number of arguments. Expecting 1\")\n\t}\n\n\tmarbleId := args[0]\n\tfmt.Printf(\"- start getHistoryForMarble: %s\\n\", marbleId)\n\n\t// Get History\n\tresultsIterator, err := stub.GetHistoryForKey(marbleId)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\tdefer resultsIterator.Close()\n\n\tfor resultsIterator.HasNext() {\n\t\thistoryData, err := resultsIterator.Next()\n\t\tif err != nil {\n\t\t\treturn shim.Error(err.Error())\n\t\t}\n\n\t\tvar tx AuditHistory\n\t\ttx.TxId = historyData.TxId //copy transaction id over\n\t\tjson.Unmarshal(historyData.Value, &marble) //un stringify it aka JSON.parse()\n\t\tif historyData.Value == nil { //marble has been deleted\n\t\t\tvar emptyMarble Marble\n\t\t\ttx.Value = emptyMarble //copy nil marble\n\t\t} else {\n\t\t\tjson.Unmarshal(historyData.Value, &marble) //un stringify it aka JSON.parse()\n\t\t\ttx.Value = marble //copy marble over\n\t\t}\n\t\thistory = append(history, tx) //add this tx to the list\n\t}\n\tfmt.Printf(\"- getHistoryForMarble returning:\\n%s\", history)\n\n\t//change to array of bytes\n\thistoryAsBytes, _ := json.Marshal(history) //convert to array of bytes\n\treturn shim.Success(historyAsBytes)\n}", "func ViewReward(rw http.ResponseWriter, r *http.Request) {\n\t// get the token\n\treqToken := r.Header.Get(\"Authorization\")\n\t\n\t// get the claims\n\tclaims, isNotValid := GetClaims(reqToken, rw)\n\tif isNotValid {\n\t\treturn\n\t}\n\n\tdt, err := db.GetUserRewards(claims.Roll)\n\tif err != nil {\n\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\trw.Write(Rsp(err.Error(), \"Server Error\"))\n\t\treturn\n\t}\n\trw.WriteHeader(http.StatusOK)\n\tres := c.RespData{\n\t\tMessage: \"All data\",\n\t\tData: dt,\n\t}\n\tjson.NewEncoder(rw).Encode(res)\n}", "func (_IStakingRewards *IStakingRewardsTransactor) GetReward(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _IStakingRewards.contract.Transact(opts, 
\"getReward\")\n}", "func get_history(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\tif len(args) != 1 {\n\t\treturn shim.Error(\"Incorrect number of arguments. Expecting 1\")\n\t}\n\n\tkey := args[0]\n\tfmt.Printf(\"- start getHistory: %s\\n\", key)\n\n\t// Get History\n\tresultsIterator, err := stub.GetHistoryForKey(key)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\tdefer resultsIterator.Close()\n\n\tresults, err := ConvHistoryResult(resultsIterator)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\tfmt.Println(\"end getHistory\")\n\n\treturn shim.Success(results)\n}", "func getHistory(source string) ([]TransferData, error) {\n\turl := fmt.Sprintf(\"%s/history?duration=%s\", source, url.QueryEscape(AgentRouter.CronInterval))\n\tresp := utils.FetchResponse(url, []byte{})\n\tif resp.Error != nil {\n\t\treturn nil, resp.Error\n\t}\n\tvar transferRecords []TransferData\n\terr := json.Unmarshal(resp.Data, &transferRecords)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn transferRecords, nil\n}", "func (_XStaking *XStakingTransactor) GetReward(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _XStaking.contract.Transact(opts, \"getReward\")\n}", "func (s *HeroesServiceChaincode) gethistory(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\tif len(args) < 2 {\n\t\treturn shim.Error(\"Incorrect number of arguments. Expecting 3\")\n\t}\n\n\tvegKey := args[1]\n\tfmt.Printf(\"##### start History of Record: %s\\n\", vegKey)\n\n\tresultsIterator, err := stub.GetHistoryForKey(vegKey)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\tdefer resultsIterator.Close()\n\n\t// buffer is a JSON array containing historic values for the marble\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"[\")\n\n\tbArrayMemberAlreadyWritten := false\n\tfor resultsIterator.HasNext() {\n\t\tresponse, err := resultsIterator.Next()\n\t\tif err != nil {\n\t\t\treturn shim.Error(err.Error())\n\t\t}\n\t\t// Add a comma before array members, suppress it for the first array member\n\t\tif bArrayMemberAlreadyWritten == true {\n\t\t\tbuffer.WriteString(\",\")\n\t\t}\n\t\tbuffer.WriteString(\"{\\\"TxId\\\":\")\n\t\tbuffer.WriteString(\"\\\"\")\n\t\tbuffer.WriteString(response.TxId)\n\t\tbuffer.WriteString(\"\\\"\")\n\n\t\tbuffer.WriteString(\", \\\"Value\\\":\")\n\t\t// if it was a delete operation on given key, then we need to set the\n\t\t//corresponding value null. 
Else, we will write the response.Value\n\t\t//as-is (as the Value itself a JSON marble)\n\t\tif response.IsDelete {\n\t\t\tbuffer.WriteString(\"null\")\n\t\t} else {\n\t\t\tbuffer.WriteString(string(response.Value))\n\t\t}\n\n\t\tbuffer.WriteString(\", \\\"Timestamp\\\":\")\n\t\tbuffer.WriteString(\"\\\"\")\n\t\tbuffer.WriteString(time.Unix(response.Timestamp.Seconds, int64(response.Timestamp.Nanos)).String())\n\t\tbuffer.WriteString(\"\\\"\")\n\n\t\tbuffer.WriteString(\", \\\"IsDelete\\\":\")\n\t\tbuffer.WriteString(\"\\\"\")\n\t\tbuffer.WriteString(strconv.FormatBool(response.IsDelete))\n\t\tbuffer.WriteString(\"\\\"\")\n\n\t\tbuffer.WriteString(\"}\")\n\t\tbArrayMemberAlreadyWritten = true\n\t}\n\tbuffer.WriteString(\"]\")\n\n\tfmt.Printf(\"- getHistoryForVegetable returning:\\n%s\\n\", buffer.String())\n\n\treturn shim.Success(buffer.Bytes())\n}", "func (_IStakingRewards *IStakingRewardsTransactorSession) GetReward() (*types.Transaction, error) {\n\treturn _IStakingRewards.Contract.GetReward(&_IStakingRewards.TransactOpts)\n}", "func (_IStakingRewards *IStakingRewardsSession) GetReward() (*types.Transaction, error) {\n\treturn _IStakingRewards.Contract.GetReward(&_IStakingRewards.TransactOpts)\n}", "func GetRewardEventsInfo(fromBlock *big.Int, toBlock *big.Int) []*RewardInfo {\n\n\tvar logEpochRewardSig = []byte(\"EpochRewardsDistributedToVoters(address,uint256)\")\n\tvar logEpochRewardSigHash = crypto.Keccak256Hash(logEpochRewardSig)\n\tvar TopicsFilter = [][]common.Hash{{logEpochRewardSigHash}}\n\n\tcontractAddress := common.HexToAddress(WrapperContractDeploymentAddress[NetActive][Election])\n\n\tquery := ethereum.FilterQuery{\n\t\tFromBlock: fromBlock,\n\t\tToBlock: toBlock,\n\t\tTopics: TopicsFilter,\n\n\t\tAddresses: []common.Address{\n\t\t\tcontractAddress,\n\t\t},\n\t}\n\n\tlogs, err := atlasEthClient.FilterLogs(context.Background(), query)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\trewards_info := make([]*RewardInfo, 0, len(logs))\n\n\tcontractAbi, err := abi.JSON(strings.NewReader(string(binding.ElectionABI)))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor _, vLog := range logs {\n\n\t\tvar epochRewardEvent EpochRewardEvent\n\t\terr := contractAbi.Unpack(&epochRewardEvent, \"EpochRewardsDistributedToVoters\", vLog.Data)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tri := &RewardInfo{Group: common.HexToAddress(vLog.Topics[1].Hex()).String(),\n\t\t\tGroupHash: vLog.Topics[1],\n\t\t\tRewardValue: epochRewardEvent.Value,\n\t\t\tBlockNumber: new(big.Int).SetUint64(vLog.BlockNumber)}\n\n\t\tAddAtlasToRewardInfo(ri)\n\n\t\trewards_info = append(rewards_info, ri)\n\t}\n\n\treturn rewards_info\n}", "func (_TrialRulesAbstract *TrialRulesAbstractCaller) GetReward(opts *bind.CallOpts) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _TrialRulesAbstract.contract.Call(opts, out, \"getReward\")\n\treturn *ret0, err\n}", "func (as AccountStorage) GetReward(ctx sdk.Context, accKey types.AccountKey) (*Reward, sdk.Error) {\n\tstore := ctx.KVStore(as.key)\n\trewardByte := store.Get(getRewardKey(accKey))\n\tif rewardByte == nil {\n\t\treturn nil, ErrRewardNotFound()\n\t}\n\treward := new(Reward)\n\tif err := as.cdc.UnmarshalJSON(rewardByte, reward); err != nil {\n\t\treturn nil, ErrFailedToUnmarshalReward(err)\n\t}\n\treturn reward, nil\n}", "func (t *SupplyChaincode) getTradeHistory(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\n\tif len(args) < 1 {\n\t\treturn shim.Error(\"Incorrect number of arguments. 
Expecting 1\")\n\t}\n\n\tmarbleName := args[0]\n\n\tfmt.Printf(\"- start getHistoryForMarble: %s\\n\", marbleName)\n\n/*\n// TODO: not implemented in current stable version. Waiting for v1.0 beta release.\n\tresultsIterator, err := stub.GetHistoryForKey(marbleName)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\tdefer resultsIterator.Close()\n\n\t// buffer is a JSON array containing historic values for the marble\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"[\")\n\n\tbArrayMemberAlreadyWritten := false\n\tfor resultsIterator.HasNext() {\n\t\ttxID, historicValue, err := resultsIterator.Next()\n\t\tif err != nil {\n\t\t\treturn shim.Error(err.Error())\n\t\t}\n\t\t// Add a comma before array members, suppress it for the first array member\n\t\tif bArrayMemberAlreadyWritten == true {\n\t\t\tbuffer.WriteString(\",\")\n\t\t}\n\t\tbuffer.WriteString(\"{\\\"TxId\\\":\")\n\t\tbuffer.WriteString(\"\\\"\")\n\t\tbuffer.WriteString(txID)\n\t\tbuffer.WriteString(\"\\\"\")\n\n\t\tbuffer.WriteString(\", \\\"Value\\\":\")\n\t\t// historicValue is a JSON marble, so we write as-is\n\t\tbuffer.WriteString(string(historicValue))\n\t\tbuffer.WriteString(\"}\")\n\t\tbArrayMemberAlreadyWritten = true\n\t}\n\tbuffer.WriteString(\"]\")\n\n\tfmt.Printf(\"- getHistoryForMarble returning:\\n%s\\n\", buffer.String())\n\n\treturn shim.Success(buffer.Bytes())\n*/\n\n\tqueryString := args[0]\n\tqueryResults, err := stub.GetState(queryString)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\t\n\treturn shim.Success(queryResults)\t\n}", "func (room *RoomRecorder) history() []*action_.PlayerAction {\n\troom.historyM.RLock()\n\tv := room._history\n\troom.historyM.RUnlock()\n\treturn v\n}", "func (s *Client) GetHistory(ctx context.Context, scripthash string) ([]*GetMempoolResult, error) {\n\tvar resp GetMempoolResp\n\n\terr := s.request(ctx, \"blockchain.scripthash.get_history\", []interface{}{scripthash}, &resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp.Result, err\n}", "func (_Lmc *LmcSession) GetUserAccumulatedReward(_userAddress common.Address, tokenIndex *big.Int) (*big.Int, error) {\n\treturn _Lmc.Contract.GetUserAccumulatedReward(&_Lmc.CallOpts, _userAddress, tokenIndex)\n}", "func (_Lmc *LmcCallerSession) GetUserAccumulatedReward(_userAddress common.Address, tokenIndex *big.Int) (*big.Int, error) {\n\treturn _Lmc.Contract.GetUserAccumulatedReward(&_Lmc.CallOpts, _userAddress, tokenIndex)\n}", "func (c RewardsController) GetRewards(page int) revel.Result {\n\n\tif !c.GetCurrentUser() {\n\t\treturn c.ForbiddenResponse()\n\t}\n\n\t//ChangeRewardsModel() // Remove when finish production\n\n\tvar reward models.Reward\n\tif Reward, ok := app.Mapper.GetModel(&reward); ok {\n\t\tvar rewards = []models.Reward{}\n\t\tvar match = bson.M{\"$and\": []bson.M{\n\t\t\tbson.M{\"$or\": []bson.M{\n\t\t\t\tbson.M{\"user_id\": c.CurrentUser.GetID().Hex()},\n\t\t\t\tbson.M{\"users\": bson.M{\"$elemMatch\": bson.M{\"$eq\": c.CurrentUser.GetID().Hex()}}},\n\t\t\t}},\n\t\t\tbson.M{\"is_visible\": true},\n\t\t\tbson.M{\"resource_type\": bson.M{\"$ne\": core.ModelTypeChallenge}},\n\t\t}}\n\t\tif page <= 1 {\n\t\t\tpage = 1\n\t\t}\n\t\tvar pipe = mgomap.Aggregate{}.Match(match).Sort(bson.M{\"updated_at\": -1}).Skip((page - 1) * core.LimitRewards).Limit(core.LimitRewards)\n\n\t\tif err := Reward.Pipe(pipe, &rewards); err != nil {\n\t\t\treturn c.ErrorResponse(c.Message(\"error.notFound\", \"Rewards\"), \"No rewards Found\", 400)\n\t\t}\n\t\treturn c.SuccessResponse(rewards, \"success\", 
core.ModelsType[core.ModelReward], serializers.RewardSerializer{Lang: c.Request.Locale})\n\n\t}\n\treturn c.ServerErrorResponse()\n}", "func getHistory(stub shim.ChaincodeStubInterface, args []string) (string, error) {\n\tif len(args) != 1 {\n\t\treturn \"\", fmt.Errorf(\"Incorrect arguments. Expecting a key\")\n\t}\n\n\tresultsIterator, err := stub.GetHistoryForKey(args[0])\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to get asset: %s with error: %s\", args[0], err)\n\t}\n\tif resultsIterator == nil {\n\t\treturn \"\", fmt.Errorf(\"history not found: %s\", args[0])\n\t}\n\n\tdefer resultsIterator.Close()\n\n\t// buffer is a JSON array containing historic values for the marble\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"[\")\n\n\tbArrayMemberAlreadyWritten := false\n\tfor resultsIterator.HasNext() {\n\t\tresponse, err := resultsIterator.Next()\n\t\tif err != nil {\n\t\t\treturn \"iterating error\", err\n\t\t}\n\t\t// Add a comma before array members, suppress it for the first array member\n\t\tif bArrayMemberAlreadyWritten == true {\n\t\t\tbuffer.WriteString(\",\")\n\t\t}\n\t\tbuffer.WriteString(\"{\\\"TxId\\\":\")\n\t\tbuffer.WriteString(\"\\\"\")\n\t\tbuffer.WriteString(response.TxId)\n\t\tbuffer.WriteString(\"\\\"\")\n\n\t\tbuffer.WriteString(\", \\\"Value\\\":\")\n\t\t// if it was a delete operation on given key, then we need to set the\n\t\t//corresponding value null. Else, we will write the response.Value\n\t\t//as-is (as the Value itself a JSON marble)\n\t\tif response.IsDelete {\n\t\t\tbuffer.WriteString(\"null\")\n\t\t} else {\n\t\t\tbuffer.WriteString(string(response.Value))\n\t\t}\n\n\t\tbuffer.WriteString(\", \\\"Timestamp\\\":\")\n\t\tbuffer.WriteString(\"\\\"\")\n\t\tbuffer.WriteString(time.Unix(response.Timestamp.Seconds, int64(response.Timestamp.Nanos)).String())\n\t\tbuffer.WriteString(\"\\\"\")\n\n\t\tbuffer.WriteString(\", \\\"IsDelete\\\":\")\n\t\tbuffer.WriteString(\"\\\"\")\n\t\tbuffer.WriteString(strconv.FormatBool(response.IsDelete))\n\t\tbuffer.WriteString(\"\\\"\")\n\n\t\tbuffer.WriteString(\"}\")\n\t\tbArrayMemberAlreadyWritten = true\n\t}\n\tbuffer.WriteString(\"]\")\n\n\tfmt.Printf(\"- getHistoryForMarble returning:\\n%s\\n\", buffer.String())\n\n\treturn buffer.String(), nil\n}", "func getAccumulatedRewards(ctx sdk.Context, distKeeper types.DistributionKeeper, delegation stakingtypes.Delegation) ([]wasmvmtypes.Coin, error) {\n\t// Try to get *delegator* reward info!\n\tparams := distributiontypes.QueryDelegationRewardsRequest{\n\t\tDelegatorAddress: delegation.DelegatorAddress,\n\t\tValidatorAddress: delegation.ValidatorAddress,\n\t}\n\tcache, _ := ctx.CacheContext()\n\tqres, err := distKeeper.DelegationRewards(sdk.WrapSDKContext(cache), &params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// now we have it, convert it into wasmvm types\n\trewards := make([]wasmvmtypes.Coin, len(qres.Rewards))\n\tfor i, r := range qres.Rewards {\n\t\trewards[i] = wasmvmtypes.Coin{\n\t\t\tDenom: r.Denom,\n\t\t\tAmount: r.Amount.TruncateInt().String(),\n\t\t}\n\t}\n\treturn rewards, nil\n}", "func (_XStaking *XStakingSession) GetReward() (*types.Transaction, error) {\n\treturn _XStaking.Contract.GetReward(&_XStaking.TransactOpts)\n}", "func (_TrialRulesAbstract *TrialRulesAbstractCallerSession) GetReward() (*big.Int, error) {\n\treturn _TrialRulesAbstract.Contract.GetReward(&_TrialRulesAbstract.CallOpts)\n}", "func (_XStaking *XStakingTransactorSession) GetReward() (*types.Transaction, error) {\n\treturn 
_XStaking.Contract.GetReward(&_XStaking.TransactOpts)\n}", "func (_Lmc *LmcCaller) GetUserAccumulatedReward(opts *bind.CallOpts, _userAddress common.Address, tokenIndex *big.Int) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _Lmc.contract.Call(opts, &out, \"getUserAccumulatedReward\", _userAddress, tokenIndex)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_TrialRulesAbstract *TrialRulesAbstractSession) GetReward() (*big.Int, error) {\n\treturn _TrialRulesAbstract.Contract.GetReward(&_TrialRulesAbstract.CallOpts)\n}", "func (l *Linenoise) historyList() []string {\n\treturn l.history\n}", "func (t *MedChain) getHistory(stub shim.ChaincodeStubInterface, recordKey string) []byte {\n\n\t\tfmt.Printf(\"- start getHistory: %s\\n\", recordKey)\n\n\t\tresultsIterator, err := stub.GetHistoryForKey(recordKey)\n\t\tif err != nil {\n\t\t\terrors.New(err.Error())\n\t\t//\treturn shim.Error(err.Error())\n\t\t}\n\t\tdefer resultsIterator.Close()\n\n\t\t// buffer is a JSON array containing historic values for the key/value pair\n\t\tvar buffer bytes.Buffer\n\t\tbuffer.WriteString(\"[\")\n\n\t\tbArrayMemberAlreadyWritten := false\n\t\tfor resultsIterator.HasNext() {\n\t\t\tresponse, err := resultsIterator.Next()\n\t\t\tif err != nil {\n\t\t\t\terrors.New(err.Error())\n\t\t\t\t//return shim.Error(err.Error())\n\t\t\t}\n\t\t\t// Add a comma before array members, suppress it for the first array member\n\t\t\tif bArrayMemberAlreadyWritten == true {\n\t\t\t\tbuffer.WriteString(\",\")\n\t\t\t}\n\t\t\tbuffer.WriteString(\"{\\\"TxId\\\":\")\n\t\t\tbuffer.WriteString(\"\\\"\")\n\t\t\tbuffer.WriteString(response.TxId)\n\t\t\tbuffer.WriteString(\"\\\"\")\n\n\t\t\tbuffer.WriteString(\", \\\"Value\\\":\")\n\t\t\t// if it was a delete operation on given key, then we need to set the\n\t\t\t//corresponding value null. 
Else, we will write the response.Value\n\t\t\t//as-is (as the Value itself a JSON vehiclePart)\n\t\t\tif response.IsDelete {\n\t\t\t\tbuffer.WriteString(\"null\")\n\t\t\t} else {\n\t\t\t\tbuffer.WriteString(string(response.Value))\n\t\t\t}\n\n\t\t\tbuffer.WriteString(\", \\\"Timestamp\\\":\")\n\t\t\tbuffer.WriteString(\"\\\"\")\n\t\t\tbuffer.WriteString(time.Unix(response.Timestamp.Seconds, int64(response.Timestamp.Nanos)).String())\n\t\t\tbuffer.WriteString(\"\\\"\")\n\n\t\t\tbuffer.WriteString(\", \\\"IsDelete\\\":\")\n\t\t\tbuffer.WriteString(\"\\\"\")\n\t\t\tbuffer.WriteString(strconv.FormatBool(response.IsDelete))\n\t\t\tbuffer.WriteString(\"\\\"\")\n\n\t\t\tbuffer.WriteString(\"}\")\n\t\t\tbArrayMemberAlreadyWritten = true\n\t\t}\n\t\tbuffer.WriteString(\"]\")\n\n\t\tfmt.Printf(\"- getHistoryForRecord returning:\\n%s\\n\", buffer.String())\n\n\t\treturn buffer.Bytes()\n\t}", "func (i *historyItem) getHistory() ([]*historyRow, error) {\n\trows := []*historyRow{}\n\n\tfi, err := os.Open(i.path)\n\tif err != nil {\n\t\treturn rows, err\n\t}\n\tdefer fi.Close()\n\n\tbr := bufio.NewReader(fi)\n\tfor {\n\t\ta, _, c := br.ReadLine()\n\t\tif c == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tr := &historyRow{}\n\t\t// ignore\n\t\terr := json.Unmarshal(a, r)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\trows = append(rows, r)\n\t}\n\n\treturn rows, nil\n}", "func (p *Protocol) getAccountReward(epochNumber uint64, candidateName string) (*AccountReward, error) {\n\tdb := p.Store.GetDB()\n\n\tgetQuery := fmt.Sprintf(selectAccountReward,\n\t\tAccountRewardTableName)\n\tstmt, err := db.Prepare(getQuery)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to prepare get query\")\n\t}\n\tdefer stmt.Close()\n\n\trows, err := stmt.Query(epochNumber, candidateName)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to execute get query\")\n\t}\n\n\tvar accountReward AccountReward\n\tparsedRows, err := s.ParseSQLRows(rows, &accountReward)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to parse results\")\n\t}\n\n\tif len(parsedRows) == 0 {\n\t\treturn nil, indexprotocol.ErrNotExist\n\t}\n\n\tif len(parsedRows) > 1 {\n\t\treturn nil, errors.New(\"only one row is expected\")\n\t}\n\n\treturn parsedRows[0].(*AccountReward), nil\n}", "func (cc CoinCap) GetHistory(baseID, interval string, timeFrom, timeTo int64) (history []CCHistoryItem, err error) {\n\tbaseID = strings.ToLower(strings.Join(strings.Split(baseID, \" \"), \"-\"))\n\turl := fmt.Sprintf(\"%s/assets/%s/history?interval=%s&start=%d&end=%d\",\n\t\tcc.BaseURL, baseID, interval, timeFrom, timeTo)\n\tresponse, err := http.Get(url)\n\tif err != nil {\n\t\treturn\n\t}\n\tresult := struct {\n\t\tError string `json:\"error\"`\n\t\tData []CCHistoryItem `json:\"Data\"`\n\t}{}\n\terr = json.NewDecoder(response.Body).Decode(&result)\n\tif err != nil {\n\t\treturn\n\t}\n\tif result.Error != \"\" {\n\t\terr = errors.New(result.Error)\n\t\treturn\n\t}\n\thistory = result.Data\n\treturn\n}", "func (b *BinanceWorker) GetHistoryTrades(symbol string, start, end int64, number int) {\n\ttrades, err := b.Cli.NewAggTradesService().\n\t\tSymbol(symbol).StartTime(start).EndTime(end).\n\t\tDo(context.Background())\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tvar sum int = 0\n\tvar qul float64 = 0.\n\tvar allQul float64 = 0.\n\tfor _, t := range trades {\n\t\ti, _ := strconv.ParseFloat(t.Quantity, 64)\n\t\tallQul += i\n\t\tif t.IsBuyerMaker {\n\t\t\tsum++\n\t\t\tqul += 
i\n\t\t}\n\n\t}\n\n\tfmt.Println(float64(float64(sum)/float64(len(trades)))*100, float64(float64(qul)/float64(allQul))*100)\n\t// err = json.Unmarshal(jsonBlob, &rankings)\n\t// if err != nil {\n\t// \t// nozzle.printError(\"opening config file\", err.Error())\n\t// }\n\torders, err := json.Marshal(trades)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\t//fmt.Println(string(orders))\n\t// rankingsJson, _ := json.Marshal(rankings)\n\terr = ioutil.WriteFile(fmt.Sprintf(\"%d.json\", number), orders, 0644)\n\t// fmt.Printf(\"%+v\", rankings)\n}", "func (p *Protocol) updateRewardHistory(tx *sql.Tx, epochNumber uint64, actionHash string, rewardInfoMap map[string]*RewardInfo) error {\n\tvalStrs := make([]string, 0, len(rewardInfoMap))\n\tvalArgs := make([]interface{}, 0, len(rewardInfoMap)*7)\n\tfor rewardAddress, rewards := range rewardInfoMap {\n\t\tblockReward := rewards.BlockReward.String()\n\t\tepochReward := rewards.EpochReward.String()\n\t\tfoundationBonus := rewards.FoundationBonus.String()\n\n\t\tvar candidateName string\n\t\t// If more than one candidates share the same reward address, just use the first candidate as their delegate\n\t\tif len(p.RewardAddrToName[rewardAddress]) > 0 {\n\t\t\tcandidateName = p.RewardAddrToName[rewardAddress][0]\n\t\t}\n\n\t\tvalStrs = append(valStrs, \"(?, ?, ?, ?, CAST(? as DECIMAL(65, 0)), CAST(? as DECIMAL(65, 0)), CAST(? as DECIMAL(65, 0)))\")\n\t\tvalArgs = append(valArgs, epochNumber, actionHash, rewardAddress, candidateName, blockReward, epochReward, foundationBonus)\n\t}\n\tinsertQuery := fmt.Sprintf(insertRewardHistory, RewardHistoryTableName, strings.Join(valStrs, \",\"))\n\n\tif _, err := tx.Exec(insertQuery, valArgs...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (_Lmc *LmcCallerSession) GetUserRewardDebt(_userAddress common.Address, _index *big.Int) (*big.Int, error) {\n\treturn _Lmc.Contract.GetUserRewardDebt(&_Lmc.CallOpts, _userAddress, _index)\n}", "func GetRideHistory(w http.ResponseWriter, r *http.Request) {\n\n\tw.Header().Set(\"Content-type\", \"application/json\")\n\tparams := mux.Vars(r)\n\tstrVar := params[\"id\"]\n\tCustomerid, _ := strconv.Atoi(strVar)\n\tvar customer models.Customer\n\tvar db1 *gorm.DB = db.Init()\n\tif err := db1.Where(\"customerid =? \", Customerid).Find(&customer).Error; err != nil {\n\t\thttp.Error(w, \"Customer has not registered!\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tvar customerhistory []models.Customerhistory\n\n\tif err := db1.Where(\"customerid= ? 
\", Customerid).Find(&customerhistory).Error; err != nil {\n\t\thttp.Error(w, \"Customer has not taken a ride yet!\", http.StatusBadRequest)\n\t\treturn\n\n\t}\n\tjson.NewEncoder(w).Encode(customerhistory)\n}", "func (i *Inspector) History(qname string, n int) ([]*DailyStats, error) {\n\tif err := base.ValidateQueueName(qname); err != nil {\n\t\treturn nil, err\n\t}\n\tstats, err := i.rdb.HistoricalStats(qname, n)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar res []*DailyStats\n\tfor _, s := range stats {\n\t\tres = append(res, &DailyStats{\n\t\t\tQueue: s.Queue,\n\t\t\tProcessed: s.Processed,\n\t\t\tFailed: s.Failed,\n\t\t\tDate: s.Time,\n\t\t})\n\t}\n\treturn res, nil\n}", "func (_Lmc *LmcSession) GetUserRewardDebt(_userAddress common.Address, _index *big.Int) (*big.Int, error) {\n\treturn _Lmc.Contract.GetUserRewardDebt(&_Lmc.CallOpts, _userAddress, _index)\n}", "func accumulateRewards(config *params.ChainConfig, state *state.DB, header *types.Header) {\n\t// TODO: implement mining rewards\n}", "func (k Querier) Rewards(c context.Context, req *types.QueryRewardsRequest) (*types.QueryRewardsResponse, error) {\n\tif req == nil {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"invalid request\")\n\t}\n\n\tif req.StakingCoinDenom != \"\" {\n\t\tif err := sdk.ValidateDenom(req.StakingCoinDenom); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tctx := sdk.UnwrapSDKContext(c)\n\tstore := ctx.KVStore(k.storeKey)\n\tvar rewards []types.Reward\n\tvar pageRes *query.PageResponse\n\tvar err error\n\n\tif req.Farmer != \"\" {\n\t\tvar farmerAcc sdk.AccAddress\n\t\tfarmerAcc, err = sdk.AccAddressFromBech32(req.Farmer)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tstorePrefix := types.GetRewardsByFarmerIndexKey(farmerAcc)\n\t\tindexStore := prefix.NewStore(store, storePrefix)\n\t\tpageRes, err = query.FilteredPaginate(indexStore, req.Pagination, func(key, value []byte, accumulate bool) (bool, error) {\n\t\t\t_, stakingCoinDenom := types.ParseRewardsByFarmerIndexKey(append(storePrefix, key...))\n\t\t\tif req.StakingCoinDenom != \"\" {\n\t\t\t\tif stakingCoinDenom != req.StakingCoinDenom {\n\t\t\t\t\treturn false, nil\n\t\t\t\t}\n\t\t\t}\n\t\t\treward, found := k.GetReward(ctx, stakingCoinDenom, farmerAcc)\n\t\t\tif !found { // TODO: remove this check\n\t\t\t\treturn false, fmt.Errorf(\"reward not found\")\n\t\t\t}\n\t\t\tif accumulate {\n\t\t\t\trewards = append(rewards, reward)\n\t\t\t}\n\t\t\treturn true, nil\n\t\t})\n\t} else {\n\t\tvar storePrefix []byte\n\t\tif req.StakingCoinDenom != \"\" {\n\t\t\tstorePrefix = types.GetRewardsByStakingCoinDenomKey(req.StakingCoinDenom)\n\t\t} else {\n\t\t\tstorePrefix = types.RewardKeyPrefix\n\t\t}\n\t\trewardStore := prefix.NewStore(store, storePrefix)\n\n\t\tpageRes, err = query.Paginate(rewardStore, req.Pagination, func(key, value []byte) error {\n\t\t\tstakingCoinDenom, farmerAcc := types.ParseRewardKey(append(storePrefix, key...))\n\t\t\trewardCoins, err := k.UnmarshalRewardCoins(value)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\trewards = append(rewards, types.Reward{\n\t\t\t\tFarmer: farmerAcc.String(),\n\t\t\t\tStakingCoinDenom: stakingCoinDenom,\n\t\t\t\tRewardCoins: rewardCoins.RewardCoins,\n\t\t\t})\n\t\t\treturn nil\n\t\t})\n\t}\n\tif err != nil {\n\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t}\n\n\treturn &types.QueryRewardsResponse{Rewards: rewards, Pagination: pageRes}, nil\n}", "func (l *Linenoise) historyGet(idx int) string {\n\treturn l.history[len(l.history)-1-idx]\n}", "func (s 
*SmartContract) getLCHistory(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {\n \n\t lcId := args[0];\n \n\t resultsIterator, err := APIstub.GetHistoryForKey(lcId)\n\t if err != nil {\n\t\t return shim.Error(\"Error retrieving LC history.\")\n\t }\n\t defer resultsIterator.Close()\n \n\t // buffer is a JSON array containing historic values for the marble\n\t var buffer bytes.Buffer\n\t buffer.WriteString(\"[\")\n \n\t bArrayMemberAlreadyWritten := false\n\t for resultsIterator.HasNext() {\n\t\t response, err := resultsIterator.Next()\n\t\t if err != nil {\n\t\t\t return shim.Error(\"Error retrieving LC history.\")\n\t\t }\n\t\t // Add a comma before array members, suppress it for the first array member\n\t\t if bArrayMemberAlreadyWritten == true {\n\t\t\t buffer.WriteString(\",\")\n\t\t }\n\t\t buffer.WriteString(\"{\\\"TxId\\\":\")\n\t\t buffer.WriteString(\"\\\"\")\n\t\t buffer.WriteString(response.TxId)\n\t\t buffer.WriteString(\"\\\"\")\n \n\t\t buffer.WriteString(\", \\\"Value\\\":\")\n\t\t // if it was a delete operation on given key, then we need to set the\n\t\t //corresponding value null. Else, we will write the response.Value\n\t\t //as-is (as the Value itself a JSON marble)\n\t\t if response.IsDelete {\n\t\t\t buffer.WriteString(\"null\")\n\t\t } else {\n\t\t\t buffer.WriteString(string(response.Value))\n\t\t }\n \n\t\t buffer.WriteString(\", \\\"Timestamp\\\":\")\n\t\t buffer.WriteString(\"\\\"\")\n\t\t buffer.WriteString(time.Unix(response.Timestamp.Seconds, int64(response.Timestamp.Nanos)).String())\n\t\t buffer.WriteString(\"\\\"\")\n \n\t\t buffer.WriteString(\", \\\"IsDelete\\\":\")\n\t\t buffer.WriteString(\"\\\"\")\n\t\t buffer.WriteString(strconv.FormatBool(response.IsDelete))\n\t\t buffer.WriteString(\"\\\"\")\n \n\t\t buffer.WriteString(\"}\")\n\t\t bArrayMemberAlreadyWritten = true\n\t }\n\t buffer.WriteString(\"]\")\n \n\t fmt.Printf(\"- getLCHistory returning:\\n%s\\n\", buffer.String())\n \n\t return shim.Success(buffer.Bytes())\n }", "func GetReward(a Action, feedback Action) float64 {\n\tif a == feedback {\n\t\treturn 1\n\t}\n\treturn -1\n}", "func (s *Client) GetHistory(username string) (*sessions.History, error) {\n\tdata := &sessions.History{\n\t\tInput: []string{},\n\t\tReply: []string{},\n\t}\n\n\tfor i := 0; i < sessions.HistorySize; i++ {\n\t\tdata.Input = append(data.Input, \"undefined\")\n\t\tdata.Reply = append(data.Reply, \"undefined\")\n\t}\n\n\trows, err := s.db.Query(\"SELECT input,reply FROM history WHERE user_id = (SELECT id FROM users WHERE username = ?) ORDER BY timestamp ASC LIMIT 10;\", username)\n\tif err != nil {\n\t\treturn data, err\n\t}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tvar input, reply string\n\t\terr := rows.Scan(&input, &reply)\n\t\tif err != nil {\n\t\t\tlog.Println(\"[ERROR]\", err)\n\t\t\tcontinue\n\t\t}\n\t\tdata.Input = data.Input[:len(data.Input)-1] // Pop\n\t\tdata.Input = append([]string{strings.TrimSpace(input)}, data.Input...) // Unshift\n\t\tdata.Reply = data.Reply[:len(data.Reply)-1] // Pop\n\t\tdata.Reply = append([]string{strings.TrimSpace(reply)}, data.Reply...) 
// Unshift\n\n\t}\n\n\treturn data, nil\n}", "func (path *Path) Rewards() map[*Reward]int {\n\treturn path.rewards\n}", "func (o *Convergence) AccessHistory() *History {\n\treturn o.Hist\n}", "func getHistory(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\ttype AuditHistory struct {\n\t\tTxId string `json:\"txId\"`\n\t\tValue Listing `json:\"value\"`\n\t}\n\tvar history []AuditHistory\n\tvar listing Listing\n\n\tif len(args) != 1 {\n\t\treturn shim.Error(\"Incorrect number of arguments. Expecting 1\")\n\t}\n\n\tlistingId := args[0]\n\tfmt.Printf(\"- start getHistoryForListing: %s\\n\", listingId)\n\n\t// Get History\n\tresultsIterator, err := stub.GetHistoryForKey(listingId)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\tdefer resultsIterator.Close()\n\n\tfor resultsIterator.HasNext() {\n\t\thistoryData, err := resultsIterator.Next()\n\t\tif err != nil {\n\t\t\treturn shim.Error(err.Error())\n\t\t}\n\n\t\tvar tx AuditHistory\n\t\ttx.TxId = historyData.TxId //copy transaction id over\n\t\tjson.Unmarshal(historyData.Value, &listing) //un stringify it aka JSON.parse()\n\t\tif historyData.Value == nil { //listing has been deleted\n\t\t\tvar emptyListing Listing\n\t\t\ttx.Value = emptyListing //copy nil listing\n\t\t} else {\n\t\t\tjson.Unmarshal(historyData.Value, &listing) //un stringify it aka JSON.parse()\n\t\t\ttx.Value = listing //copy listing over\n\t\t}\n\t\thistory = append(history, tx) //add this tx to the list\n\t}\n\tfmt.Printf(\"- getHistoryForListing returning:\\n%s\", history)\n\n\t//change to array of bytes\n\thistoryAsBytes, _ := json.Marshal(history) //convert to array of bytes\n\treturn shim.Success(historyAsBytes)\n}", "func (t *BenchmarkerChaincode) GetHistoryForKey(stub shim.ChaincodeStubInterface, seed, keySizeLo, keySizeHi int) pb.Response {\n\tvar (\n\t\tkm NoopKeyMapper\n\t)\n\n\tkey := km.GetKeys(seed, 1, keySizeLo, keySizeHi)[0]\n\n\tresultsIterator, err := stub.GetHistoryForKey(key)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\tdefer resultsIterator.Close()\n\n\tfmt.Printf(\"GetHistoryForKey: Getting history for key '%s'\\n\", key)\n\n\t// buffer is a JSON array containing historic values for the marble\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"[\")\n\n\tbArrayMemberAlreadyWritten := false\n\tfor resultsIterator.HasNext() {\n\t\tresponse, err := resultsIterator.Next()\n\t\tif err != nil {\n\t\t\treturn shim.Error(err.Error())\n\t\t}\n\t\t// Add a comma before array members, suppress it for the first array member\n\t\tif bArrayMemberAlreadyWritten == true {\n\t\t\tbuffer.WriteString(\",\")\n\t\t}\n\t\tbuffer.WriteString(\"{\\\"TxId\\\":\")\n\t\tbuffer.WriteString(\"\\\"\")\n\t\tbuffer.WriteString(response.TxId)\n\t\tbuffer.WriteString(\"\\\"\")\n\n\t\tbuffer.WriteString(\", \\\"Value\\\":\")\n\t\t// if it was a delete operation on given key, then we need to set the\n\t\t//corresponding value null. 
Else, we will write the response.Value\n\t\t//as-is (as the Value itself a JSON marble)\n\t\tif response.IsDelete {\n\t\t\tbuffer.WriteString(\"null\")\n\t\t} else {\n\t\t\tbuffer.WriteString(string(response.Value))\n\t\t}\n\n\t\tbuffer.WriteString(\", \\\"Timestamp\\\":\")\n\t\tbuffer.WriteString(\"\\\"\")\n\t\tbuffer.WriteString(time.Unix(response.Timestamp.Seconds, int64(response.Timestamp.Nanos)).String())\n\t\tbuffer.WriteString(\"\\\"\")\n\n\t\tbuffer.WriteString(\", \\\"IsDelete\\\":\")\n\t\tbuffer.WriteString(\"\\\"\")\n\t\tbuffer.WriteString(strconv.FormatBool(response.IsDelete))\n\t\tbuffer.WriteString(\"\\\"\")\n\n\t\tbuffer.WriteString(\"}\")\n\t\tbArrayMemberAlreadyWritten = true\n\t}\n\tbuffer.WriteString(\"]\")\n\n\treturn shim.Success(buffer.Bytes())\n}", "func (h *MethodCallHistory) GetHistory() []*MethodCall {\n\tdefer h.RUnlock()\n\th.RLock()\n\treturn h.history\n}", "func (s *SimpleChaincode) pat_gethistory(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\tif len(args) < 2 {\n\t\treturn shim.Error(\"Incorrect number of arguments. Expecting 3\")\n\t}\n\n\tpatKey := args[1]\n\tfmt.Printf(\"##### start History of Record: %s\\n\", patKey)\n\n\tresultsIterator, err := stub.GetHistoryForKey(patKey)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\tdefer resultsIterator.Close()\n\n\t// buffer is a JSON array containing historic values for the marble\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"[\")\n\n\tbArrayMemberAlreadyWritten := false\n\tfor resultsIterator.HasNext() {\n\t\tresponse, err := resultsIterator.Next()\n\t\tif err != nil {\n\t\t\treturn shim.Error(err.Error())\n\t\t}\n\t\t// Add a comma before array members, suppress it for the first array member\n\t\tif bArrayMemberAlreadyWritten == true {\n\t\t\tbuffer.WriteString(\",\")\n\t\t}\n\t\tbuffer.WriteString(\"{\\\"TxId\\\":\")\n\t\tbuffer.WriteString(\"\\\"\")\n\t\tbuffer.WriteString(response.TxId)\n\t\tbuffer.WriteString(\"\\\"\")\n\n\t\tbuffer.WriteString(\", \\\"Value\\\":\")\n\t\t// if it was a delete operation on given key, then we need to set the\n\t\t//corresponding value null. 
Else, we will write the response.Value\n\t\t//as-is (as the Value itself a JSON marble)\n\t\tif response.IsDelete {\n\t\t\tbuffer.WriteString(\"null\")\n\t\t} else {\n\t\t\tbuffer.WriteString(string(response.Value))\n\t\t}\n\n\t\tbuffer.WriteString(\", \\\"Timestamp\\\":\")\n\t\tbuffer.WriteString(\"\\\"\")\n\t\tbuffer.WriteString(time.Unix(response.Timestamp.Seconds, int64(response.Timestamp.Nanos)).String())\n\t\tbuffer.WriteString(\"\\\"\")\n\n\t\tbuffer.WriteString(\", \\\"IsDelete\\\":\")\n\t\tbuffer.WriteString(\"\\\"\")\n\t\tbuffer.WriteString(strconv.FormatBool(response.IsDelete))\n\t\tbuffer.WriteString(\"\\\"\")\n\n\t\tbuffer.WriteString(\"}\")\n\t\tbArrayMemberAlreadyWritten = true\n\t}\n\tbuffer.WriteString(\"]\")\n\n\tfmt.Printf(\"- getHistoryForPatient returning:\\n%s\\n\", buffer.String())\n\n\treturn shim.Success(buffer.Bytes())\n}", "func (m *MemoryRewardStorage) GetAll() []rewards.Reward {\n\treturn m.rewards\n}", "func (c *BalanceClient) History() (*BalanceTransactionListResponse, error) {\n\treturn c.HistoryWithFilters(Filters{})\n}", "func GetHistory(s *aklib.DBConfig) ([]*History, error) {\n\tvar hist []*History\n\treturn hist, s.DB.View(func(txn *badger.Txn) error {\n\t\terr := db.Get(txn, nil, &hist, db.HeaderWalletHistory)\n\t\tif err == badger.ErrKeyNotFound {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t})\n}", "func (s *Server) History(w http.ResponseWriter, r *http.Request) {\n\tteam, err := s.currentTeam(w, r)\n\tif err != nil {\n\t\ts.unauthorized(w, err)\n\t\treturn\n\t}\n\n\tscores, err := models.NewScore().History(team.ID)\n\tif err != nil {\n\t\tError(w, http.StatusNotFound, err, \"failed to get history\")\n\t}\n\tJSON(w, http.StatusOK, scores)\n}", "func (f *PolicyServiceGetRetentionPolicyOverviewFunc) History() []PolicyServiceGetRetentionPolicyOverviewFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]PolicyServiceGetRetentionPolicyOverviewFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}", "func (s *server) GetHistory(ctx context.Context, req *transactionpb.GetHistoryRequest) (*transactionpb.GetHistoryResponse, error) {\n\tlog := logrus.WithFields(logrus.Fields{\n\t\t\"method\": \"GetHistory\",\n\t\t\"account\": base64.StdEncoding.EncodeToString(req.AccountId.Value),\n\t})\n\n\titems, err := s.loader.getItems(ctx, req.AccountId.Value, req.Cursor, req.Direction)\n\tif err != nil {\n\t\tlog.WithError(err).Warn(\"failed to get history transactions\")\n\t\treturn nil, status.Error(codes.Internal, \"failed to get transactions\")\n\t}\n\n\treturn &transactionpb.GetHistoryResponse{\n\t\tResult: transactionpb.GetHistoryResponse_OK,\n\t\tItems: items,\n\t}, nil\n}", "func (s *State) GetHistory(height uint32) (*StateKeyFrame, error) {\n\ts.mtx.RLock()\n\tdefer s.mtx.RUnlock()\n\n\t// Seek to state to target height.\n\tif err := s.history.SeekTo(height); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Take a snapshot of the history.\n\treturn s.snapshot(), nil\n}", "func (m *MemoryRewardStorage) Get(id int) (rewards.Reward, error) {\n\tvar reward rewards.Reward\n\n\tfor _, r := range m.rewards {\n\t\tif r.ID == id {\n\t\t\treturn r, nil\n\t\t}\n\t}\n\n\treturn reward, rewards.ErrNotFound\n}", "func (g *Generator) History() []ispec.History {\n\tcopy := []ispec.History{}\n\tfor _, v := range g.image.History {\n\t\tcopy = append(copy, v)\n\t}\n\treturn copy\n}", "func (f *AutoIndexingServiceNumRepositoriesWithCodeIntelligenceFunc) History() []AutoIndexingServiceNumRepositoriesWithCodeIntelligenceFuncCall 
{\n\tf.mutex.Lock()\n\thistory := make([]AutoIndexingServiceNumRepositoriesWithCodeIntelligenceFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}", "func GetHistory(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\ttype KeyModificationWrapper struct {\n\t\tRealValue interface{} `json:\"InterfaceValue\"`\n\t\tTx queryresult.KeyModification\n\t}\n\tvar sliceReal []KeyModificationWrapper\n\n\tvar history []queryresult.KeyModification\n\tvar value interface{}\n\n\tif len(args) != 1 {\n\t\treturn shim.Error(\"Incorrect number of arguments. Expecting 1\")\n\t}\n\n\tkey := args[0]\n\tfmt.Printf(\"- start GetHistory: %s\\n\", key)\n\n\t// Get History\n\tresultsIterator, err := stub.GetHistoryForKey(key)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\tdefer resultsIterator.Close()\n\n\tfor resultsIterator.HasNext() {\n\t\thistoryData, err := resultsIterator.Next()\n\t\tif err != nil {\n\t\t\treturn shim.Error(err.Error())\n\t\t}\n\n\t\tvar singleReal KeyModificationWrapper\n\t\tvar tx queryresult.KeyModification\n\t\tsingleReal.Tx.TxId = historyData.TxId //copy transaction id over\n\t\tjson.Unmarshal(historyData.Value, &value) //un stringify it aka JSON.parse()\n\t\tif historyData.Value == nil { //value has been deleted\n\t\t\tvar emptyBytes []byte\n\t\t\tsingleReal.Tx.Value = emptyBytes //copy nil value\n\t\t} else {\n\t\t\tjson.Unmarshal(historyData.Value, &value) //un stringify it aka JSON.parse()\n\t\t\tsingleReal.Tx.Value = historyData.Value //copy value over\n\t\t\tsingleReal.Tx.Timestamp = historyData.Timestamp\n\t\t\tsingleReal.Tx.IsDelete = historyData.IsDelete\n\t\t\tsingleReal.RealValue = value\n\t\t}\n\t\thistory = append(history, tx) //add this Tx to the list\n\t\tsliceReal = append(sliceReal, singleReal)\n\t}\n\t// fmt.Printf(\"- getHistoryForService returning:\\n%s\", history)\n\tPrettyPrintHistory(history)\n\n\t//change to array of bytes\n\t// historyAsBytes, _ := json.Marshal(history) //convert to array of bytes\n\n\trealAsBytes, _ := json.Marshal(sliceReal)\n\treturn shim.Success(realAsBytes)\n}", "func getLiftHistory(c *echo.Context) error {\n\tid, err := strconv.ParseUint(c.Param(\"ID\"), 10, 64)\n\tif err != nil {\n\t\treturn jsonResp(c, err.Error())\n\t}\n\tlift := LiftHistory{DeviceID: c.Param(\"DeviceID\"), ID: id}\n\n\thas, err := g_engine.Get(&lift)\n\tif err != nil {\n\t\treturn jsonResp(c, err.Error())\n\t}\n\tif !has {\n\t\treturn jsonResp(c, \"not exists\")\n\t}\n\n\treturn c.JSON(http.StatusOK, lift)\n}", "func (k Keeper) GetReward(ctx sdk.Context, stakingCoinDenom string, farmerAcc sdk.AccAddress) (reward types.Reward, found bool) {\n\tstore := ctx.KVStore(k.storeKey)\n\tbz := store.Get(types.GetRewardKey(stakingCoinDenom, farmerAcc))\n\tif bz == nil {\n\t\treturn reward, false\n\t}\n\tvar rewardCoins types.RewardCoins\n\tk.cdc.MustUnmarshal(bz, &rewardCoins)\n\treturn types.Reward{\n\t\tFarmer: farmerAcc.String(),\n\t\tStakingCoinDenom: stakingCoinDenom,\n\t\tRewardCoins: rewardCoins.RewardCoins,\n\t}, true\n}", "func (f *ReleaseStoreGetLatestBatchFunc) History() []ReleaseStoreGetLatestBatchFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]ReleaseStoreGetLatestBatchFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}", "func (f *PipelineRunFunc) History() []PipelineRunFuncCall {\n\treturn f.history\n}", "func (_Token *TokenCallerSession) BaseRewardHistoryLength() (*big.Int, error) {\n\treturn 
_Token.Contract.BaseRewardHistoryLength(&_Token.CallOpts)\n}", "func (sc *LoanMarketShareContract) History(ctx contractapi.TransactionContextInterface, key string) ([]LoanMarketShareHistory, error) {\n\n\titer, err := ctx.GetStub().GetHistoryForKey(key)\n\tif err != nil {\n return nil, err\n\t}\n\tdefer func() { _ = iter.Close() }()\n\n\tvar results []LoanMarketShareHistory\n\tfor iter.HasNext() {\n\t\tstate, err := iter.Next()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tentryObj := new(LoanMarketShareObj)\n\t\tif errNew := json.Unmarshal(state.Value, entryObj); errNew != nil {\n\t\t\treturn nil, errNew\n\t\t}\n\n\t\tentry := LoanMarketShareHistory{\n\t\t\tTxID:\t\tstate.GetTxId(),\n\t\t\tTimestamp:\ttime.Unix(state.GetTimestamp().GetSeconds(), 0),\n\t\t\tLoanMarketShare:\tentryObj,\n\t\t}\n\n\t\tresults = append(results, entry)\n\t}\n\treturn results, nil\n}", "func (s *BlocksService) Reward(ctx context.Context) (*BlocksReward, *http.Response, error) {\n\tvar responseStruct *BlocksReward\n\tresp, err := s.client.SendRequest(ctx, \"GET\", \"blocks/getReward\", nil, &responseStruct)\n\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn responseStruct, resp, err\n}", "func (c4 *Connect4) GetReward() int {\n\tif c4.Winner == nil {\n\t\treturn 0\n\t} else if *c4.Winner == 1 {\n\t\treturn 1\n\t}\n\treturn -1\n}", "func (f *ReleaseStoreGetArtifactsFunc) History() []ReleaseStoreGetArtifactsFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]ReleaseStoreGetArtifactsFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}", "func (c *gRPCClient) AccountRewards(address gosmtypes.Address, offset uint32, maxResults uint32) ([]*apitypes.Reward, uint32, error) {\n\tgsc := c.getGlobalStateServiceClient()\n\tresp, err := gsc.AccountDataQuery(context.Background(), &apitypes.AccountDataQueryRequest{\n\t\tFilter: &apitypes.AccountDataFilter{\n\t\t\tAccountId: &apitypes.AccountId{Address: address.Bytes()},\n\t\t\tAccountDataFlags: uint32(apitypes.AccountDataFlag_ACCOUNT_DATA_FLAG_REWARD),\n\t\t},\n\n\t\tMaxResults: maxResults,\n\t\tOffset: offset,\n\t})\n\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\trewards := make([]*apitypes.Reward, 0)\n\n\tfor _, data := range resp.AccountItem {\n\t\tr := data.GetReward()\n\t\tif r != nil {\n\t\t\trewards = append(rewards, r)\n\t\t}\n\t}\n\n\treturn rewards, resp.TotalResults, nil\n}", "func getHistory(ctx context.Context, args *map[string]interface{}) error {\n\tvar resultRows []Operation\n\n\tha, ok := (*args)[\"historyArgs\"].(historyArgs)\n\tif !ok {\n\t\treturn fmt.Errorf(\"unable to cast arguments to type historyArgs\")\n\t}\n\n\texportType := ha.exportType\n\n\t// Get everything from the fhir_endpoints_info_history table for the given URL\n\n\tvar selectHistory string\n\tif exportType == \"month\" {\n\t\tselectHistory = `\n\t\tSELECT fhir_endpoints_info_history.url, fhir_endpoints_metadata.http_response, fhir_endpoints_metadata.response_time_seconds, fhir_endpoints_metadata.errors,\n\t\tcapability_statement, tls_version, mime_types, operation_resource,\n\t\tfhir_endpoints_metadata.smart_http_response, smart_response, fhir_endpoints_info_history.updated_at, capability_fhir_version\n\t\tFROM fhir_endpoints_info_history, fhir_endpoints_metadata\n\t\tWHERE fhir_endpoints_info_history.metadata_id = fhir_endpoints_metadata.id AND fhir_endpoints_info_history.url=$1 AND (date_trunc('month', fhir_endpoints_info_history.updated_at) = date_trunc('month', current_date - INTERVAL '1 month'))\n\t\tORDER BY 
fhir_endpoints_info_history.updated_at DESC;`\n\t} else if exportType == \"30days\" {\n\t\tselectHistory = `\n\t\tSELECT fhir_endpoints_info_history.url, fhir_endpoints_metadata.http_response, fhir_endpoints_metadata.response_time_seconds, fhir_endpoints_metadata.errors,\n\t\tcapability_statement, tls_version, mime_types, operation_resource,\n\t\tfhir_endpoints_metadata.smart_http_response, smart_response, fhir_endpoints_info_history.updated_at, capability_fhir_version\n\t\tFROM fhir_endpoints_info_history, fhir_endpoints_metadata\n\t\tWHERE fhir_endpoints_info_history.metadata_id = fhir_endpoints_metadata.id AND fhir_endpoints_info_history.url=$1 AND (date_trunc('day', fhir_endpoints_info_history.updated_at) >= date_trunc('day', current_date - INTERVAL '30 day'))\n\t\tORDER BY fhir_endpoints_info_history.updated_at DESC;`\n\t} else if exportType == \"all\" {\n\t\tselectHistory = `\n\t\tSELECT fhir_endpoints_info_history.url, fhir_endpoints_metadata.http_response, fhir_endpoints_metadata.response_time_seconds, fhir_endpoints_metadata.errors,\n\t\tcapability_statement, tls_version, mime_types, operation_resource,\n\t\tfhir_endpoints_metadata.smart_http_response, smart_response, fhir_endpoints_info_history.updated_at, capability_fhir_version\n\t\tFROM fhir_endpoints_info_history, fhir_endpoints_metadata\n\t\tWHERE fhir_endpoints_info_history.metadata_id = fhir_endpoints_metadata.id AND fhir_endpoints_info_history.url=$1\n\t\tORDER BY fhir_endpoints_info_history.updated_at DESC;`\n\t}\n\n\thistoryRows, err := ha.store.DB.QueryContext(ctx, selectHistory, ha.fhirURL)\n\tif err != nil {\n\t\tlog.Warnf(\"Failed getting the history rows for URL %s. Error: %s\", ha.fhirURL, err)\n\t\tresult := Result{\n\t\t\tURL: ha.fhirURL,\n\t\t\tRows: resultRows,\n\t\t}\n\t\tha.result <- result\n\t\treturn nil\n\t}\n\n\t// Puts the rows in an array and sends it back on the channel to be processed\n\tdefer historyRows.Close()\n\tfor historyRows.Next() {\n\t\tvar op Operation\n\t\tvar url string\n\t\tvar capStat []byte\n\t\tvar smartRsp []byte\n\t\tvar opRes []byte\n\t\terr = historyRows.Scan(\n\t\t\t&url,\n\t\t\t&op.HTTPResponse,\n\t\t\t&op.HTTPResponseTimeSecond,\n\t\t\t&op.Errors,\n\t\t\t&capStat,\n\t\t\t&op.TLSVersion,\n\t\t\tpq.Array(&op.MIMETypes),\n\t\t\t&opRes,\n\t\t\t&op.SMARTHTTPResponse,\n\t\t\t&smartRsp,\n\t\t\t&op.UpdatedAt,\n\t\t\t&op.FHIRVersion)\n\t\tif err != nil {\n\t\t\tlog.Warnf(\"Error while scanning the rows of the history table for URL %s. 
Error: %s\", ha.fhirURL, err)\n\t\t\tresult := Result{\n\t\t\t\tURL: ha.fhirURL,\n\t\t\t\tRows: resultRows,\n\t\t\t}\n\t\t\tha.result <- result\n\t\t\treturn nil\n\t\t}\n\n\t\top.SMARTResponse = getSMARTResponse(smartRsp)\n\t\top.SupportedResources = getSupportedResources(opRes)\n\n\t\tresultRows = append(resultRows, op)\n\t}\n\tresult := Result{\n\t\tURL: ha.fhirURL,\n\t\tRows: resultRows,\n\t}\n\tha.result <- result\n\treturn nil\n}", "func getHistory(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\nfmt.Println(\"************************ READ HISTORY ************************* \")\nfmt.Printf(\"- args value: %s\\n\", args)\nfmt.Printf(\"- len(args) value: %s\\n\", len(args))\n\n\ttype AuditHistory struct {\n\t\tTxId string `json:\"txId\"`\n\t\tValue mystruct `json:\"value\"`\n\t}\n\tvar history []AuditHistory;\n\tvar mys mystruct\n\n\t\t\n\tmode1serial := strings.Join(args,\"\")\n\tfmt.Printf(\"- start getHistoryForModel_SerialNumber Combination: %s\\n\", mode1serial)\n\n\t// Get History\n\tresultsIterator, err := stub.GetHistoryForKey(mode1serial)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\tdefer resultsIterator.Close()\nfmt.Printf(\"resultsIterator.HasNext()\\n%s\", resultsIterator.HasNext())\n\n\tfor resultsIterator.HasNext() {\n\tfmt.Printf(\"Inside loop\\n%s\")\n\t\thistoryData, err := resultsIterator.Next()\n\t\tfmt.Printf(\"Inside loop : \\n%s\", historyData)\n\t\t\n\t\tif err != nil {\n\t\t\treturn shim.Error(err.Error())\n\t\t}\n\n\t\tvar tx AuditHistory\n\t\ttx.TxId = historyData.TxId //copy transaction id over\n\t\t\tfmt.Printf(\"Inside loop historyData.TxId : \\n%s\", historyData.TxId )\n\t\tjson.Unmarshal(historyData.Value, &mys) //un stringify it aka JSON.parse()\n\t\tif historyData.Value == nil { //product has been deleted\n\t\t\tvar emptymys mystruct\n\t\t\ttx.Value = emptymys //copy nil product\n\t\t} else {\n\t\t\tjson.Unmarshal(historyData.Value, &mys) //un stringify it aka JSON.parse()\n\t\t\ttx.Value = mys //copy product over\n\t\t}\n\t\thistory = append(history, tx) //add this tx to the list\n\t}\n\tfmt.Printf(\"- getHistoryForProduct returning:\\n%s\", history)\n\n\t//change to array of bytes\n\thistoryAsBytes, _ := json.Marshal(history) //convert to array of bytes\n\tfmt.Printf(\"- getHistoryForProduct returning historyAsBytes:\\n%s\", historyAsBytes)\n\treturn shim.Success(historyAsBytes)\n}", "func (f *ResolverQueryResolverFunc) History() []ResolverQueryResolverFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]ResolverQueryResolverFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}", "func (_Lmc *LmcCaller) GetUserRewardDebt(opts *bind.CallOpts, _userAddress common.Address, _index *big.Int) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _Lmc.contract.Call(opts, &out, \"getUserRewardDebt\", _userAddress, _index)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func GetHistory(store Store, datatype, realm, user, id string, r *http.Request) (*cpb.History, int, error) {\n\thlist := make([]proto.Message, 0)\n\tif err := store.ReadHistory(datatype, realm, user, id, &hlist); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\t// TODO: perhaps this should be the empty list?\n\t\t\treturn nil, http.StatusBadRequest, fmt.Errorf(\"no config history available\")\n\t\t}\n\t\treturn nil, http.StatusBadRequest, fmt.Errorf(\"service storage unavailable: %v, retry later\", err)\n\t}\n\the := 
make([]*cpb.HistoryEntry, len(hlist))\n\tvar ok bool\n\tfor i, e := range hlist {\n\t\the[i], ok = e.(*cpb.HistoryEntry)\n\t\tif !ok {\n\t\t\treturn nil, http.StatusInternalServerError, fmt.Errorf(\"cannot load history entry %d\", i)\n\t\t}\n\t}\n\thistory := &cpb.History{\n\t\tHistory: he,\n\t}\n\tpageToken := r.URL.Query().Get(\"pageToken\")\n\tstart, err := strconv.ParseInt(pageToken, 10, 64)\n\tif err != nil {\n\t\tstart = 0\n\t}\n\n\tpageSize := r.URL.Query().Get(\"pageSize\")\n\tsize, err := strconv.ParseInt(pageSize, 10, 64)\n\tif err != nil || size < 1 {\n\t\tsize = 50\n\t}\n\tif size > 1000 {\n\t\tsize = 1000\n\t}\n\t// Reverse order\n\ta := history.History\n\tfor i := len(a)/2 - 1; i >= 0; i-- {\n\t\topp := len(a) - 1 - i\n\t\ta[i], a[opp] = a[opp], a[i]\n\t}\n\n\tfor i, entry := range history.History {\n\t\tif entry.Revision <= start {\n\t\t\thistory.History = history.History[i:]\n\t\t\tbreak\n\t\t}\n\t}\n\tif len(history.History) > int(size) {\n\t\thistory.NextPageToken = fmt.Sprintf(\"%d\", history.History[size].Revision)\n\t\thistory.History = history.History[:size]\n\t}\n\treturn history, http.StatusOK, nil\n}", "func (_Token *TokenSession) BaseRewardHistoryLength() (*big.Int, error) {\n\treturn _Token.Contract.BaseRewardHistoryLength(&_Token.CallOpts)\n}", "func (th *translationHandler) GetHistory(w http.ResponseWriter, r *http.Request) {\n\t// All and all this part could have been done better\n\t// with TreeMap but I decided to stick with this custom solution\n\tsort.Strings(th.history.englishWordKeys)\n\tresultArray := []map[string]string{}\n\n\tfor _, key := range th.history.englishWordKeys {\n\t\tresultArray = append(resultArray, th.history.db[key])\n\t}\n\n\tth.history.WordHistory = resultArray\n\tth.codec.Encode(w, th.history)\n}", "func (l *Linenoise) historyNext(ls *linestate) string {\n\tif len(l.history) == 0 {\n\t\treturn \"\"\n\t}\n\t// update the current history entry with the line buffer\n\tl.historySet(ls.historyIndex, ls.String())\n\tls.historyIndex--\n\t// next history item\n\tif ls.historyIndex < 0 {\n\t\tls.historyIndex = 0\n\t}\n\treturn l.historyGet(ls.historyIndex)\n}", "func (f *ReleaseStoreTransactFunc) History() []ReleaseStoreTransactFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]ReleaseStoreTransactFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}", "func (sc *LoanDocContract) History(ctx contractapi.TransactionContextInterface, key string) ([]LoanDocHistory, error) {\n\n\titer, err := ctx.GetStub().GetHistoryForKey(key)\n\tif err != nil {\n return nil, err\n\t}\n\tdefer func() { _ = iter.Close() }()\n\n\tvar results []LoanDocHistory\n\tfor iter.HasNext() {\n\t\tstate, err := iter.Next()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tentryObj := new(LoanDocObj)\n\t\tif errNew := json.Unmarshal(state.Value, entryObj); errNew != nil {\n\t\t\treturn nil, errNew\n\t\t}\n\n\t\tentry := LoanDocHistory{\n\t\t\tTxID:\t\tstate.GetTxId(),\n\t\t\tTimestamp:\ttime.Unix(state.GetTimestamp().GetSeconds(), 0),\n\t\t\tLoanDoc:\tentryObj,\n\t\t}\n\n\t\tresults = append(results, entry)\n\t}\n\treturn results, nil\n}", "func History(ctx context.Context, nameOrID string, options *HistoryOptions) ([]*types.HistoryResponse, error) {\n\tif options == nil {\n\t\toptions = new(HistoryOptions)\n\t}\n\t_ = options\n\tvar history []*types.HistoryResponse\n\tconn, err := bindings.GetClient(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresponse, err := conn.DoRequest(ctx, nil, http.MethodGet, 
\"/images/%s/history\", nil, nil, nameOrID)\n\tif err != nil {\n\t\treturn history, err\n\t}\n\tdefer response.Body.Close()\n\n\treturn history, response.Process(&history)\n}", "func GetHistoryRecords() ([]string, error) {\n\trecords := []string{}\n\n\t// \tGet bash history records\n\tbashRecords, err := getBashHistoryRecords()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trecords = append(records, bashRecords...)\n\n\t//Get zsh history records\n\tzshRecords, err := getZshHistoryRecords()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trecords = append(records, zshRecords...)\n\treturn records, nil\n}", "func (f *ReleaseStoreGetLatestFunc) History() []ReleaseStoreGetLatestFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]ReleaseStoreGetLatestFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}", "func (f *PipelineAddFunc) History() []PipelineAddFuncCall {\n\treturn f.history\n}", "func (f *AutoIndexingServiceGetUnsafeDBFunc) History() []AutoIndexingServiceGetUnsafeDBFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]AutoIndexingServiceGetUnsafeDBFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}", "func (client SmartGroupsClient) GetHistoryResponder(resp *http.Response) (result SmartGroupModification, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (self* userRestAPI) flightHistory(w http.ResponseWriter, r *http.Request) {\n\n\t// Read arguments\n\tband,number,err := self.extractBandAndNumber(r)\n\tif err != nil {\n\t\tlogError(err)\n\t\thttp.Error(w, fmt.Sprintf(\"\\nFailed to parse arguments '%s'\\n\",err), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// Retrieve history for specified traveller\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t_,history,err := self.engine.TripHistoryAsJSON(band,number)\n\tif err != nil {\n\t\tlogError(err)\n\t\thttp.Error(w, fmt.Sprintf(\"\\nFailed to retrieve flight history with error '%s'\\n\",err), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tio.WriteString(w,history)\n\n}", "func (rl *readline) ReadHistory() error {\n\tf, err := os.Open(rl.historyFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\t_, err = rl.State.ReadHistory(f)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func ExampleSmartGroupsClient_GetHistory() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclient, err := armalertsmanagement.NewSmartGroupsClient(\"9e261de7-c804-4b9d-9ebf-6f50fe350a9a\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := client.GetHistory(ctx,\n\t\t\"a808445e-bb38-4751-85c2-1b109ccc1059\",\n\t\tnil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// TODO: use response item\n\t_ = res\n}", "func viewHistory(board *chess.Board, includeEval bool, resultString string) {\n\tvar input rune\n\tindex := len(board.History) - 1\n\ttempBoard := board\n\treader := bufio.NewReader(os.Stdin)\n\tfor {\n\t\t//fmt.Print(\"\\033[1;1H\")\n\t\t//fmt.Print(\"\\033[0J\")\n\t\ttempBoard.PrintBoard(false)\n\t\tif includeEval {\n\t\t\tfmt.Println(\"Basic: \", eval.EvaluateBasic(tempBoard), \" With tables: \", 
eval.EvaluateWithTables(tempBoard))\n\t\t}\n\t\tfmt.Println(\"Options: a: move back one ply, d: move backward one ply, q: quit\")\n\t\tif index == -1 {\n\t\t\tfmt.Println(\"Beginning of game!\")\n\t\t} else if index == len(board.History)-1 {\n\t\t\tfmt.Println(\"End of game!\")\n\t\t\tfmt.Println(resultString)\n\t\t}\n\t\tinput, _, _ = reader.ReadRune()\n\t\tif input == 'q' {\n\t\t\tquit(board)\n\t\t} else if input == 'a' {\n\t\t\tif index == -1 { //reset index so doesn't run of end of History at first of game\n\t\t\t\tindex++\n\t\t\t}\n\t\t\tindex-- //adjust index\n\t\t\tif index == -1 {\n\t\t\t\ttempBoard, _ = chess.ParseFen(\"\") //index of -1 means initial position (not recorded in History)\n\t\t\t} else {\n\t\t\t\ttempBoard, _ = chess.ParseFen(board.History[index])\n\t\t\t}\n\n\t\t} else if input == 'd' {\n\t\t\tif index == len(board.History)-1 { //reset index\n\t\t\t\tindex--\n\t\t\t}\n\t\t\tindex++ //adjust index\n\t\t\tif index == -1 {\n\t\t\t\ttempBoard, _ = chess.ParseFen(board.History[index+1])\n\t\t\t} else {\n\t\t\t\ttempBoard, _ = chess.ParseFen(board.History[index])\n\t\t\t}\n\n\t\t}\n\t}\n}", "func (a *Agent) GetReleaseHistory(\n\tctx context.Context,\n\tname string,\n) ([]*release.Release, error) {\n\tctx, span := telemetry.NewSpan(ctx, \"helm-get-release-history\")\n\tdefer span.End()\n\n\ttelemetry.WithAttributes(span,\n\t\ttelemetry.AttributeKV{Key: \"name\", Value: name},\n\t)\n\n\tcmd := action.NewHistory(a.ActionConfig)\n\n\treturn cmd.Run(name)\n}", "func (c *FakeClient) ReleaseHistory(rlsName string, max int) ([]*release.Release, error) {\n\treturn c.Rels, nil\n}", "func (s *PublicSfcAPI) GetRewardWeights(ctx context.Context, stakerID hexutil.Uint) (map[string]interface{}, error) {\n\tbaseRewardWeight, txRewardWeight, err := s.b.GetRewardWeights(ctx, idx.StakerID(stakerID))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif baseRewardWeight == nil || txRewardWeight == nil {\n\t\treturn nil, nil\n\t}\n\treturn map[string]interface{}{\n\t\t\"baseRewardWeight\": (*hexutil.Big)(baseRewardWeight),\n\t\t\"txRewardWeight\": (*hexutil.Big)(txRewardWeight),\n\t}, nil\n}", "func GetHistoryById(historyId int , o orm.Ormer) (*models.History){\n\thistory :=models.History{Id: historyId}\n\terr:= o.Read(&history)\n\tif err == orm.ErrNoRows {\n\t\tfmt.Println(\"error: can`t find the battle\")\n\t} else if err == orm.ErrMissPK {\n\t\tfmt.Println(\"error: can`t find the primary key\")\n\t} else {\n\t\tfmt.Println(\"query finished\")\n\t}\n\treturn &history\n}", "func (f *ResolverQueueAutoIndexJobForRepoFunc) History() []ResolverQueueAutoIndexJobForRepoFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]ResolverQueueAutoIndexJobForRepoFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}", "func (f *DBStoreTransactFunc) History() []DBStoreTransactFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]DBStoreTransactFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}", "func (f *DBStoreTransactFunc) History() []DBStoreTransactFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]DBStoreTransactFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}" ]
[ "0.70522064", "0.6610849", "0.65936154", "0.6502668", "0.62767226", "0.62686336", "0.6237844", "0.6173763", "0.61523086", "0.61413825", "0.6135928", "0.6079403", "0.60785544", "0.60713136", "0.6067368", "0.60503054", "0.60456854", "0.6004538", "0.5992135", "0.59637904", "0.5960176", "0.59577703", "0.5940309", "0.5892393", "0.5877412", "0.5869292", "0.5859112", "0.5783713", "0.57722867", "0.5762689", "0.5754149", "0.57406163", "0.5732412", "0.571898", "0.5703646", "0.5695654", "0.56875676", "0.56840485", "0.56830466", "0.5674662", "0.5664659", "0.5661029", "0.5655857", "0.5646383", "0.55848265", "0.5576183", "0.5573958", "0.55723053", "0.5544371", "0.55428356", "0.55341303", "0.55338186", "0.5524111", "0.5522361", "0.5518108", "0.5512582", "0.54970104", "0.5484693", "0.5478161", "0.5465441", "0.54402965", "0.543739", "0.54235834", "0.5421337", "0.54176515", "0.54094607", "0.5395599", "0.53942937", "0.53825", "0.53705156", "0.5365871", "0.53654987", "0.5335602", "0.5323334", "0.5317307", "0.5315489", "0.5311832", "0.5310902", "0.53043246", "0.53024405", "0.5297823", "0.52961564", "0.5293678", "0.5289329", "0.5267921", "0.52647203", "0.5257755", "0.5254853", "0.5254517", "0.52516633", "0.5244258", "0.52403545", "0.5238519", "0.5234922", "0.5232974", "0.52171755", "0.52116746", "0.5208818", "0.52086145", "0.52086145" ]
0.7648041
0
getAccountReward reads account reward details
func (p *Protocol) getAccountReward(epochNumber uint64, candidateName string) (*AccountReward, error) { db := p.Store.GetDB() getQuery := fmt.Sprintf(selectAccountReward, AccountRewardTableName) stmt, err := db.Prepare(getQuery) if err != nil { return nil, errors.Wrap(err, "failed to prepare get query") } defer stmt.Close() rows, err := stmt.Query(epochNumber, candidateName) if err != nil { return nil, errors.Wrap(err, "failed to execute get query") } var accountReward AccountReward parsedRows, err := s.ParseSQLRows(rows, &accountReward) if err != nil { return nil, errors.Wrap(err, "failed to parse results") } if len(parsedRows) == 0 { return nil, indexprotocol.ErrNotExist } if len(parsedRows) > 1 { return nil, errors.New("only one row is expected") } return parsedRows[0].(*AccountReward), nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (as AccountStorage) GetReward(ctx sdk.Context, accKey types.AccountKey) (*Reward, sdk.Error) {\n\tstore := ctx.KVStore(as.key)\n\trewardByte := store.Get(getRewardKey(accKey))\n\tif rewardByte == nil {\n\t\treturn nil, ErrRewardNotFound()\n\t}\n\treward := new(Reward)\n\tif err := as.cdc.UnmarshalJSON(rewardByte, reward); err != nil {\n\t\treturn nil, ErrFailedToUnmarshalReward(err)\n\t}\n\treturn reward, nil\n}", "func (_TrialRulesAbstract *TrialRulesAbstractCaller) GetReward(opts *bind.CallOpts) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _TrialRulesAbstract.contract.Call(opts, out, \"getReward\")\n\treturn *ret0, err\n}", "func (_IStakingRewards *IStakingRewardsTransactor) GetReward(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _IStakingRewards.contract.Transact(opts, \"getReward\")\n}", "func (_TrialRulesAbstract *TrialRulesAbstractCallerSession) GetReward() (*big.Int, error) {\n\treturn _TrialRulesAbstract.Contract.GetReward(&_TrialRulesAbstract.CallOpts)\n}", "func (_XStaking *XStakingTransactor) GetReward(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _XStaking.contract.Transact(opts, \"getReward\")\n}", "func (_TrialRulesAbstract *TrialRulesAbstractSession) GetReward() (*big.Int, error) {\n\treturn _TrialRulesAbstract.Contract.GetReward(&_TrialRulesAbstract.CallOpts)\n}", "func (_Lmc *LmcCallerSession) GetUserAccumulatedReward(_userAddress common.Address, tokenIndex *big.Int) (*big.Int, error) {\n\treturn _Lmc.Contract.GetUserAccumulatedReward(&_Lmc.CallOpts, _userAddress, tokenIndex)\n}", "func (_Lmc *LmcSession) GetUserAccumulatedReward(_userAddress common.Address, tokenIndex *big.Int) (*big.Int, error) {\n\treturn _Lmc.Contract.GetUserAccumulatedReward(&_Lmc.CallOpts, _userAddress, tokenIndex)\n}", "func (_IStakingRewards *IStakingRewardsTransactorSession) GetReward() (*types.Transaction, error) {\n\treturn _IStakingRewards.Contract.GetReward(&_IStakingRewards.TransactOpts)\n}", "func (_IStakingRewards *IStakingRewardsSession) GetReward() (*types.Transaction, error) {\n\treturn _IStakingRewards.Contract.GetReward(&_IStakingRewards.TransactOpts)\n}", "func (_Lmc *LmcCallerSession) GetUserRewardDebt(_userAddress common.Address, _index *big.Int) (*big.Int, error) {\n\treturn _Lmc.Contract.GetUserRewardDebt(&_Lmc.CallOpts, _userAddress, _index)\n}", "func (_Lmc *LmcSession) GetUserRewardDebt(_userAddress common.Address, _index *big.Int) (*big.Int, error) {\n\treturn _Lmc.Contract.GetUserRewardDebt(&_Lmc.CallOpts, _userAddress, _index)\n}", "func (_XStaking *XStakingTransactorSession) GetReward() (*types.Transaction, error) {\n\treturn _XStaking.Contract.GetReward(&_XStaking.TransactOpts)\n}", "func (_XStaking *XStakingSession) GetReward() (*types.Transaction, error) {\n\treturn _XStaking.Contract.GetReward(&_XStaking.TransactOpts)\n}", "func (_Lmc *LmcCaller) GetUserAccumulatedReward(opts *bind.CallOpts, _userAddress common.Address, tokenIndex *big.Int) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _Lmc.contract.Call(opts, &out, \"getUserAccumulatedReward\", _userAddress, tokenIndex)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (k Keeper) GetReward(ctx sdk.Context, stakingCoinDenom string, farmerAcc sdk.AccAddress) (reward types.Reward, found bool) {\n\tstore := ctx.KVStore(k.storeKey)\n\tbz := store.Get(types.GetRewardKey(stakingCoinDenom, farmerAcc))\n\tif bz == nil 
{\n\t\treturn reward, false\n\t}\n\tvar rewardCoins types.RewardCoins\n\tk.cdc.MustUnmarshal(bz, &rewardCoins)\n\treturn types.Reward{\n\t\tFarmer: farmerAcc.String(),\n\t\tStakingCoinDenom: stakingCoinDenom,\n\t\tRewardCoins: rewardCoins.RewardCoins,\n\t}, true\n}", "func (c4 *Connect4) GetReward() int {\n\tif c4.Winner == nil {\n\t\treturn 0\n\t} else if *c4.Winner == 1 {\n\t\treturn 1\n\t}\n\treturn -1\n}", "func (_Lmc *LmcCaller) GetUserRewardDebt(opts *bind.CallOpts, _userAddress common.Address, _index *big.Int) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _Lmc.contract.Call(opts, &out, \"getUserRewardDebt\", _userAddress, _index)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func GetReward(a Action, feedback Action) float64 {\n\tif a == feedback {\n\t\treturn 1\n\t}\n\treturn -1\n}", "func ViewReward(rw http.ResponseWriter, r *http.Request) {\n\t// get the token\n\treqToken := r.Header.Get(\"Authorization\")\n\t\n\t// get the claims\n\tclaims, isNotValid := GetClaims(reqToken, rw)\n\tif isNotValid {\n\t\treturn\n\t}\n\n\tdt, err := db.GetUserRewards(claims.Roll)\n\tif err != nil {\n\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\trw.Write(Rsp(err.Error(), \"Server Error\"))\n\t\treturn\n\t}\n\trw.WriteHeader(http.StatusOK)\n\tres := c.RespData{\n\t\tMessage: \"All data\",\n\t\tData: dt,\n\t}\n\tjson.NewEncoder(rw).Encode(res)\n}", "func (_IStakingRewards *IStakingRewardsCallerSession) GetRewardForDuration() (*big.Int, error) {\n\treturn _IStakingRewards.Contract.GetRewardForDuration(&_IStakingRewards.CallOpts)\n}", "func (_IStakingRewards *IStakingRewardsSession) GetRewardForDuration() (*big.Int, error) {\n\treturn _IStakingRewards.Contract.GetRewardForDuration(&_IStakingRewards.CallOpts)\n}", "func (_XStaking *XStakingCallerSession) GetRewardForDuration() (*big.Int, error) {\n\treturn _XStaking.Contract.GetRewardForDuration(&_XStaking.CallOpts)\n}", "func (_IStakingRewards *IStakingRewardsCaller) GetRewardForDuration(opts *bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _IStakingRewards.contract.Call(opts, &out, \"getRewardForDuration\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_XStaking *XStakingSession) GetRewardForDuration() (*big.Int, error) {\n\treturn _XStaking.Contract.GetRewardForDuration(&_XStaking.CallOpts)\n}", "func (_Token *TokenCaller) CurrentReward(opts *bind.CallOpts, account common.Address) (struct {\n\tInitialDeposit *big.Int\n\tReward *big.Int\n}, error) {\n\tret := new(struct {\n\t\tInitialDeposit *big.Int\n\t\tReward *big.Int\n\t})\n\tout := ret\n\terr := _Token.contract.Call(opts, out, \"currentReward\", account)\n\treturn *ret, err\n}", "func (c *gRPCClient) AccountRewards(address gosmtypes.Address, offset uint32, maxResults uint32) ([]*apitypes.Reward, uint32, error) {\n\tgsc := c.getGlobalStateServiceClient()\n\tresp, err := gsc.AccountDataQuery(context.Background(), &apitypes.AccountDataQueryRequest{\n\t\tFilter: &apitypes.AccountDataFilter{\n\t\t\tAccountId: &apitypes.AccountId{Address: address.Bytes()},\n\t\t\tAccountDataFlags: uint32(apitypes.AccountDataFlag_ACCOUNT_DATA_FLAG_REWARD),\n\t\t},\n\n\t\tMaxResults: maxResults,\n\t\tOffset: offset,\n\t})\n\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\trewards := make([]*apitypes.Reward, 0)\n\n\tfor _, data := range resp.AccountItem {\n\t\tr := 
data.GetReward()\n\t\tif r != nil {\n\t\t\trewards = append(rewards, r)\n\t\t}\n\t}\n\n\treturn rewards, resp.TotalResults, nil\n}", "func (_Token *TokenCaller) BaseReward(opts *bind.CallOpts, index *big.Int) (*big.Int, *big.Int, *big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t\tret1 = new(*big.Int)\n\t\tret2 = new(*big.Int)\n\t)\n\tout := &[]interface{}{\n\t\tret0,\n\t\tret1,\n\t\tret2,\n\t}\n\terr := _Token.contract.Call(opts, out, \"baseReward\", index)\n\treturn *ret0, *ret1, *ret2, err\n}", "func (_XStaking *XStakingCaller) GetRewardForDuration(opts *bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _XStaking.contract.Call(opts, &out, \"getRewardForDuration\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_Token *TokenSession) CurrentReward(account common.Address) (struct {\n\tInitialDeposit *big.Int\n\tReward *big.Int\n}, error) {\n\treturn _Token.Contract.CurrentReward(&_Token.CallOpts, account)\n}", "func (_Token *TokenCallerSession) BaseReward(index *big.Int) (*big.Int, *big.Int, *big.Int, error) {\n\treturn _Token.Contract.BaseReward(&_Token.CallOpts, index)\n}", "func (as AccountStorage) GetRewardHistory(\n\tctx sdk.Context, me types.AccountKey, bucketSlot int64) (*RewardHistory, sdk.Error) {\n\tstore := ctx.KVStore(as.key)\n\trewardHistoryBytes := store.Get(getRewardHistoryKey(me, bucketSlot))\n\tif rewardHistoryBytes == nil {\n\t\treturn nil, nil\n\t}\n\thistory := new(RewardHistory)\n\tif err := as.cdc.UnmarshalJSON(rewardHistoryBytes, history); err != nil {\n\t\treturn nil, ErrFailedToUnmarshalRewardHistory(err)\n\t}\n\treturn history, nil\n}", "func (_Token *TokenCallerSession) CurrentReward(account common.Address) (struct {\n\tInitialDeposit *big.Int\n\tReward *big.Int\n}, error) {\n\treturn _Token.Contract.CurrentReward(&_Token.CallOpts, account)\n}", "func (_Token *TokenSession) BaseReward(index *big.Int) (*big.Int, *big.Int, *big.Int, error) {\n\treturn _Token.Contract.BaseReward(&_Token.CallOpts, index)\n}", "func (s *BlocksService) Reward(ctx context.Context) (*BlocksReward, *http.Response, error) {\n\tvar responseStruct *BlocksReward\n\tresp, err := s.client.SendRequest(ctx, \"GET\", \"blocks/getReward\", nil, &responseStruct)\n\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn responseStruct, resp, err\n}", "func GenerateGetRewardBalanceScript(env Environment) []byte {\n\tcode := assets.MustAssetString(rewardBalanceFilename)\n\n\treturn []byte(replaceAddresses(code, env))\n}", "func (c RewardsController) GetRewards(page int) revel.Result {\n\n\tif !c.GetCurrentUser() {\n\t\treturn c.ForbiddenResponse()\n\t}\n\n\t//ChangeRewardsModel() // Remove when finish production\n\n\tvar reward models.Reward\n\tif Reward, ok := app.Mapper.GetModel(&reward); ok {\n\t\tvar rewards = []models.Reward{}\n\t\tvar match = bson.M{\"$and\": []bson.M{\n\t\t\tbson.M{\"$or\": []bson.M{\n\t\t\t\tbson.M{\"user_id\": c.CurrentUser.GetID().Hex()},\n\t\t\t\tbson.M{\"users\": bson.M{\"$elemMatch\": bson.M{\"$eq\": c.CurrentUser.GetID().Hex()}}},\n\t\t\t}},\n\t\t\tbson.M{\"is_visible\": true},\n\t\t\tbson.M{\"resource_type\": bson.M{\"$ne\": core.ModelTypeChallenge}},\n\t\t}}\n\t\tif page <= 1 {\n\t\t\tpage = 1\n\t\t}\n\t\tvar pipe = mgomap.Aggregate{}.Match(match).Sort(bson.M{\"updated_at\": -1}).Skip((page - 1) * core.LimitRewards).Limit(core.LimitRewards)\n\n\t\tif err := Reward.Pipe(pipe, &rewards); err != nil {\n\t\t\treturn 
c.ErrorResponse(c.Message(\"error.notFound\", \"Rewards\"), \"No rewards Found\", 400)\n\t\t}\n\t\treturn c.SuccessResponse(rewards, \"success\", core.ModelsType[core.ModelReward], serializers.RewardSerializer{Lang: c.Request.Locale})\n\n\t}\n\treturn c.ServerErrorResponse()\n}", "func getAccumulatedRewards(ctx sdk.Context, distKeeper types.DistributionKeeper, delegation stakingtypes.Delegation) ([]wasmvmtypes.Coin, error) {\n\t// Try to get *delegator* reward info!\n\tparams := distributiontypes.QueryDelegationRewardsRequest{\n\t\tDelegatorAddress: delegation.DelegatorAddress,\n\t\tValidatorAddress: delegation.ValidatorAddress,\n\t}\n\tcache, _ := ctx.CacheContext()\n\tqres, err := distKeeper.DelegationRewards(sdk.WrapSDKContext(cache), &params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// now we have it, convert it into wasmvm types\n\trewards := make([]wasmvmtypes.Coin, len(qres.Rewards))\n\tfor i, r := range qres.Rewards {\n\t\trewards[i] = wasmvmtypes.Coin{\n\t\t\tDenom: r.Denom,\n\t\t\tAmount: r.Amount.TruncateInt().String(),\n\t\t}\n\t}\n\treturn rewards, nil\n}", "func MiningRewardBalance(block consensus.Block, account []byte) *RTU {\n//\tif bytes, err := block.Lookup([]byte(bytesToHexString(account))); err == nil {\n\tif bytes, err := block.Lookup(account); err == nil {\n\t\treturn BytesToRtu(bytes)\n\t}\n\treturn BytesToRtu(nil)\n}", "func (t *Transaction) Reward() string {\n\treturn t.reward\n}", "func (a Actor) AwardBlockReward(rt vmr.Runtime, params *AwardBlockRewardParams) *adt.EmptyValue {\n\trt.ValidateImmediateCallerIs(builtin.SystemActorAddr)\n\tAssertMsg(rt.CurrentBalance().GreaterThanEqual(params.GasReward),\n\t\t\"actor current balance %v insufficient to pay gas reward %v\", rt.CurrentBalance(), params.GasReward)\n\n\tAssertMsg(params.TicketCount > 0, \"cannot give block reward for zero tickets\")\n\n\tminer, ok := rt.ResolveAddress(params.Miner)\n\tif !ok {\n\t\trt.Abortf(exitcode.ErrIllegalState, \"failed to resolve given owner address\")\n\t}\n\n\tpriorBalance := rt.CurrentBalance()\n\n\tvar penalty abi.TokenAmount\n\tvar st State\n\trt.State().Transaction(&st, func() interface{} {\n\t\tblockReward := a.computeBlockReward(&st, big.Sub(priorBalance, params.GasReward), params.TicketCount)\n\t\ttotalReward := big.Add(blockReward, params.GasReward)\n\n\t\t// Cap the penalty at the total reward value.\n\t\tpenalty = big.Min(params.Penalty, totalReward)\n\n\t\t// Reduce the payable reward by the penalty.\n\t\trewardPayable := big.Sub(totalReward, penalty)\n\n\t\tAssertMsg(big.Add(rewardPayable, penalty).LessThanEqual(priorBalance),\n\t\t\t\"reward payable %v + penalty %v exceeds balance %v\", rewardPayable, penalty, priorBalance)\n\n\t\t// Record new reward into reward map.\n\t\tif rewardPayable.GreaterThan(abi.NewTokenAmount(0)) {\n\t\t\tnewReward := Reward{\n\t\t\t\tStartEpoch: rt.CurrEpoch(),\n\t\t\t\tEndEpoch: rt.CurrEpoch() + rewardVestingPeriod,\n\t\t\t\tValue: rewardPayable,\n\t\t\t\tAmountWithdrawn: abi.NewTokenAmount(0),\n\t\t\t\tVestingFunction: rewardVestingFunction,\n\t\t\t}\n\t\t\tif err := st.addReward(adt.AsStore(rt), miner, &newReward); err != nil {\n\t\t\t\trt.Abortf(exitcode.ErrIllegalState, \"failed to add reward to rewards map: %w\", err)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\n\t// Burn the penalty amount.\n\t_, code := rt.Send(builtin.BurntFundsActorAddr, builtin.MethodSend, nil, penalty)\n\tbuiltin.RequireSuccess(rt, code, \"failed to send penalty to BurntFundsActor\")\n\n\treturn nil\n}", "func (m *MemoryRewardStorage) Get(id int) 
(rewards.Reward, error) {\n\tvar reward rewards.Reward\n\n\tfor _, r := range m.rewards {\n\t\tif r.ID == id {\n\t\t\treturn r, nil\n\t\t}\n\t}\n\n\treturn reward, rewards.ErrNotFound\n}", "func (mgr *MiningMgr) currentReward() (*big.Int, error) {\n\ttimeOfLastNewValue, err := mgr.contractInstance.GetUintVar(nil, rpc.Keccak256([]byte(\"_TIME_OF_LAST_NEW_VALUE\")))\n\tif err != nil {\n\t\treturn nil, errors.New(\"getting _TIME_OF_LAST_NEW_VALUE\")\n\t}\n\ttotalTips, err := mgr.contractInstance.GetUintVar(nil, rpc.Keccak256([]byte(\"_CURRENT_TOTAL_TIPS\")))\n\tif err != nil {\n\t\treturn nil, errors.New(\"getting _CURRENT_TOTAL_TIPS\")\n\t}\n\n\ttimeDiff := big.NewInt(time.Now().Unix() - timeOfLastNewValue.Int64())\n\ttrb := big.NewInt(1e18)\n\trewardPerSec := big.NewInt(0).Div(trb, big.NewInt(300)) // 1 TRB every 5 minutes so total reward is timeDiff multiplied by reward per second.\n\trewardTRB := big.NewInt(0).Mul(rewardPerSec, timeDiff)\n\n\tsingleMinerTip := big.NewInt(0).Div(totalTips, big.NewInt(10)) // Half of the tips are burned(remain in the contract) to reduce inflation.\n\trewardWithTips := big.NewInt(0).Add(singleMinerTip, rewardTRB)\n\n\tif rewardWithTips == big.NewInt(0) {\n\t\treturn big.NewInt(0), nil\n\t}\n\n\treturn mgr.convertTRBtoETH(rewardWithTips)\n}", "func getRewardForValidator(totalPower *big.Int, validatorPower *big.Int, totalRewards *balance.Amount) *balance.Amount {\n\tnumerator := big.NewInt(0).Mul(totalRewards.BigInt(), validatorPower)\n\treward := balance.NewAmountFromBigInt(big.NewInt(0).Div(numerator, totalPower))\n\treturn reward\n}", "func EstimateReward(reward, pr, gamma float64) float64 {\n\tret := reward / (pr + gamma)\n\tlog.Logf(MABLogLevel, \"MAB Estimate Reward: %v / (%v + %v) = %v\\n\",\n\t\treward, pr, gamma, ret)\n\treturn ret\n}", "func (c *Coinbase) AddReward(output *Output) {\n\toutput.EncryptedMask = make([]byte, 1)\n\tc.Rewards = append(c.Rewards, output)\n}", "func (k Querier) Rewards(c context.Context, req *types.QueryRewardsRequest) (*types.QueryRewardsResponse, error) {\n\tif req == nil {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"invalid request\")\n\t}\n\n\tif req.StakingCoinDenom != \"\" {\n\t\tif err := sdk.ValidateDenom(req.StakingCoinDenom); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tctx := sdk.UnwrapSDKContext(c)\n\tstore := ctx.KVStore(k.storeKey)\n\tvar rewards []types.Reward\n\tvar pageRes *query.PageResponse\n\tvar err error\n\n\tif req.Farmer != \"\" {\n\t\tvar farmerAcc sdk.AccAddress\n\t\tfarmerAcc, err = sdk.AccAddressFromBech32(req.Farmer)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tstorePrefix := types.GetRewardsByFarmerIndexKey(farmerAcc)\n\t\tindexStore := prefix.NewStore(store, storePrefix)\n\t\tpageRes, err = query.FilteredPaginate(indexStore, req.Pagination, func(key, value []byte, accumulate bool) (bool, error) {\n\t\t\t_, stakingCoinDenom := types.ParseRewardsByFarmerIndexKey(append(storePrefix, key...))\n\t\t\tif req.StakingCoinDenom != \"\" {\n\t\t\t\tif stakingCoinDenom != req.StakingCoinDenom {\n\t\t\t\t\treturn false, nil\n\t\t\t\t}\n\t\t\t}\n\t\t\treward, found := k.GetReward(ctx, stakingCoinDenom, farmerAcc)\n\t\t\tif !found { // TODO: remove this check\n\t\t\t\treturn false, fmt.Errorf(\"reward not found\")\n\t\t\t}\n\t\t\tif accumulate {\n\t\t\t\trewards = append(rewards, reward)\n\t\t\t}\n\t\t\treturn true, nil\n\t\t})\n\t} else {\n\t\tvar storePrefix []byte\n\t\tif req.StakingCoinDenom != \"\" {\n\t\t\tstorePrefix = 
types.GetRewardsByStakingCoinDenomKey(req.StakingCoinDenom)\n\t\t} else {\n\t\t\tstorePrefix = types.RewardKeyPrefix\n\t\t}\n\t\trewardStore := prefix.NewStore(store, storePrefix)\n\n\t\tpageRes, err = query.Paginate(rewardStore, req.Pagination, func(key, value []byte) error {\n\t\t\tstakingCoinDenom, farmerAcc := types.ParseRewardKey(append(storePrefix, key...))\n\t\t\trewardCoins, err := k.UnmarshalRewardCoins(value)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\trewards = append(rewards, types.Reward{\n\t\t\t\tFarmer: farmerAcc.String(),\n\t\t\t\tStakingCoinDenom: stakingCoinDenom,\n\t\t\t\tRewardCoins: rewardCoins.RewardCoins,\n\t\t\t})\n\t\t\treturn nil\n\t\t})\n\t}\n\tif err != nil {\n\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t}\n\n\treturn &types.QueryRewardsResponse{Rewards: rewards, Pagination: pageRes}, nil\n}", "func (n Network) ChainReward(ctx context.Context, launchID uint64) (rewardtypes.RewardPool, error) {\n\tres, err := n.rewardQuery.\n\t\tRewardPool(ctx,\n\t\t\t&rewardtypes.QueryGetRewardPoolRequest{\n\t\t\t\tLaunchID: launchID,\n\t\t\t},\n\t\t)\n\n\tif cosmoserror.Unwrap(err) == cosmoserror.ErrNotFound {\n\t\treturn rewardtypes.RewardPool{}, ErrObjectNotFound\n\t} else if err != nil {\n\t\treturn rewardtypes.RewardPool{}, err\n\t}\n\treturn res.RewardPool, nil\n}", "func computeReward(epoch abi.ChainEpoch, prevTheta, currTheta, simpleTotal, baselineTotal big.Int) abi.TokenAmount {\n\tsimpleReward := big.Mul(simpleTotal, ExpLamSubOne) //Q.0 * Q.128 => Q.128\n\tepochLam := big.Mul(big.NewInt(int64(epoch)), Lambda) // Q.0 * Q.128 => Q.128\n\n\tsimpleReward = big.Mul(simpleReward, big.NewFromGo(math.ExpNeg(epochLam.Int))) // Q.128 * Q.128 => Q.256\n\tsimpleReward = big.Rsh(simpleReward, math.Precision128) // Q.256 >> 128 => Q.128\n\n\tbaselineReward := big.Sub(computeBaselineSupply(currTheta, baselineTotal), computeBaselineSupply(prevTheta, baselineTotal)) // Q.128\n\n\treward := big.Add(simpleReward, baselineReward) // Q.128\n\n\treturn big.Rsh(reward, math.Precision128) // Q.128 => Q.0\n}", "func (c RewardsController) CollectReward(id string) revel.Result {\n\tif !c.GetCurrentUser() {\n\t\treturn c.ForbiddenResponse()\n\t}\n\n\tif !bson.IsObjectIdHex(id) {\n\t\treturn c.ErrorResponse(nil, c.Message(\"error.invalid\", \"\"), core.ModelStatus[core.StatusInvalidID])\n\t}\n\n\tvar selector = []bson.M{\n\t\tbson.M{\"user_id\": c.CurrentUser.GetID().Hex()},\n\t\tbson.M{\"_id\": id},\n\t\tbson.M{\"multi\": false},\n\t}\n\tvar query = bson.M{\"$set\": []bson.M{\n\t\tbson.M{\"status.name\": core.StatusObtained},\n\t\tbson.M{\"status.code\": core.ValidationStatus[core.StatusObtained]},\n\t}}\n\n\t// Get pending Rewards for the user\n\tif Reward, ok := app.Mapper.GetModel(&models.Reward{}); ok {\n\t\tif err := Reward.UpdateQuery(selector, query, false); err != nil {\n\t\t\trevel.ERROR.Print(\"ERROR Find\")\n\t\t\treturn c.ErrorResponse(err, err.Error(), 400)\n\t\t}\n\t\treturn c.SuccessResponse(bson.M{\"data\": \"Reward collected successfully\"}, \"success\", core.ModelsType[core.ModelSimpleResponse], nil)\n\t}\n\n\treturn c.ServerErrorResponse()\n}", "func (_BondedECDSAKeep *BondedECDSAKeepTransactorSession) DistributeETHReward() (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.Contract.DistributeETHReward(&_BondedECDSAKeep.TransactOpts)\n}", "func (s *PublicSfcAPI) GetRewardWeights(ctx context.Context, stakerID hexutil.Uint) (map[string]interface{}, error) {\n\tbaseRewardWeight, txRewardWeight, err := s.b.GetRewardWeights(ctx, 
idx.StakerID(stakerID))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif baseRewardWeight == nil || txRewardWeight == nil {\n\t\treturn nil, nil\n\t}\n\treturn map[string]interface{}{\n\t\t\"baseRewardWeight\": (*hexutil.Big)(baseRewardWeight),\n\t\t\"txRewardWeight\": (*hexutil.Big)(txRewardWeight),\n\t}, nil\n}", "func (_BondedECDSAKeep *BondedECDSAKeepSession) DistributeETHReward() (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.Contract.DistributeETHReward(&_BondedECDSAKeep.TransactOpts)\n}", "func (vi *votedInfo) CalculateReward(multiplier, divider *big.Int, period int) {\n\tif multiplier.Sign() == 0 || period == 0 {\n\t\treturn\n\t}\n\tif divider.Sign() == 0 || vi.totalBondedDelegation.Sign() == 0 {\n\t\treturn\n\t}\n\t// reward = multiplier * period * bondedDelegation / (divider * totalBondedDelegation)\n\tbase := new(big.Int).Mul(multiplier, big.NewInt(int64(period)))\n\treward := new(big.Int)\n\tfor i, addrKey := range vi.rank {\n\t\tif i == vi.maxRankForReward {\n\t\t\tbreak\n\t\t}\n\t\tprep := vi.preps[addrKey]\n\t\tif prep.Enable() == false {\n\t\t\tcontinue\n\t\t}\n\n\t\treward.Mul(base, prep.GetBondedDelegation())\n\t\treward.Div(reward, divider)\n\t\treward.Div(reward, vi.totalBondedDelegation)\n\n\t\tlog.Tracef(\"VOTED REWARD %d = %d * %d * %d / (%d * %d)\",\n\t\t\treward, multiplier, period, prep.GetBondedDelegation(), divider, vi.totalBondedDelegation)\n\n\t\tprep.SetIScore(new(big.Int).Add(prep.IScore(), reward))\n\t}\n}", "func (_BondedECDSAKeep *BondedECDSAKeepTransactor) DistributeETHReward(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.contract.Transact(opts, \"distributeETHReward\")\n}", "func (_Lmc *LmcCallerSession) GetUserRewardDebtLength(_userAddress common.Address) (*big.Int, error) {\n\treturn _Lmc.Contract.GetUserRewardDebtLength(&_Lmc.CallOpts, _userAddress)\n}", "func (p *Protocol) getRewardHistory(actionHash string) ([]*RewardHistory, error) {\n\tdb := p.Store.GetDB()\n\n\tgetQuery := fmt.Sprintf(selectRewardHistory,\n\t\tRewardHistoryTableName)\n\tstmt, err := db.Prepare(getQuery)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to prepare get query\")\n\t}\n\tdefer stmt.Close()\n\n\trows, err := stmt.Query(actionHash)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to execute get query\")\n\t}\n\n\tvar rewardHistory RewardHistory\n\tparsedRows, err := s.ParseSQLRows(rows, &rewardHistory)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to parse results\")\n\t}\n\n\tif len(parsedRows) == 0 {\n\t\treturn nil, indexprotocol.ErrNotExist\n\t}\n\n\tvar rewardHistoryList []*RewardHistory\n\tfor _, parsedRow := range parsedRows {\n\t\trewards := parsedRow.(*RewardHistory)\n\t\trewardHistoryList = append(rewardHistoryList, rewards)\n\t}\n\treturn rewardHistoryList, nil\n}", "func (t *SimpleChaincode) getBalance(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\tvar sourceAccountName, destinationAccountName string // Entities\n\tvar err error\n\n\tif len(args) != 2 {\n\t\treturn shim.Error(\"Incorrect number of arguments. 
Expecting name of the person to query\")\n\t}\n\n\tsourceAccountName = args[0]\n\tdestinationAccountName = args[1]\n\n\t// Get the state from the ledger\n\taccountValBytes, err := stub.GetState(sourceAccountName + \".\" + destinationAccountName)\n\tif err != nil {\n\t\tjsonResp := \"{\\\"Error\\\":\\\"Failed to get state for \" + sourceAccountName + \".\" + destinationAccountName + \"\\\"}\"\n\t\treturn shim.Error(jsonResp)\n\t}\n\n\tif accountValBytes == nil {\n\t\taccountValBytes = []byte(\"0\")\n\t}\n\n\tjsonResp := \"{\\\"Source\\\":\\\"\" + sourceAccountName + \"\\\",\" +\n\t\t\"\\\"Destination\\\":\\\"\" + destinationAccountName + \"\\\",\\\"\" +\n\t\t\"Amount\\\":\\\"\" + string(accountValBytes) + \"\\\"}\"\n\tfmt.Printf(\"Query Response:%s\\n\", jsonResp)\n\treturn shim.Success(accountValBytes)\n}", "func (r *Rewarding) BlockReward() *big.Int {\n\tval, ok := new(big.Int).SetString(r.BlockRewardStr, 10)\n\tif !ok {\n\t\tlog.S().Panicf(\"Error when casting block reward string %s into big int\", r.BlockRewardStr)\n\t}\n\treturn val\n}", "func (va ClawbackVestingAccount) distributeReward(ctx sdk.Context, ak AccountKeeper, bondDenom string, reward sdk.Coins) {\n\tnow := ctx.BlockTime().Unix()\n\tt := va.StartTime\n\tfirstUnvestedPeriod := 0\n\tunvestedTokens := sdk.ZeroInt()\n\tfor i, period := range va.VestingPeriods {\n\t\tt += period.Length\n\t\tif t <= now {\n\t\t\tfirstUnvestedPeriod = i + 1\n\t\t\tcontinue\n\t\t}\n\t\tunvestedTokens = unvestedTokens.Add(period.Amount.AmountOf(bondDenom))\n\t}\n\n\trunningTotReward := sdk.NewCoins()\n\trunningTotStaking := sdk.ZeroInt()\n\tfor i := firstUnvestedPeriod; i < len(va.VestingPeriods); i++ {\n\t\tperiod := va.VestingPeriods[i]\n\t\trunningTotStaking = runningTotStaking.Add(period.Amount.AmountOf(bondDenom))\n\t\trunningTotRatio := runningTotStaking.ToDec().Quo(unvestedTokens.ToDec())\n\t\ttargetCoins := scaleCoins(reward, runningTotRatio)\n\t\tthisReward := targetCoins.Sub(runningTotReward)\n\t\trunningTotReward = targetCoins\n\t\tperiod.Amount = period.Amount.Add(thisReward...)\n\t\tva.VestingPeriods[i] = period\n\t}\n\n\tva.OriginalVesting = va.OriginalVesting.Add(reward...)\n\tak.SetAccount(ctx, &va)\n}", "func (del Delegation) ClaimedReward() (hexutil.Big, error) {\n\tval, err := repository.R().RewardsClaimed(&del.Address, (*big.Int)(del.Delegation.ToStakerId), nil, nil)\n\tif err != nil {\n\t\treturn hexutil.Big{}, err\n\t}\n\treturn (hexutil.Big)(*val), nil\n}", "func (_Lmc *LmcSession) GetUserRewardDebtLength(_userAddress common.Address) (*big.Int, error) {\n\treturn _Lmc.Contract.GetUserRewardDebtLength(&_Lmc.CallOpts, _userAddress)\n}", "func GetRewardEventsInfo(fromBlock *big.Int, toBlock *big.Int) []*RewardInfo {\n\n\tvar logEpochRewardSig = []byte(\"EpochRewardsDistributedToVoters(address,uint256)\")\n\tvar logEpochRewardSigHash = crypto.Keccak256Hash(logEpochRewardSig)\n\tvar TopicsFilter = [][]common.Hash{{logEpochRewardSigHash}}\n\n\tcontractAddress := common.HexToAddress(WrapperContractDeploymentAddress[NetActive][Election])\n\n\tquery := ethereum.FilterQuery{\n\t\tFromBlock: fromBlock,\n\t\tToBlock: toBlock,\n\t\tTopics: TopicsFilter,\n\n\t\tAddresses: []common.Address{\n\t\t\tcontractAddress,\n\t\t},\n\t}\n\n\tlogs, err := atlasEthClient.FilterLogs(context.Background(), query)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\trewards_info := make([]*RewardInfo, 0, len(logs))\n\n\tcontractAbi, err := abi.JSON(strings.NewReader(string(binding.ElectionABI)))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor _, vLog := 
range logs {\n\n\t\tvar epochRewardEvent EpochRewardEvent\n\t\terr := contractAbi.Unpack(&epochRewardEvent, \"EpochRewardsDistributedToVoters\", vLog.Data)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tri := &RewardInfo{Group: common.HexToAddress(vLog.Topics[1].Hex()).String(),\n\t\t\tGroupHash: vLog.Topics[1],\n\t\t\tRewardValue: epochRewardEvent.Value,\n\t\t\tBlockNumber: new(big.Int).SetUint64(vLog.BlockNumber)}\n\n\t\tAddAtlasToRewardInfo(ri)\n\n\t\trewards_info = append(rewards_info, ri)\n\t}\n\n\treturn rewards_info\n}", "func (_IStakingRewards *IStakingRewardsCaller) LastTimeRewardApplicable(opts *bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _IStakingRewards.contract.Call(opts, &out, \"lastTimeRewardApplicable\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (cra clawbackRewardAction) ProcessReward(ctx sdk.Context, reward sdk.Coins, rawAccount exported.VestingAccount) error {\n\tcva, ok := rawAccount.(*ClawbackVestingAccount)\n\tif !ok {\n\t\treturn sdkerrors.Wrapf(sdkerrors.ErrNotSupported, \"expected *ClawbackVestingAccount, got %T\", rawAccount)\n\t}\n\tcva.postReward(ctx, reward, cra.ak, cra.bk, cra.sk)\n\treturn nil\n}", "func (_Smartchef *SmartchefCallerSession) PendingReward(_user common.Address) (*big.Int, error) {\n\treturn _Smartchef.Contract.PendingReward(&_Smartchef.CallOpts, _user)\n}", "func (s *MutableState) AddRewards(time epochtime.EpochTime, factor *quantity.Quantity, accounts []signature.PublicKey) error {\n\tsteps, err := s.RewardSchedule()\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar activeStep *staking.RewardStep\n\tfor _, step := range steps {\n\t\tif time < step.Until {\n\t\t\tactiveStep = &step\n\t\t\tbreak\n\t\t}\n\t}\n\tif activeStep == nil {\n\t\t// We're past the end of the schedule.\n\t\treturn nil\n\t}\n\n\tcommonPool, err := s.CommonPool()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"loading common pool\")\n\t}\n\n\tfor _, id := range accounts {\n\t\tent := s.Account(id)\n\n\t\tq := ent.Escrow.Active.Balance.Clone()\n\t\t// Multiply first.\n\t\tif err := q.Mul(factor); err != nil {\n\t\t\treturn errors.Wrap(err, \"multiplying by reward factor\")\n\t\t}\n\t\tif err := q.Mul(&activeStep.Scale); err != nil {\n\t\t\treturn errors.Wrap(err, \"multiplying by reward step scale\")\n\t\t}\n\t\tif err := q.Quo(staking.RewardAmountDenominator); err != nil {\n\t\t\treturn errors.Wrap(err, \"dividing by reward amount denominator\")\n\t\t}\n\n\t\tif q.IsZero() {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar com *quantity.Quantity\n\t\trate := ent.Escrow.CommissionSchedule.CurrentRate(time)\n\t\tif rate != nil {\n\t\t\tcom = q.Clone()\n\t\t\t// Multiply first.\n\t\t\tif err := com.Mul(rate); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"multiplying by commission rate\")\n\t\t\t}\n\t\t\tif err := com.Quo(staking.CommissionRateDenominator); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"dividing by commission rate denominator\")\n\t\t\t}\n\n\t\t\tif err := q.Sub(com); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"subtracting commission\")\n\t\t\t}\n\t\t}\n\n\t\tif !q.IsZero() {\n\t\t\tif err := quantity.Move(&ent.Escrow.Active.Balance, commonPool, q); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"transferring to active escrow balance from common pool\")\n\t\t\t}\n\t\t}\n\n\t\tif com != nil && !com.IsZero() {\n\t\t\tdelegation := s.Delegation(id, id)\n\n\t\t\tif err := ent.Escrow.Active.Deposit(&delegation.Shares, 
commonPool, com); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"depositing commission\")\n\t\t\t}\n\n\t\t\ts.SetDelegation(id, id, delegation)\n\t\t}\n\n\t\ts.SetAccount(id, ent)\n\t}\n\n\ts.SetCommonPool(commonPool)\n\n\treturn nil\n}", "func (_Contract *ContractCaller) TaskHandlingReward(opts *bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _Contract.contract.Call(opts, &out, \"taskHandlingReward\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (s *MutableState) AddRewardSingleAttenuated(time epochtime.EpochTime, factor *quantity.Quantity, attenuationNumerator, attenuationDenominator int, account signature.PublicKey) error {\n\tsteps, err := s.RewardSchedule()\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar activeStep *staking.RewardStep\n\tfor _, step := range steps {\n\t\tif time < step.Until {\n\t\t\tactiveStep = &step\n\t\t\tbreak\n\t\t}\n\t}\n\tif activeStep == nil {\n\t\t// We're past the end of the schedule.\n\t\treturn nil\n\t}\n\n\tvar numQ, denQ quantity.Quantity\n\tif err = numQ.FromInt64(int64(attenuationNumerator)); err != nil {\n\t\treturn errors.Wrapf(err, \"importing attenuation numerator %d\", attenuationNumerator)\n\t}\n\tif err = denQ.FromInt64(int64(attenuationDenominator)); err != nil {\n\t\treturn errors.Wrapf(err, \"importing attenuation denominator %d\", attenuationDenominator)\n\t}\n\n\tcommonPool, err := s.CommonPool()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"loading common pool\")\n\t}\n\n\tent := s.Account(account)\n\n\tq := ent.Escrow.Active.Balance.Clone()\n\t// Multiply first.\n\tif err := q.Mul(factor); err != nil {\n\t\treturn errors.Wrap(err, \"multiplying by reward factor\")\n\t}\n\tif err := q.Mul(&activeStep.Scale); err != nil {\n\t\treturn errors.Wrap(err, \"multiplying by reward step scale\")\n\t}\n\tif err := q.Mul(&numQ); err != nil {\n\t\treturn errors.Wrap(err, \"multiplying by attenuation numerator\")\n\t}\n\tif err := q.Quo(staking.RewardAmountDenominator); err != nil {\n\t\treturn errors.Wrap(err, \"dividing by reward amount denominator\")\n\t}\n\tif err := q.Quo(&denQ); err != nil {\n\t\treturn errors.Wrap(err, \"dividing by attenuation denominator\")\n\t}\n\n\tif q.IsZero() {\n\t\treturn nil\n\t}\n\n\tvar com *quantity.Quantity\n\trate := ent.Escrow.CommissionSchedule.CurrentRate(time)\n\tif rate != nil {\n\t\tcom = q.Clone()\n\t\t// Multiply first.\n\t\tif err := com.Mul(rate); err != nil {\n\t\t\treturn errors.Wrap(err, \"multiplying by commission rate\")\n\t\t}\n\t\tif err := com.Quo(staking.CommissionRateDenominator); err != nil {\n\t\t\treturn errors.Wrap(err, \"dividing by commission rate denominator\")\n\t\t}\n\n\t\tif err := q.Sub(com); err != nil {\n\t\t\treturn errors.Wrap(err, \"subtracting commission\")\n\t\t}\n\t}\n\n\tif !q.IsZero() {\n\t\tif err := quantity.Move(&ent.Escrow.Active.Balance, commonPool, q); err != nil {\n\t\t\treturn errors.Wrap(err, \"transferring to active escrow balance from common pool\")\n\t\t}\n\t}\n\n\tif com != nil && !com.IsZero() {\n\t\tdelegation := s.Delegation(account, account)\n\n\t\tif err := ent.Escrow.Active.Deposit(&delegation.Shares, commonPool, com); err != nil {\n\t\t\treturn errors.Wrap(err, \"depositing commission\")\n\t\t}\n\n\t\ts.SetDelegation(account, account, delegation)\n\t}\n\n\ts.SetAccount(account, ent)\n\n\ts.SetCommonPool(commonPool)\n\n\treturn nil\n}", "func (_Smartchef *SmartchefCaller) PendingReward(opts *bind.CallOpts, _user 
common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _Smartchef.contract.Call(opts, out, \"pendingReward\", _user)\n\treturn *ret0, err\n}", "func (d *Dao) UseReward(id int64, usePlat string) (rst bool, err error) {\n\tif err := d.orm.\n\t\tModel(&model.AnchorReward{}).\n\t\tWhere(\"id=?\", id).\n\t\tUpdate(map[string]interface{}{\"status\": model.RewardUsed, \"use_plat\": usePlat, \"use_time\": xtime.Time(time.Now().Unix())}).Error; err != nil {\n\t\tlog.Error(\"useReward (%v) error(%v)\", id, err)\n\t\treturn rst, err\n\t}\n\trst = true\n\treturn\n}", "func (_Lmc *LmcCallerSession) LastRewardBlock() (*big.Int, error) {\n\treturn _Lmc.Contract.LastRewardBlock(&_Lmc.CallOpts)\n}", "func (r *Reward) GetRewardForBlock(blockHeight uint64) *big.Int {\n\tblockHeight += r.startHeight\n\n\tif blockHeight > lastBlock {\n\t\treturn big.NewInt(0)\n\t}\n\n\tif blockHeight == lastBlock {\n\t\treturn helpers.BipToPip(big.NewInt(lastReward))\n\t}\n\n\treward := big.NewInt(firstReward)\n\treward.Sub(reward, big.NewInt(int64(blockHeight/200000)))\n\n\tif reward.Sign() < 1 {\n\t\treturn helpers.BipToPip(big.NewInt(1))\n\t}\n\n\treturn helpers.BipToPip(reward)\n}", "func (_Smartchef *SmartchefSession) PendingReward(_user common.Address) (*big.Int, error) {\n\treturn _Smartchef.Contract.PendingReward(&_Smartchef.CallOpts, _user)\n}", "func accumulateRewards(config *params.ChainConfig, state *state.DB, header *types.Header) {\n\t// TODO: implement mining rewards\n}", "func (_Lmc *LmcSession) LastRewardBlock() (*big.Int, error) {\n\treturn _Lmc.Contract.LastRewardBlock(&_Lmc.CallOpts)\n}", "func (_Contract *ContractCallerSession) TaskHandlingReward() (*big.Int, error) {\n\treturn _Contract.Contract.TaskHandlingReward(&_Contract.CallOpts)\n}", "func (_BondedECDSAKeep *BondedECDSAKeepTransactorSession) DistributeERC20Reward(_tokenAddress common.Address, _value *big.Int) (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.Contract.DistributeERC20Reward(&_BondedECDSAKeep.TransactOpts, _tokenAddress, _value)\n}", "func (d *Dao) AddReward(c context.Context, iRewardID int64, uid int64, iSource int64, iRoomid int64, iLifespan int64) (err error) {\n\t//aReward, _ := getRewardConfByLid(iRewardID)\n\n\tm, _ := time.ParseDuration(fmt.Sprintf(\"+%dh\", iLifespan))\n\n\targ := &AnchorTaskModel.AnchorReward{\n\t\tUid: uid,\n\t\tRewardId: iRewardID,\n\t\tRoomid: iRoomid,\n\t\tSource: iSource,\n\t\tAchieveTime: xtime.Time(time.Now().Unix()),\n\t\tExpireTime: xtime.Time(time.Now().Add(m).Unix()),\n\t\tStatus: model.RewardUnUsed,\n\t}\n\n\t//spew.Dump\n\t// (arg)\n\tif err := d.orm.Create(arg).Error; err != nil {\n\t\tlog.Error(\"addReward(%v) error(%v)\", arg, err)\n\t\treturn err\n\t}\n\n\tif err := d.SetNewReward(c, uid, int64(1)); err != nil {\n\t\tlog.Error(\"addRewardMc(%v) error(%v)\", uid, err)\n\t}\n\n\tif err := d.SetHasReward(c, uid, int64(1)); err != nil {\n\t\tlog.Error(\"SetHasReward(%v) error(%v)\", uid, err)\n\t}\n\n\tlog.Info(\"addReward (%v) succ\", arg)\n\n\treturn\n}", "func (_Lmc *LmcCallerSession) UserAccruedRewards(arg0 common.Address) (*big.Int, error) {\n\treturn _Lmc.Contract.UserAccruedRewards(&_Lmc.CallOpts, arg0)\n}", "func (_Lmc *LmcSession) UserAccruedRewards(arg0 common.Address) (*big.Int, error) {\n\treturn _Lmc.Contract.UserAccruedRewards(&_Lmc.CallOpts, arg0)\n}", "func (path *Path) Rewards() map[*Reward]int {\n\treturn path.rewards\n}", "func (d *Dao) HasReward(c context.Context, uid int64) (r int64, err error) {\n\trst, err := 
d.GetHasReward(c, uid)\n\tif err != nil {\n\t\tif err == memcache.ErrNotFound {\n\t\t\treward, err2 := d.findByUid(uid, true)\n\t\t\tif err2 != nil {\n\t\t\t\treturn rst, err2\n\t\t\t}\n\t\t\tif reward != nil {\n\t\t\t\trst = int64(1)\n\t\t\t\td.SetHasReward(c, uid, rst)\n\t\t\t} else {\n\t\t\t\trst = int64(0)\n\t\t\t\td.SetHasReward(c, uid, rst)\n\t\t\t}\n\t\t\treturn rst, err\n\t\t}\n\t\tlog.Error(\"HasReward(%v) error(%v)\", uid, err)\n\t\treturn rst, err\n\t}\n\treturn rst, err\n}", "func (_Lmc *LmcCaller) GetUserRewardDebtLength(opts *bind.CallOpts, _userAddress common.Address) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _Lmc.contract.Call(opts, &out, \"getUserRewardDebtLength\", _userAddress)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func GetCmdQueryMiningRewardWeight(queryRoute string, cdc *codec.Codec) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: treasury.QueryMiningRewardWeight,\n\t\tShort: \"Query the mining reward weight\",\n\t\tLong: strings.TrimSpace(`\nQuery the mining reward rate at the specified epoch.\n\n$ terracli query treasury reward-weight --epoch=14\n`),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tcliCtx := context.NewCLIContext().WithCodec(cdc)\n\n\t\t\tepoch := viper.GetInt(flagEpoch)\n\t\t\tres, err := cliCtx.QueryWithData(fmt.Sprintf(\"custom/%s/%s/%d\", queryRoute, treasury.QueryMiningRewardWeight, epoch), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tvar rewardWeight sdk.Dec\n\t\t\tcdc.MustUnmarshalBinaryLengthPrefixed(res, &rewardWeight)\n\t\t\treturn cliCtx.PrintOutput(rewardWeight)\n\t\t},\n\t}\n\n\treturn cmd\n}", "func (_IStakingRewards *IStakingRewardsSession) LastTimeRewardApplicable() (*big.Int, error) {\n\treturn _IStakingRewards.Contract.LastTimeRewardApplicable(&_IStakingRewards.CallOpts)\n}", "func (_IStakingRewards *IStakingRewardsCallerSession) LastTimeRewardApplicable() (*big.Int, error) {\n\treturn _IStakingRewards.Contract.LastTimeRewardApplicable(&_IStakingRewards.CallOpts)\n}", "func ValidateRewardTx(tx *types.Transaction, header *types.BlockHeader) error {\n\tif tx.Data.Type != types.TxTypeReward || !tx.Data.From.IsEmpty() || tx.Data.AccountNonce != 0 || tx.Data.GasPrice.Cmp(common.Big0) != 0 || tx.Data.GasLimit != 0 || len(tx.Data.Payload) != 0 {\n\t\treturn errInvalidReward\n\t}\n\n\t// validate to address\n\tto := tx.Data.To\n\tif to.IsEmpty() {\n\t\treturn errEmptyToAddress\n\t}\n\n\tif !to.Equal(header.Creator) {\n\t\treturn errCoinbaseMismatch\n\t}\n\n\t// validate reward\n\tamount := tx.Data.Amount\n\tif err := validateReward(amount); err != nil {\n\t\treturn err\n\t}\n\n\treward := consensus.GetReward(header.Height)\n\tif reward == nil || reward.Cmp(amount) != 0 {\n\t\treturn fmt.Errorf(\"invalid reward Amount, block height %d, want %s, got %s\", header.Height, reward, amount)\n\t}\n\n\t// validate timestamp\n\tif tx.Data.Timestamp != header.CreateTimestamp.Uint64() {\n\t\treturn errTimestampMismatch\n\t}\n\n\treturn nil\n}", "func (_XStaking *XStakingCaller) RewardRate(opts *bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _XStaking.contract.Call(opts, &out, \"rewardRate\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_Lmc *LmcCaller) UserAccruedRewards(opts *bind.CallOpts, arg0 common.Address) (*big.Int, error) {\n\tvar out 
[]interface{}\n\terr := _Lmc.contract.Call(opts, &out, \"userAccruedRewards\", arg0)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_BondedECDSAKeep *BondedECDSAKeepSession) DistributeERC20Reward(_tokenAddress common.Address, _value *big.Int) (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.Contract.DistributeERC20Reward(&_BondedECDSAKeep.TransactOpts, _tokenAddress, _value)\n}", "func (_Contract *ContractSession) TaskHandlingReward() (*big.Int, error) {\n\treturn _Contract.Contract.TaskHandlingReward(&_Contract.CallOpts)\n}", "func (k Keeper) ClaimEarnReward(ctx sdk.Context, owner, receiver sdk.AccAddress, denom string, multiplierName string) error {\n\tmultiplier, found := k.GetMultiplierByDenom(ctx, denom, multiplierName)\n\tif !found {\n\t\treturn errorsmod.Wrapf(types.ErrInvalidMultiplier, \"denom '%s' has no multiplier '%s'\", denom, multiplierName)\n\t}\n\n\tclaimEnd := k.GetClaimEnd(ctx)\n\n\tif ctx.BlockTime().After(claimEnd) {\n\t\treturn errorsmod.Wrapf(types.ErrClaimExpired, \"block time %s > claim end time %s\", ctx.BlockTime(), claimEnd)\n\t}\n\n\tsyncedClaim, found := k.GetSynchronizedEarnClaim(ctx, owner)\n\tif !found {\n\t\treturn errorsmod.Wrapf(types.ErrClaimNotFound, \"address: %s\", owner)\n\t}\n\n\tamt := syncedClaim.Reward.AmountOf(denom)\n\n\tclaimingCoins := sdk.NewCoins(sdk.NewCoin(denom, amt))\n\trewardCoins := sdk.NewCoins(sdk.NewCoin(denom, sdk.NewDecFromInt(amt).Mul(multiplier.Factor).RoundInt()))\n\tif rewardCoins.IsZero() {\n\t\treturn types.ErrZeroClaim\n\t}\n\tlength := k.GetPeriodLength(ctx.BlockTime(), multiplier.MonthsLockup)\n\n\terr := k.SendTimeLockedCoinsToAccount(ctx, types.IncentiveMacc, receiver, rewardCoins, length)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// remove claimed coins (NOT reward coins)\n\tsyncedClaim.Reward = syncedClaim.Reward.Sub(claimingCoins...)\n\tk.SetEarnClaim(ctx, syncedClaim)\n\n\tctx.EventManager().EmitEvent(\n\t\tsdk.NewEvent(\n\t\t\ttypes.EventTypeClaim,\n\t\t\tsdk.NewAttribute(types.AttributeKeyClaimedBy, owner.String()),\n\t\t\tsdk.NewAttribute(types.AttributeKeyClaimAmount, claimingCoins.String()),\n\t\t\tsdk.NewAttribute(types.AttributeKeyClaimType, syncedClaim.GetType()),\n\t\t),\n\t)\n\treturn nil\n}", "func (gm GlobalManager) GetRewardAndPopFromWindow(\n\tctx sdk.Context, evaluate types.Coin, penaltyScore sdk.Rat) (types.Coin, sdk.Error) {\n\tif evaluate.IsZero() {\n\t\treturn types.NewCoinFromInt64(0), nil\n\t}\n\n\tconsumptionMeta, err := gm.storage.GetConsumptionMeta(ctx)\n\tif err != nil {\n\t\treturn types.NewCoinFromInt64(0), err\n\t}\n\n\t// consumptionRatio = (this consumption * penalty score) / (total consumption in 7 days window)\n\tconsumptionRatio :=\n\t\tevaluate.ToRat().Mul(sdk.OneRat().Sub(penaltyScore)).Quo(\n\t\t\tconsumptionMeta.ConsumptionWindow.ToRat())\n\t// reward = (consumption reward pool) * (consumptionRatio)\n\treward := types.RatToCoin(\n\t\tconsumptionMeta.ConsumptionRewardPool.ToRat().Mul(consumptionRatio))\n\tconsumptionMeta.ConsumptionRewardPool = consumptionMeta.ConsumptionRewardPool.Minus(reward)\n\tconsumptionMeta.ConsumptionWindow = consumptionMeta.ConsumptionWindow.Minus(evaluate)\n\tif err := gm.addTotalLinoCoin(ctx, reward); err != nil {\n\t\treturn types.NewCoinFromInt64(0), err\n\t}\n\tif err := gm.storage.SetConsumptionMeta(ctx, consumptionMeta); err != nil {\n\t\treturn types.NewCoinFromInt64(0), err\n\t}\n\treturn reward, nil\n}", "func 
GenerateGetRewardRatioScript(env Environment) []byte {\n\tcode := assets.MustAssetString(rewardRatioFilename)\n\n\treturn []byte(replaceAddresses(code, env))\n}", "func (d *Dao) GetById(id int64) (reward *model.AnchorReward, err error) {\n\trewards := []*model.AnchorReward{}\n\tif err := d.orm.Model(&model.AnchorReward{}).Find(&rewards, \"id=?\", id).Error; err != nil {\n\t\tlog.Error(\"getRewardById (%v) error(%v)\", id, err)\n\t\treturn reward, err\n\t}\n\tif len(rewards) != 0 {\n\t\treward = rewards[0]\n\t}\n\n\treturn\n}", "func (_RandomBeacon *RandomBeaconCaller) AvailableRewards(opts *bind.CallOpts, stakingProvider common.Address) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _RandomBeacon.contract.Call(opts, &out, \"availableRewards\", stakingProvider)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_XStaking *XStakingCaller) LastTimeRewardApplicable(opts *bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _XStaking.contract.Call(opts, &out, \"lastTimeRewardApplicable\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func EstimatedRewards(request []string) (float64, error) {\n\tcoinId, err := strconv.ParseUint(request[0], 10, 64)\n\tif err != nil {\n\t\treturn 0.00, errors.New(\"Invalid coinid format\")\n\t}\n\n\twtmClient := NewWhatToMineClient(nil, BASE, userAgent)\n\twtmClient.SetDebug(debug)\n\tstatus, err := wtmClient.GetCoin(coinId, 1000000, 0, 0)\n\tif err != nil {\n\t\treturn 0.00, err\n\t}\n\treturn status.EstimatedRewards, nil\n}", "func (_XStaking *XStakingCaller) Rewards(opts *bind.CallOpts, arg0 common.Address) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _XStaking.contract.Call(opts, &out, \"rewards\", arg0)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}" ]
[ "0.7359368", "0.7301453", "0.7164745", "0.71411335", "0.7125285", "0.7048926", "0.7008327", "0.69872797", "0.69602764", "0.6954469", "0.690628", "0.6885635", "0.6868693", "0.68661374", "0.6859745", "0.67821544", "0.66738814", "0.6654499", "0.6475623", "0.64529395", "0.6261458", "0.6246468", "0.62242985", "0.6195933", "0.6158053", "0.61578", "0.6118841", "0.60365266", "0.6016429", "0.6001463", "0.59719723", "0.594712", "0.594583", "0.5936152", "0.58616644", "0.5839224", "0.5821445", "0.57871443", "0.5748447", "0.5716853", "0.56978446", "0.5695439", "0.56791675", "0.56055015", "0.55523235", "0.5459567", "0.54541695", "0.5434743", "0.540461", "0.54011714", "0.5380912", "0.53638774", "0.5348907", "0.5340571", "0.5326437", "0.52942514", "0.52936494", "0.528707", "0.52851385", "0.5278455", "0.5271794", "0.5271559", "0.5236288", "0.5235212", "0.52292746", "0.5227996", "0.52249175", "0.52187693", "0.521759", "0.5208392", "0.51973426", "0.5195461", "0.5185917", "0.51845515", "0.51736575", "0.5168641", "0.5138485", "0.51206106", "0.5099771", "0.5086932", "0.5081647", "0.507968", "0.5079229", "0.50727415", "0.50620556", "0.5061022", "0.5053968", "0.5036312", "0.50357723", "0.5032823", "0.50285256", "0.5027033", "0.502235", "0.5017938", "0.501224", "0.50000733", "0.4992881", "0.49908218", "0.4987382", "0.49841532" ]
0.75374323
0
updateRewardHistory stores reward information into reward history table
func (p *Protocol) updateRewardHistory(tx *sql.Tx, epochNumber uint64, actionHash string, rewardInfoMap map[string]*RewardInfo) error {
	valStrs := make([]string, 0, len(rewardInfoMap))
	valArgs := make([]interface{}, 0, len(rewardInfoMap)*7)
	for rewardAddress, rewards := range rewardInfoMap {
		blockReward := rewards.BlockReward.String()
		epochReward := rewards.EpochReward.String()
		foundationBonus := rewards.FoundationBonus.String()
		var candidateName string
		// If more than one candidates share the same reward address, just use the first candidate as their delegate
		if len(p.RewardAddrToName[rewardAddress]) > 0 {
			candidateName = p.RewardAddrToName[rewardAddress][0]
		}
		valStrs = append(valStrs, "(?, ?, ?, ?, CAST(? as DECIMAL(65, 0)), CAST(? as DECIMAL(65, 0)), CAST(? as DECIMAL(65, 0)))")
		valArgs = append(valArgs, epochNumber, actionHash, rewardAddress, candidateName, blockReward, epochReward, foundationBonus)
	}
	insertQuery := fmt.Sprintf(insertRewardHistory, RewardHistoryTableName, strings.Join(valStrs, ","))
	if _, err := tx.Exec(insertQuery, valArgs...); err != nil {
		return err
	}
	return nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *MemoryRewardStorage) Update(reward rewards.Reward) {\n\tfor index, r := range m.rewards {\n\t\tif r.ID == reward.ID {\n\t\t\tm.rewards[index] = reward\n\t\t}\n\t}\n}", "func (_Token *TokenSession) BaseRewardHistory(index *big.Int) (*big.Int, *big.Int, *big.Int, *big.Int, error) {\n\treturn _Token.Contract.BaseRewardHistory(&_Token.CallOpts, index)\n}", "func (as AccountStorage) SetRewardHistory(\n\tctx sdk.Context, me types.AccountKey, bucketSlot int64, history *RewardHistory) sdk.Error {\n\tstore := ctx.KVStore(as.key)\n\thistoryBytes, err := as.cdc.MarshalJSON(*history)\n\tif err != nil {\n\t\treturn ErrFailedToMarshalRewardHistory(err)\n\t}\n\tstore.Set(getRewardHistoryKey(me, bucketSlot), historyBytes)\n\treturn nil\n}", "func (d *Dao) AddReward(c context.Context, iRewardID int64, uid int64, iSource int64, iRoomid int64, iLifespan int64) (err error) {\n\t//aReward, _ := getRewardConfByLid(iRewardID)\n\n\tm, _ := time.ParseDuration(fmt.Sprintf(\"+%dh\", iLifespan))\n\n\targ := &AnchorTaskModel.AnchorReward{\n\t\tUid: uid,\n\t\tRewardId: iRewardID,\n\t\tRoomid: iRoomid,\n\t\tSource: iSource,\n\t\tAchieveTime: xtime.Time(time.Now().Unix()),\n\t\tExpireTime: xtime.Time(time.Now().Add(m).Unix()),\n\t\tStatus: model.RewardUnUsed,\n\t}\n\n\t//spew.Dump\n\t// (arg)\n\tif err := d.orm.Create(arg).Error; err != nil {\n\t\tlog.Error(\"addReward(%v) error(%v)\", arg, err)\n\t\treturn err\n\t}\n\n\tif err := d.SetNewReward(c, uid, int64(1)); err != nil {\n\t\tlog.Error(\"addRewardMc(%v) error(%v)\", uid, err)\n\t}\n\n\tif err := d.SetHasReward(c, uid, int64(1)); err != nil {\n\t\tlog.Error(\"SetHasReward(%v) error(%v)\", uid, err)\n\t}\n\n\tlog.Info(\"addReward (%v) succ\", arg)\n\n\treturn\n}", "func (_Token *TokenCallerSession) BaseRewardHistory(index *big.Int) (*big.Int, *big.Int, *big.Int, *big.Int, error) {\n\treturn _Token.Contract.BaseRewardHistory(&_Token.CallOpts, index)\n}", "func accumulateRewards(config *params.ChainConfig, state *state.DB, header *types.Header) {\n\t// TODO: implement mining rewards\n}", "func (_Token *TokenCaller) BaseRewardHistory(opts *bind.CallOpts, index *big.Int) (*big.Int, *big.Int, *big.Int, *big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t\tret1 = new(*big.Int)\n\t\tret2 = new(*big.Int)\n\t\tret3 = new(*big.Int)\n\t)\n\tout := &[]interface{}{\n\t\tret0,\n\t\tret1,\n\t\tret2,\n\t\tret3,\n\t}\n\terr := _Token.contract.Call(opts, out, \"baseRewardHistory\", index)\n\treturn *ret0, *ret1, *ret2, *ret3, err\n}", "func (as AccountStorage) SetReward(ctx sdk.Context, accKey types.AccountKey, reward *Reward) sdk.Error {\n\tstore := ctx.KVStore(as.key)\n\trewardByte, err := as.cdc.MarshalJSON(*reward)\n\tif err != nil {\n\t\treturn ErrFailedToMarshalReward(err)\n\t}\n\tstore.Set(getRewardKey(accKey), rewardByte)\n\treturn nil\n}", "func (s *MutableState) AddRewards(time epochtime.EpochTime, factor *quantity.Quantity, accounts []signature.PublicKey) error {\n\tsteps, err := s.RewardSchedule()\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar activeStep *staking.RewardStep\n\tfor _, step := range steps {\n\t\tif time < step.Until {\n\t\t\tactiveStep = &step\n\t\t\tbreak\n\t\t}\n\t}\n\tif activeStep == nil {\n\t\t// We're past the end of the schedule.\n\t\treturn nil\n\t}\n\n\tcommonPool, err := s.CommonPool()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"loading common pool\")\n\t}\n\n\tfor _, id := range accounts {\n\t\tent := s.Account(id)\n\n\t\tq := ent.Escrow.Active.Balance.Clone()\n\t\t// Multiply first.\n\t\tif err := q.Mul(factor); err != nil 
{\n\t\t\treturn errors.Wrap(err, \"multiplying by reward factor\")\n\t\t}\n\t\tif err := q.Mul(&activeStep.Scale); err != nil {\n\t\t\treturn errors.Wrap(err, \"multiplying by reward step scale\")\n\t\t}\n\t\tif err := q.Quo(staking.RewardAmountDenominator); err != nil {\n\t\t\treturn errors.Wrap(err, \"dividing by reward amount denominator\")\n\t\t}\n\n\t\tif q.IsZero() {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar com *quantity.Quantity\n\t\trate := ent.Escrow.CommissionSchedule.CurrentRate(time)\n\t\tif rate != nil {\n\t\t\tcom = q.Clone()\n\t\t\t// Multiply first.\n\t\t\tif err := com.Mul(rate); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"multiplying by commission rate\")\n\t\t\t}\n\t\t\tif err := com.Quo(staking.CommissionRateDenominator); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"dividing by commission rate denominator\")\n\t\t\t}\n\n\t\t\tif err := q.Sub(com); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"subtracting commission\")\n\t\t\t}\n\t\t}\n\n\t\tif !q.IsZero() {\n\t\t\tif err := quantity.Move(&ent.Escrow.Active.Balance, commonPool, q); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"transferring to active escrow balance from common pool\")\n\t\t\t}\n\t\t}\n\n\t\tif com != nil && !com.IsZero() {\n\t\t\tdelegation := s.Delegation(id, id)\n\n\t\t\tif err := ent.Escrow.Active.Deposit(&delegation.Shares, commonPool, com); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"depositing commission\")\n\t\t\t}\n\n\t\t\ts.SetDelegation(id, id, delegation)\n\t\t}\n\n\t\ts.SetAccount(id, ent)\n\t}\n\n\ts.SetCommonPool(commonPool)\n\n\treturn nil\n}", "func (p *Protocol) getRewardHistory(actionHash string) ([]*RewardHistory, error) {\n\tdb := p.Store.GetDB()\n\n\tgetQuery := fmt.Sprintf(selectRewardHistory,\n\t\tRewardHistoryTableName)\n\tstmt, err := db.Prepare(getQuery)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to prepare get query\")\n\t}\n\tdefer stmt.Close()\n\n\trows, err := stmt.Query(actionHash)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to execute get query\")\n\t}\n\n\tvar rewardHistory RewardHistory\n\tparsedRows, err := s.ParseSQLRows(rows, &rewardHistory)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to parse results\")\n\t}\n\n\tif len(parsedRows) == 0 {\n\t\treturn nil, indexprotocol.ErrNotExist\n\t}\n\n\tvar rewardHistoryList []*RewardHistory\n\tfor _, parsedRow := range parsedRows {\n\t\trewards := parsedRow.(*RewardHistory)\n\t\trewardHistoryList = append(rewardHistoryList, rewards)\n\t}\n\treturn rewardHistoryList, nil\n}", "func ViewReward(rw http.ResponseWriter, r *http.Request) {\n\t// get the token\n\treqToken := r.Header.Get(\"Authorization\")\n\t\n\t// get the claims\n\tclaims, isNotValid := GetClaims(reqToken, rw)\n\tif isNotValid {\n\t\treturn\n\t}\n\n\tdt, err := db.GetUserRewards(claims.Roll)\n\tif err != nil {\n\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\trw.Write(Rsp(err.Error(), \"Server Error\"))\n\t\treturn\n\t}\n\trw.WriteHeader(http.StatusOK)\n\tres := c.RespData{\n\t\tMessage: \"All data\",\n\t\tData: dt,\n\t}\n\tjson.NewEncoder(rw).Encode(res)\n}", "func m7RewardsAndDatesPart2UpdateAccounts(db *IndexerDb, accountData []addressAccountData, assetDataMap map[uint32]createClose, txnID txnID, state *MigrationState) error {\n\t// Make sure round accounting doesn't interfere with updating these accounts.\n\tdb.accountingLock.Lock()\n\tdefer db.accountingLock.Unlock()\n\n\t// Open a postgres transaction and submit results for each account.\n\ttx, err := db.db.BeginTx(context.Background(), 
&serializable)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"m7: tx begin: %v\", err)\n\t}\n\tdefer tx.Rollback() // ignored if .Commit() first\n\n\t// 1. updateTotalRewards - conditionally update the total rewards if the account wasn't closed during iteration.\n\t// $3 is the round after which new blocks will have the closed_at field set.\n\t// We only set rewards_total when closed_at was set before that round.\n\tupdateTotalRewards, err := tx.Prepare(`UPDATE account SET rewards_total = coalesce(rewards_total, 0) + $2 WHERE addr = $1 AND coalesce(closed_at, 0) < $3`)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"m7: set rewards prepare: %v\", err)\n\t}\n\tdefer updateTotalRewards.Close()\n\n\t// 2. setCreateCloseAccount - set the accounts create/close rounds.\n\t// We always set the created_at field because it will never change.\n\t// closed_at may already be set by the time the migration runs, or it might need to be cleared out.\n\tsetCreateCloseAccount, err := tx.Prepare(`UPDATE account SET created_at = $2, closed_at = coalesce(closed_at, $3), deleted = coalesce(deleted, $4) WHERE addr = $1`)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"m7: set create close prepare: %v\", err)\n\t}\n\tdefer setCreateCloseAccount.Close()\n\n\t// 3. setCreateCloseAsset - set the accounts created assets create/close rounds.\n\tsetCreateCloseAsset, err := tx.Prepare(`UPDATE asset SET created_at = $3, closed_at = coalesce(closed_at, $4), deleted = coalesce(deleted, $5) WHERE creator_addr = $1 AND index=$2`)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"m7: set create close asset prepare: %v\", err)\n\t}\n\tdefer setCreateCloseAsset.Close()\n\n\t// 4. setCreateCloseAssetHolding - (upsert) set the accounts asset holding create/close rounds.\n\tsetCreateCloseAssetHolding, err := tx.Prepare(`INSERT INTO account_asset(addr, assetid, amount, frozen, created_at, closed_at, deleted) VALUES ($1, $2, 0, false, $3, $4, $5) ON CONFLICT (addr, assetid) DO UPDATE SET created_at = EXCLUDED.created_at, closed_at = coalesce(account_asset.closed_at, EXCLUDED.closed_at), deleted = coalesce(account_asset.deleted, EXCLUDED.deleted)`)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"m7: set create close asset holding prepare: %v\", err)\n\t}\n\tdefer setCreateCloseAssetHolding.Close()\n\n\t// 5. setCreateCloseApp - set the accounts created apps create/close rounds.\n\tsetCreateCloseApp, err := tx.Prepare(`UPDATE app SET created_at = $3, closed_at = coalesce(closed_at, $4), deleted = coalesce(deleted, $5) WHERE creator = $1 AND index=$2`)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"m7: set create close app prepare: %v\", err)\n\t}\n\tdefer setCreateCloseApp.Close()\n\n\t// 6. setCreateCloseAppLocal - (upsert) set the accounts local apps create/close rounds.\n\tsetCreateCloseAppLocal, err := tx.Prepare(`INSERT INTO account_app (addr, app, created_at, closed_at, deleted) VALUES ($1, $2, $3, $4, $5) ON CONFLICT (addr, app) DO UPDATE SET created_at = EXCLUDED.created_at, closed_at = coalesce(account_app.closed_at, EXCLUDED.closed_at), deleted = coalesce(account_app.deleted, EXCLUDED.deleted)`)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"m7: set create close app local prepare: %v\", err)\n\t}\n\tdefer setCreateCloseAppLocal.Close()\n\n\t// loop through all of the accounts.\n\tfor _, ad := range accountData {\n\t\taddressStr := ad.address.String()\n\n\t\t// 1. 
updateTotalRewards - conditionally update the total rewards if the account wasn't closed during iteration.\n\t\t_, err = updateTotalRewards.Exec(\n\t\t\tad.address[:], ad.accountData.cumulativeRewards, state.NextRound)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"m7: failed to update %s with rewards %d: %v\",\n\t\t\t\taddressStr, ad.accountData.cumulativeRewards, err)\n\t\t}\n\n\t\t// 2. setCreateCloseAccount - set the accounts create/close rounds.\n\t\t{\n\t\t\tdeleted := sql.NullBool{\n\t\t\t\tBool: ad.accountData.account.deleted,\n\t\t\t\tValid: ad.accountData.account.deletedValid,\n\t\t\t}\n\t\t\tcreated := sql.NullInt64{\n\t\t\t\tInt64: int64(ad.accountData.account.created),\n\t\t\t\tValid: ad.accountData.account.createdValid,\n\t\t\t}\n\t\t\tclosed := sql.NullInt64{\n\t\t\t\tInt64: int64(ad.accountData.account.closed),\n\t\t\t\tValid: ad.accountData.account.closedValid,\n\t\t\t}\n\t\t\t_, err = setCreateCloseAccount.Exec(ad.address[:], created, closed, deleted)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"m7: failed to update %s with create/close: %v\", addressStr, err)\n\t\t\t}\n\t\t}\n\n\t\t// 4. setCreateCloseAssetHolding - (upsert) set the accounts asset holding create/close rounds.\n\t\terr = executeForEachCreatable(setCreateCloseAssetHolding, ad.address,\n\t\t\tad.accountData.assetHolding)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"m7: failed to update %s with asset holding create/close: %v\",\n\t\t\t\taddressStr, err)\n\t\t}\n\n\t\tif ad.accountData.additional != nil {\n\t\t\t// 3. setCreateCloseAsset - set the accounts created assets create/close rounds.\n\t\t\tfor index := range ad.accountData.additional.asset {\n\t\t\t\tcc, ok := assetDataMap[index]\n\t\t\t\tif !ok {\n\t\t\t\t\treturn fmt.Errorf(\"m7: asset index %d created by %s is not in assetDataMap\",\n\t\t\t\t\t\tindex, addressStr)\n\t\t\t\t}\n\t\t\t\terr := executeCreatableCC(setCreateCloseAsset, ad.address, index, cc)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"m7: failed to update %s with asset index %d create/close: %v\",\n\t\t\t\t\t\taddressStr, index, err)\n\t\t\t\t}\n\t\t\t\tdelete(assetDataMap, index)\n\t\t\t}\n\n\t\t\t// 5. setCreateCloseApp - set the accounts created apps create/close rounds.\n\t\t\terr = executeForEachCreatable(setCreateCloseApp, ad.address, ad.accountData.additional.app)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"m7: failed to update %s with app create/close: %v\", addressStr, err)\n\t\t\t}\n\n\t\t\t// 6. 
setCreateCloseAppLocal - (upsert) set the accounts local apps create/close rounds.\n\t\t\terr = executeForEachCreatable(setCreateCloseAppLocal, ad.address,\n\t\t\t\tad.accountData.additional.appLocal)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"m7: failed to update %s with app local create/close: %v\",\n\t\t\t\t\taddressStr, err)\n\t\t\t}\n\t\t}\n\t}\n\n\t{\n\t\tround := int64(txnID.round)\n\t\tintra := int64(txnID.intra)\n\t\tstate.PointerRound = &round\n\t\tstate.PointerIntra = &intra\n\t}\n\tmigrationStateJSON := encoding.EncodeJSON(state)\n\t_, err = db.db.Exec(setMetastateUpsert, migrationMetastateKey, migrationStateJSON)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"m7: failed to update migration checkpoint: %v\", err)\n\t}\n\n\t// Commit transactions.\n\terr = tx.Commit()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"m7: failed to commit changes: %v\", err)\n\t}\n\n\treturn nil\n}", "func (s *MutableState) AddRewardSingleAttenuated(time epochtime.EpochTime, factor *quantity.Quantity, attenuationNumerator, attenuationDenominator int, account signature.PublicKey) error {\n\tsteps, err := s.RewardSchedule()\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar activeStep *staking.RewardStep\n\tfor _, step := range steps {\n\t\tif time < step.Until {\n\t\t\tactiveStep = &step\n\t\t\tbreak\n\t\t}\n\t}\n\tif activeStep == nil {\n\t\t// We're past the end of the schedule.\n\t\treturn nil\n\t}\n\n\tvar numQ, denQ quantity.Quantity\n\tif err = numQ.FromInt64(int64(attenuationNumerator)); err != nil {\n\t\treturn errors.Wrapf(err, \"importing attenuation numerator %d\", attenuationNumerator)\n\t}\n\tif err = denQ.FromInt64(int64(attenuationDenominator)); err != nil {\n\t\treturn errors.Wrapf(err, \"importing attenuation denominator %d\", attenuationDenominator)\n\t}\n\n\tcommonPool, err := s.CommonPool()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"loading common pool\")\n\t}\n\n\tent := s.Account(account)\n\n\tq := ent.Escrow.Active.Balance.Clone()\n\t// Multiply first.\n\tif err := q.Mul(factor); err != nil {\n\t\treturn errors.Wrap(err, \"multiplying by reward factor\")\n\t}\n\tif err := q.Mul(&activeStep.Scale); err != nil {\n\t\treturn errors.Wrap(err, \"multiplying by reward step scale\")\n\t}\n\tif err := q.Mul(&numQ); err != nil {\n\t\treturn errors.Wrap(err, \"multiplying by attenuation numerator\")\n\t}\n\tif err := q.Quo(staking.RewardAmountDenominator); err != nil {\n\t\treturn errors.Wrap(err, \"dividing by reward amount denominator\")\n\t}\n\tif err := q.Quo(&denQ); err != nil {\n\t\treturn errors.Wrap(err, \"dividing by attenuation denominator\")\n\t}\n\n\tif q.IsZero() {\n\t\treturn nil\n\t}\n\n\tvar com *quantity.Quantity\n\trate := ent.Escrow.CommissionSchedule.CurrentRate(time)\n\tif rate != nil {\n\t\tcom = q.Clone()\n\t\t// Multiply first.\n\t\tif err := com.Mul(rate); err != nil {\n\t\t\treturn errors.Wrap(err, \"multiplying by commission rate\")\n\t\t}\n\t\tif err := com.Quo(staking.CommissionRateDenominator); err != nil {\n\t\t\treturn errors.Wrap(err, \"dividing by commission rate denominator\")\n\t\t}\n\n\t\tif err := q.Sub(com); err != nil {\n\t\t\treturn errors.Wrap(err, \"subtracting commission\")\n\t\t}\n\t}\n\n\tif !q.IsZero() {\n\t\tif err := quantity.Move(&ent.Escrow.Active.Balance, commonPool, q); err != nil {\n\t\t\treturn errors.Wrap(err, \"transferring to active escrow balance from common pool\")\n\t\t}\n\t}\n\n\tif com != nil && !com.IsZero() {\n\t\tdelegation := s.Delegation(account, account)\n\n\t\tif err := 
ent.Escrow.Active.Deposit(&delegation.Shares, commonPool, com); err != nil {\n\t\t\treturn errors.Wrap(err, \"depositing commission\")\n\t\t}\n\n\t\ts.SetDelegation(account, account, delegation)\n\t}\n\n\ts.SetAccount(account, ent)\n\n\ts.SetCommonPool(commonPool)\n\n\treturn nil\n}", "func (c *Coinbase) AddReward(output *Output) {\n\toutput.EncryptedMask = make([]byte, 1)\n\tc.Rewards = append(c.Rewards, output)\n}", "func (room *RoomRecorder) setHistory(history []*action_.PlayerAction) {\n\troom.historyM.Lock()\n\troom._history = history\n\troom.historyM.Unlock()\n}", "func (genA *GeneticAlgorithm) UpdateHistory(maxFitness, averageFitness float64) {\n\tgenA.AverageFitnessHistory = append(genA.AverageFitnessHistory, averageFitness)\n\tgenA.MaxFitnessHistory = append(genA.MaxFitnessHistory, maxFitness)\n}", "func (path *Path) AddRewards(rewards map[*Reward]int) {\n\tfor key, value := range rewards {\n\t\tpath.rewards[key] += value\n\t}\n}", "func (_RandomBeacon *RandomBeaconTransactor) UpdateRewardParameters(opts *bind.TransactOpts, sortitionPoolRewardsBanDuration *big.Int, relayEntryTimeoutNotificationRewardMultiplier *big.Int, unauthorizedSigningNotificationRewardMultiplier *big.Int, dkgMaliciousResultNotificationRewardMultiplier *big.Int) (*types.Transaction, error) {\n\treturn _RandomBeacon.contract.Transact(opts, \"updateRewardParameters\", sortitionPoolRewardsBanDuration, relayEntryTimeoutNotificationRewardMultiplier, unauthorizedSigningNotificationRewardMultiplier, dkgMaliciousResultNotificationRewardMultiplier)\n}", "func m7RewardsAndDatesPart2(db *IndexerDb, state *MigrationState) error {\n\tdb.log.Print(\"m7 account cumulative rewards migration starting\")\n\n\t// Skip the work if all accounts have previously been updated.\n\tif (state.PointerRound == nil) || (*state.PointerRound != 0) || (*state.PointerIntra != 0) {\n\t\tmaxRound := uint32(state.NextRound)\n\n\t\t// Get the number of accounts to potentially warn the user about high memory usage.\n\t\terr := warnUser(db, maxRound)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Get special accounts, so that we can ignore them throughout the migration. A later migration\n\t\t// handles them.\n\t\tspecialAccounts, err := db.GetSpecialAccounts()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"m7: unable to get special accounts: %v\", err)\n\t\t}\n\t\t// Get the transaction id that created each account. This function simple loops over all\n\t\t// transactions from rounds <= `maxRound` in arbitrary order.\n\t\taccountsFirstUsed, err := getAccountsFirstUsed(db, maxRound, specialAccounts)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Get account data for accounts without transactions such as genesis accounts.\n\t\t// This function reads the `account` table but only considers accounts created before or at\n\t\t// `maxRound`.\n\t\treadyAccountData, err := getAccountsWithoutTxnData(\n\t\t\tdb, maxRound, specialAccounts, accountsFirstUsed)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Finally, read all transactions from most recent to oldest, update rewards and create/close dates,\n\t\t// and write this account data to the database. To save memory, this function removes account's\n\t\t// data as soon as we reach the transaction that created this account at which point older\n\t\t// transactions cannot update its state. 
It writes account data to the database in batches.\n\t\terr = updateAccounts(db, specialAccounts, accountsFirstUsed, readyAccountData, state)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Update migration state.\n\tstate.NextMigration++\n\tstate.NextRound = 0\n\tstate.PointerRound = nil\n\tstate.PointerIntra = nil\n\tmigrationStateJSON := encoding.EncodeJSON(state)\n\t_, err := db.db.Exec(setMetastateUpsert, migrationMetastateKey, migrationStateJSON)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"m7: failed to write final migration state: %v\", err)\n\t}\n\n\treturn nil\n}", "func (_RandomBeacon *RandomBeaconTransactorSession) UpdateRewardParameters(sortitionPoolRewardsBanDuration *big.Int, relayEntryTimeoutNotificationRewardMultiplier *big.Int, unauthorizedSigningNotificationRewardMultiplier *big.Int, dkgMaliciousResultNotificationRewardMultiplier *big.Int) (*types.Transaction, error) {\n\treturn _RandomBeacon.Contract.UpdateRewardParameters(&_RandomBeacon.TransactOpts, sortitionPoolRewardsBanDuration, relayEntryTimeoutNotificationRewardMultiplier, unauthorizedSigningNotificationRewardMultiplier, dkgMaliciousResultNotificationRewardMultiplier)\n}", "func (_Lmc *LmcSession) UpdateRewardMultipliers() (*types.Transaction, error) {\n\treturn _Lmc.Contract.UpdateRewardMultipliers(&_Lmc.TransactOpts)\n}", "func (_RandomBeacon *RandomBeaconSession) UpdateRewardParameters(sortitionPoolRewardsBanDuration *big.Int, relayEntryTimeoutNotificationRewardMultiplier *big.Int, unauthorizedSigningNotificationRewardMultiplier *big.Int, dkgMaliciousResultNotificationRewardMultiplier *big.Int) (*types.Transaction, error) {\n\treturn _RandomBeacon.Contract.UpdateRewardParameters(&_RandomBeacon.TransactOpts, sortitionPoolRewardsBanDuration, relayEntryTimeoutNotificationRewardMultiplier, unauthorizedSigningNotificationRewardMultiplier, dkgMaliciousResultNotificationRewardMultiplier)\n}", "func (k Querier) Rewards(c context.Context, req *types.QueryRewardsRequest) (*types.QueryRewardsResponse, error) {\n\tif req == nil {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"invalid request\")\n\t}\n\n\tif req.StakingCoinDenom != \"\" {\n\t\tif err := sdk.ValidateDenom(req.StakingCoinDenom); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tctx := sdk.UnwrapSDKContext(c)\n\tstore := ctx.KVStore(k.storeKey)\n\tvar rewards []types.Reward\n\tvar pageRes *query.PageResponse\n\tvar err error\n\n\tif req.Farmer != \"\" {\n\t\tvar farmerAcc sdk.AccAddress\n\t\tfarmerAcc, err = sdk.AccAddressFromBech32(req.Farmer)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tstorePrefix := types.GetRewardsByFarmerIndexKey(farmerAcc)\n\t\tindexStore := prefix.NewStore(store, storePrefix)\n\t\tpageRes, err = query.FilteredPaginate(indexStore, req.Pagination, func(key, value []byte, accumulate bool) (bool, error) {\n\t\t\t_, stakingCoinDenom := types.ParseRewardsByFarmerIndexKey(append(storePrefix, key...))\n\t\t\tif req.StakingCoinDenom != \"\" {\n\t\t\t\tif stakingCoinDenom != req.StakingCoinDenom {\n\t\t\t\t\treturn false, nil\n\t\t\t\t}\n\t\t\t}\n\t\t\treward, found := k.GetReward(ctx, stakingCoinDenom, farmerAcc)\n\t\t\tif !found { // TODO: remove this check\n\t\t\t\treturn false, fmt.Errorf(\"reward not found\")\n\t\t\t}\n\t\t\tif accumulate {\n\t\t\t\trewards = append(rewards, reward)\n\t\t\t}\n\t\t\treturn true, nil\n\t\t})\n\t} else {\n\t\tvar storePrefix []byte\n\t\tif req.StakingCoinDenom != \"\" {\n\t\t\tstorePrefix = types.GetRewardsByStakingCoinDenomKey(req.StakingCoinDenom)\n\t\t} else 
{\n\t\t\tstorePrefix = types.RewardKeyPrefix\n\t\t}\n\t\trewardStore := prefix.NewStore(store, storePrefix)\n\n\t\tpageRes, err = query.Paginate(rewardStore, req.Pagination, func(key, value []byte) error {\n\t\t\tstakingCoinDenom, farmerAcc := types.ParseRewardKey(append(storePrefix, key...))\n\t\t\trewardCoins, err := k.UnmarshalRewardCoins(value)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\trewards = append(rewards, types.Reward{\n\t\t\t\tFarmer: farmerAcc.String(),\n\t\t\t\tStakingCoinDenom: stakingCoinDenom,\n\t\t\t\tRewardCoins: rewardCoins.RewardCoins,\n\t\t\t})\n\t\t\treturn nil\n\t\t})\n\t}\n\tif err != nil {\n\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t}\n\n\treturn &types.QueryRewardsResponse{Rewards: rewards, Pagination: pageRes}, nil\n}", "func (vi *votedInfo) CalculateReward(multiplier, divider *big.Int, period int) {\n\tif multiplier.Sign() == 0 || period == 0 {\n\t\treturn\n\t}\n\tif divider.Sign() == 0 || vi.totalBondedDelegation.Sign() == 0 {\n\t\treturn\n\t}\n\t// reward = multiplier * period * bondedDelegation / (divider * totalBondedDelegation)\n\tbase := new(big.Int).Mul(multiplier, big.NewInt(int64(period)))\n\treward := new(big.Int)\n\tfor i, addrKey := range vi.rank {\n\t\tif i == vi.maxRankForReward {\n\t\t\tbreak\n\t\t}\n\t\tprep := vi.preps[addrKey]\n\t\tif prep.Enable() == false {\n\t\t\tcontinue\n\t\t}\n\n\t\treward.Mul(base, prep.GetBondedDelegation())\n\t\treward.Div(reward, divider)\n\t\treward.Div(reward, vi.totalBondedDelegation)\n\n\t\tlog.Tracef(\"VOTED REWARD %d = %d * %d * %d / (%d * %d)\",\n\t\t\treward, multiplier, period, prep.GetBondedDelegation(), divider, vi.totalBondedDelegation)\n\n\t\tprep.SetIScore(new(big.Int).Add(prep.IScore(), reward))\n\t}\n}", "func (db *Db) SaveAccountBalanceHistory(entry types.AccountBalanceHistory) error {\n\tstmt := `\nINSERT INTO account_balance_history (address, balance, delegated, unbonding, redelegating, commission, reward, timestamp) \nVALUES ($1, $2, $3, $4, $5, $6, $7, $8)\nON CONFLICT ON CONSTRAINT unique_balance_for_height DO UPDATE \n SET balance = excluded.balance,\n delegated = excluded.delegated,\n unbonding = excluded.unbonding,\n redelegating = excluded.redelegating,\n commission = excluded.commission, \n reward = excluded.reward`\n\n\t_, err := db.Sql.Exec(stmt,\n\t\tentry.Account,\n\t\tpq.Array(dbtypes.NewDbCoins(entry.Balance)),\n\t\tpq.Array(dbtypes.NewDbCoins(entry.Delegations)),\n\t\tpq.Array(dbtypes.NewDbCoins(entry.Unbonding)),\n\t\tpq.Array(dbtypes.NewDbCoins(entry.Redelegations)),\n\t\tpq.Array(dbtypes.NewDbDecCoins(entry.Commission)),\n\t\tpq.Array(dbtypes.NewDbDecCoins(entry.Reward)),\n\t\tentry.Timestamp,\n\t)\n\treturn err\n}", "func (node *TreeNode) backpropagateReward(scores [2]float64) {\n\tcurrentNode := node\n\tfor currentNode.Parent != nil {\n\t\tcurrentNode.VisitCount += 1.0\n\t\tcurrentNode.CumulativeScore[0] += scores[0]\n\t\tcurrentNode.CumulativeScore[1] += scores[1]\n\t\tcurrentNode = currentNode.Parent\n\t}\n\t//Increment root node counter\n\tcurrentNode.VisitCount += 1.0\n}", "func (_Lmc *LmcTransactorSession) UpdateRewardMultipliers() (*types.Transaction, error) {\n\treturn _Lmc.Contract.UpdateRewardMultipliers(&_Lmc.TransactOpts)\n}", "func (_Lmc *LmcTransactor) UpdateRewardMultipliers(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _Lmc.contract.Transact(opts, \"updateRewardMultipliers\")\n}", "func UpdatePriceHistory(w http.ResponseWriter, r *http.Request) {\n\ttype request struct {\n\t\tUrl string 
`json:\"url\"`\n\t}\n\n\ttype response struct {\n\t\tName string `josn:\"name\"`\n\t\tPrice float32 `json:\"price\"`\n\t}\n\n\t// get id\n\tvars := mux.Vars(r)\n\tkey, _ := strconv.Atoi(vars[\"item_id\"])\n\n\t// get item from database\n\tvar item entity.Item\n\tdatabase.Connector.First(&item, key)\n\n\t// create request\n\treq := request{\n\t\tUrl: item.Url,\n\t}\n\n\tjsonReq, err := json.Marshal(req)\n\n\t// post to scrapper api\n\tresp, err := http.Post(os.Getenv(\"SCRAPPER_HOST\"), \"application/json\", bytes.NewBuffer(jsonReq))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdefer resp.Body.Close()\n\tbodyBytes, _ := ioutil.ReadAll(resp.Body)\n\n\t// crate new Price History\n\tvar res response\n\tjson.Unmarshal(bodyBytes, &res)\n\n\tprice := entity.PriceHistory{\n\t\tItem_ID: item.ID,\n\t\tPrice: uint(res.Price),\n\t}\n\n\tdatabase.Connector.Create(&price)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusCreated)\n\tjson.NewEncoder(w).Encode(price)\n}", "func (d *Dao) UseReward(id int64, usePlat string) (rst bool, err error) {\n\tif err := d.orm.\n\t\tModel(&model.AnchorReward{}).\n\t\tWhere(\"id=?\", id).\n\t\tUpdate(map[string]interface{}{\"status\": model.RewardUsed, \"use_plat\": usePlat, \"use_time\": xtime.Time(time.Now().Unix())}).Error; err != nil {\n\t\tlog.Error(\"useReward (%v) error(%v)\", id, err)\n\t\treturn rst, err\n\t}\n\trst = true\n\treturn\n}", "func AppendHistory(db *gorm.DB, id int, newHistory []History) {\n\tvar lastMatch History\n\tdb.Where(\"player_id = ?\", id).Order(\"match_date desc\").First(&lastMatch)\n\n\tfor i := 0; i < len(newHistory); i++ {\n\t\tif newHistory[i].MatchDate.After(lastMatch.MatchDate) {\n\t\t\tdb.Create(&newHistory[i])\n\t\t}\n\t}\n}", "func m6RewardsAndDatesPart1(db *IndexerDb, state *MigrationState) error {\n\t// Cache the round in the migration metastate\n\tround, err := db.GetMaxRoundAccounted()\n\tif err == idb.ErrorNotInitialized {\n\t\t// Shouldn't end up in the migration if this were the case.\n\t\tround = 0\n\t} else if err != nil {\n\t\tdb.log.WithError(err).Errorf(\"m6: problem caching max round: %v\", err)\n\t\treturn err\n\t}\n\n\t// state is updated in the DB when calling 'sqlMigration'\n\tstate.NextRound = int64(round)\n\n\t// update metastate\n\tsqlLines := []string{\n\t\t// rewards\n\t\t`ALTER TABLE account ADD COLUMN rewards_total bigint NOT NULL DEFAULT 0`,\n\n\t\t// created/closed round\n\t\t`ALTER TABLE account ADD COLUMN deleted boolean DEFAULT NULL`,\n\t\t`ALTER TABLE account ADD COLUMN created_at bigint DEFAULT NULL`,\n\t\t`ALTER TABLE account ADD COLUMN closed_at bigint DEFAULT NULL`,\n\t\t`ALTER TABLE app ADD COLUMN deleted boolean DEFAULT NULL`,\n\t\t`ALTER TABLE app ADD COLUMN created_at bigint DEFAULT NULL`,\n\t\t`ALTER TABLE app ADD COLUMN closed_at bigint DEFAULT NULL`,\n\t\t`ALTER TABLE account_app ADD COLUMN deleted boolean DEFAULT NULL`,\n\t\t`ALTER TABLE account_app ADD COLUMN created_at bigint DEFAULT NULL`,\n\t\t`ALTER TABLE account_app ADD COLUMN closed_at bigint DEFAULT NULL`,\n\t\t`ALTER TABLE account_asset ADD COLUMN deleted boolean DEFAULT NULL`,\n\t\t`ALTER TABLE account_asset ADD COLUMN created_at bigint DEFAULT NULL`,\n\t\t`ALTER TABLE account_asset ADD COLUMN closed_at bigint DEFAULT NULL`,\n\t\t`ALTER TABLE asset ADD COLUMN deleted boolean DEFAULT NULL`,\n\t\t`ALTER TABLE asset ADD COLUMN created_at bigint DEFAULT NULL`,\n\t\t`ALTER TABLE asset ADD COLUMN closed_at bigint DEFAULT NULL`,\n\t}\n\treturn sqlMigration(db, state, sqlLines)\n}", "func (_Token 
*TokenSession) BaseReward(index *big.Int) (*big.Int, *big.Int, *big.Int, error) {\n\treturn _Token.Contract.BaseReward(&_Token.CallOpts, index)\n}", "func (s *Client) AddHistory(username, input, reply string) {\n\ts.Init(username)\n\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\n\ttx, _ := s.db.Begin()\n\tstmt, _ := tx.Prepare(`INSERT INTO history (user_id, input,reply)VALUES((SELECT id FROM users WHERE username = ?),?,?);`)\n\tdefer stmt.Close()\n\tstmt.Exec(username, input, reply)\n\ttx.Commit()\n}", "func (a Actor) AwardBlockReward(rt vmr.Runtime, params *AwardBlockRewardParams) *adt.EmptyValue {\n\trt.ValidateImmediateCallerIs(builtin.SystemActorAddr)\n\tAssertMsg(rt.CurrentBalance().GreaterThanEqual(params.GasReward),\n\t\t\"actor current balance %v insufficient to pay gas reward %v\", rt.CurrentBalance(), params.GasReward)\n\n\tAssertMsg(params.TicketCount > 0, \"cannot give block reward for zero tickets\")\n\n\tminer, ok := rt.ResolveAddress(params.Miner)\n\tif !ok {\n\t\trt.Abortf(exitcode.ErrIllegalState, \"failed to resolve given owner address\")\n\t}\n\n\tpriorBalance := rt.CurrentBalance()\n\n\tvar penalty abi.TokenAmount\n\tvar st State\n\trt.State().Transaction(&st, func() interface{} {\n\t\tblockReward := a.computeBlockReward(&st, big.Sub(priorBalance, params.GasReward), params.TicketCount)\n\t\ttotalReward := big.Add(blockReward, params.GasReward)\n\n\t\t// Cap the penalty at the total reward value.\n\t\tpenalty = big.Min(params.Penalty, totalReward)\n\n\t\t// Reduce the payable reward by the penalty.\n\t\trewardPayable := big.Sub(totalReward, penalty)\n\n\t\tAssertMsg(big.Add(rewardPayable, penalty).LessThanEqual(priorBalance),\n\t\t\t\"reward payable %v + penalty %v exceeds balance %v\", rewardPayable, penalty, priorBalance)\n\n\t\t// Record new reward into reward map.\n\t\tif rewardPayable.GreaterThan(abi.NewTokenAmount(0)) {\n\t\t\tnewReward := Reward{\n\t\t\t\tStartEpoch: rt.CurrEpoch(),\n\t\t\t\tEndEpoch: rt.CurrEpoch() + rewardVestingPeriod,\n\t\t\t\tValue: rewardPayable,\n\t\t\t\tAmountWithdrawn: abi.NewTokenAmount(0),\n\t\t\t\tVestingFunction: rewardVestingFunction,\n\t\t\t}\n\t\t\tif err := st.addReward(adt.AsStore(rt), miner, &newReward); err != nil {\n\t\t\t\trt.Abortf(exitcode.ErrIllegalState, \"failed to add reward to rewards map: %w\", err)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\n\t// Burn the penalty amount.\n\t_, code := rt.Send(builtin.BurntFundsActorAddr, builtin.MethodSend, nil, penalty)\n\tbuiltin.RequireSuccess(rt, code, \"failed to send penalty to BurntFundsActor\")\n\n\treturn nil\n}", "func (_Token *TokenCallerSession) BaseReward(index *big.Int) (*big.Int, *big.Int, *big.Int, error) {\n\treturn _Token.Contract.BaseReward(&_Token.CallOpts, index)\n}", "func rewardAndSlash(ctx contract.Context, cachedDelegations *CachedDposStorage, state *State) ([]*DelegationResult, error) {\n\tformerValidatorTotals := make(map[string]loom.BigUInt)\n\tdelegatorRewards := make(map[string]*loom.BigUInt)\n\tdistributedRewards := common.BigZero()\n\n\tdelegations, err := cachedDelegations.loadDelegationList(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, validator := range state.Validators {\n\t\tcandidate := GetCandidateByPubKey(ctx, validator.PubKey)\n\n\t\tif candidate == nil {\n\t\t\tctx.Logger().Info(\"Attempted to reward validator no longer on candidates list.\", \"validator\", validator)\n\t\t\tcontinue\n\t\t}\n\n\t\tcandidateAddress := loom.UnmarshalAddressPB(candidate.Address)\n\t\tvalidatorKey := candidateAddress.String()\n\t\tstatistic, _ := 
GetStatistic(ctx, candidateAddress)\n\n\t\tif statistic == nil {\n\t\t\tdelegatorRewards[validatorKey] = common.BigZero()\n\t\t\tformerValidatorTotals[validatorKey] = *common.BigZero()\n\t\t} else {\n\t\t\t// If a validator is jailed, don't calculate and distribute rewards\n\t\t\tif ctx.FeatureEnabled(features.DPOSVersion3_3, false) {\n\t\t\t\tif statistic.Jailed {\n\t\t\t\t\tdelegatorRewards[validatorKey] = common.BigZero()\n\t\t\t\t\tformerValidatorTotals[validatorKey] = *common.BigZero()\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\t// If a validator's SlashPercentage is 0, the validator is\n\t\t\t// rewarded for avoiding faults during the last slashing period\n\t\t\tif common.IsZero(statistic.SlashPercentage.Value) {\n\t\t\t\tdistributionTotal := calculateRewards(statistic.DelegationTotal.Value, state.Params, state.TotalValidatorDelegations.Value)\n\n\t\t\t\t// The validator share, equal to validator_fee * total_validotor_reward\n\t\t\t\t// is to be split between the referrers and the validator\n\t\t\t\tvalidatorShare := CalculateFraction(loom.BigUInt{big.NewInt(int64(candidate.Fee))}, distributionTotal)\n\n\t\t\t\t// delegatorsShare is what fraction of the total rewards will be\n\t\t\t\t// distributed to delegators\n\t\t\t\tdelegatorsShare := common.BigZero()\n\t\t\t\tdelegatorsShare.Sub(&distributionTotal, &validatorShare)\n\t\t\t\tdelegatorRewards[validatorKey] = delegatorsShare\n\n\t\t\t\t// Distribute rewards to referrers\n\t\t\t\tfor _, d := range delegations {\n\t\t\t\t\tif loom.UnmarshalAddressPB(d.Validator).Compare(loom.UnmarshalAddressPB(candidate.Address)) == 0 {\n\t\t\t\t\t\tdelegation, err := GetDelegation(ctx, d.Index, *d.Validator, *d.Delegator)\n\t\t\t\t\t\t// if the delegation is not found OR if the delegation\n\t\t\t\t\t\t// has no referrer, we do not need to attempt to\n\t\t\t\t\t\t// distribute the referrer rewards\n\t\t\t\t\t\tif err == contract.ErrNotFound || len(delegation.Referrer) == 0 {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t} else if err != nil {\n\t\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// if referrer is not found, do not distribute the reward\n\t\t\t\t\t\treferrerAddress := getReferrer(ctx, delegation.Referrer)\n\t\t\t\t\t\tif referrerAddress == nil {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// calculate referrerReward\n\t\t\t\t\t\treferrerReward := calculateRewards(delegation.Amount.Value, state.Params, state.TotalValidatorDelegations.Value)\n\t\t\t\t\t\treferrerReward = CalculateFraction(loom.BigUInt{big.NewInt(int64(candidate.Fee))}, referrerReward)\n\t\t\t\t\t\treferrerReward = CalculateFraction(defaultReferrerFee, referrerReward)\n\n\t\t\t\t\t\t// referrer fees are delegater to limbo validator\n\t\t\t\t\t\tdistributedRewards.Add(distributedRewards, &referrerReward)\n\t\t\t\t\t\tcachedDelegations.IncreaseRewardDelegation(ctx, LimboValidatorAddress(ctx).MarshalPB(), referrerAddress, referrerReward)\n\n\t\t\t\t\t\t// any referrer bonus amount is subtracted from the validatorShare\n\t\t\t\t\t\tvalidatorShare.Sub(&validatorShare, &referrerReward)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tdistributedRewards.Add(distributedRewards, &validatorShare)\n\t\t\t\tcachedDelegations.IncreaseRewardDelegation(ctx, candidate.Address, candidate.Address, validatorShare)\n\n\t\t\t\t// If a validator has some non-zero WhitelistAmount,\n\t\t\t\t// calculate the validator's reward based on whitelist amount\n\t\t\t\tif !common.IsZero(statistic.WhitelistAmount.Value) {\n\t\t\t\t\tamount := 
calculateWeightedWhitelistAmount(*statistic)\n\t\t\t\t\twhitelistDistribution := calculateShare(amount, statistic.DelegationTotal.Value, *delegatorsShare)\n\t\t\t\t\t// increase a delegator's distribution\n\t\t\t\t\tdistributedRewards.Add(distributedRewards, &whitelistDistribution)\n\t\t\t\t\tcachedDelegations.IncreaseRewardDelegation(ctx, candidate.Address, candidate.Address, whitelistDistribution)\n\t\t\t\t}\n\n\t\t\t\t// Keeping track of cumulative distributed rewards by adding\n\t\t\t\t// every validator's total rewards to\n\t\t\t\t// `state.TotalRewardDistribution`\n\t\t\t\t// NOTE: because we round down in every `calculateRewards` call,\n\t\t\t\t// we expect `state.TotalRewardDistribution` to be a slight\n\t\t\t\t// overestimate of what was actually distributed. We could be\n\t\t\t\t// exact with our record keeping by incrementing\n\t\t\t\t// `state.TotalRewardDistribution` each time\n\t\t\t\t// `IncreaseRewardDelegation` is called, but because we will not\n\t\t\t\t// use `state.TotalRewardDistributions` as part of any invariants,\n\t\t\t\t// we will live with this situation.\n\t\t\t\tif !ctx.FeatureEnabled(features.DPOSVersion3_1, false) {\n\t\t\t\t\tstate.TotalRewardDistribution.Value.Add(&state.TotalRewardDistribution.Value, &distributionTotal)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif err := slashValidatorDelegations(ctx, cachedDelegations, statistic, candidateAddress); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tif err := SetStatistic(ctx, statistic); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tformerValidatorTotals[validatorKey] = statistic.DelegationTotal.Value\n\t\t}\n\t}\n\n\tnewDelegationTotals, err := distributeDelegatorRewards(ctx, cachedDelegations, formerValidatorTotals, delegatorRewards, distributedRewards)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif ctx.FeatureEnabled(features.DPOSVersion3_1, false) {\n\t\tstate.TotalRewardDistribution.Value.Add(&state.TotalRewardDistribution.Value, distributedRewards)\n\t}\n\n\tdelegationResults := make([]*DelegationResult, 0, len(newDelegationTotals))\n\tfor validator := range newDelegationTotals {\n\t\tdelegationResults = append(delegationResults, &DelegationResult{\n\t\t\tValidatorAddress: loom.MustParseAddress(validator),\n\t\t\tDelegationTotal: *newDelegationTotals[validator],\n\t\t})\n\t}\n\tsort.Sort(byDelegationTotal(delegationResults))\n\n\treturn delegationResults, nil\n}", "func HistoryRecord(env *Environment, command []string, date time.Time, code int) error {\n\tif env == nil {\n\t\treturn nil\n\t}\n\n\thistoryPath := env.LocalPath(HistoryDir)\n\tif utils.IsNotExist(historyPath) {\n\t\terr := utils.MkdirAll(historyPath, 0755)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\th := &historyRow{\n\t\tCommand: strings.Join(command, \" \"),\n\t\tDate: date,\n\t\tCode: code,\n\t}\n\n\treturn h.save(historyPath)\n}", "func (db *Db) SaveTokenPricesHistory(prices []types.TokenPrice) error {\n\tif len(prices) == 0 {\n\t\treturn nil\n\t}\n\n\tquery := `INSERT INTO token_price_history (unit_name, price, market_cap, timestamp) VALUES`\n\tvar param []interface{}\n\n\tfor i, ticker := range prices {\n\t\tvi := i * 4\n\t\tquery += fmt.Sprintf(\"($%d,$%d,$%d,$%d),\", vi+1, vi+2, vi+3, vi+4)\n\t\tparam = append(param, ticker.UnitName, ticker.Price, ticker.MarketCap, ticker.Timestamp)\n\t}\n\n\tquery = query[:len(query)-1] // Remove trailing \",\"\n\tquery += `\nON CONFLICT ON CONSTRAINT unique_price_for_timestamp DO UPDATE \n\tSET price = excluded.price,\n\t market_cap = 
excluded.market_cap`\n\n\t_, err := db.Sql.Exec(query, param...)\n\treturn err\n}", "func (as AccountStorage) DeleteRewardHistory(ctx sdk.Context, me types.AccountKey, bucketSlot int64) {\n\tstore := ctx.KVStore(as.key)\n\tstore.Delete(getRewardHistoryKey(me, bucketSlot))\n\treturn\n}", "func (s *HeroesServiceChaincode) gethistory(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\tif len(args) < 2 {\n\t\treturn shim.Error(\"Incorrect number of arguments. Expecting 3\")\n\t}\n\n\tvegKey := args[1]\n\tfmt.Printf(\"##### start History of Record: %s\\n\", vegKey)\n\n\tresultsIterator, err := stub.GetHistoryForKey(vegKey)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\tdefer resultsIterator.Close()\n\n\t// buffer is a JSON array containing historic values for the marble\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"[\")\n\n\tbArrayMemberAlreadyWritten := false\n\tfor resultsIterator.HasNext() {\n\t\tresponse, err := resultsIterator.Next()\n\t\tif err != nil {\n\t\t\treturn shim.Error(err.Error())\n\t\t}\n\t\t// Add a comma before array members, suppress it for the first array member\n\t\tif bArrayMemberAlreadyWritten == true {\n\t\t\tbuffer.WriteString(\",\")\n\t\t}\n\t\tbuffer.WriteString(\"{\\\"TxId\\\":\")\n\t\tbuffer.WriteString(\"\\\"\")\n\t\tbuffer.WriteString(response.TxId)\n\t\tbuffer.WriteString(\"\\\"\")\n\n\t\tbuffer.WriteString(\", \\\"Value\\\":\")\n\t\t// if it was a delete operation on given key, then we need to set the\n\t\t//corresponding value null. Else, we will write the response.Value\n\t\t//as-is (as the Value itself a JSON marble)\n\t\tif response.IsDelete {\n\t\t\tbuffer.WriteString(\"null\")\n\t\t} else {\n\t\t\tbuffer.WriteString(string(response.Value))\n\t\t}\n\n\t\tbuffer.WriteString(\", \\\"Timestamp\\\":\")\n\t\tbuffer.WriteString(\"\\\"\")\n\t\tbuffer.WriteString(time.Unix(response.Timestamp.Seconds, int64(response.Timestamp.Nanos)).String())\n\t\tbuffer.WriteString(\"\\\"\")\n\n\t\tbuffer.WriteString(\", \\\"IsDelete\\\":\")\n\t\tbuffer.WriteString(\"\\\"\")\n\t\tbuffer.WriteString(strconv.FormatBool(response.IsDelete))\n\t\tbuffer.WriteString(\"\\\"\")\n\n\t\tbuffer.WriteString(\"}\")\n\t\tbArrayMemberAlreadyWritten = true\n\t}\n\tbuffer.WriteString(\"]\")\n\n\tfmt.Printf(\"- getHistoryForVegetable returning:\\n%s\\n\", buffer.String())\n\n\treturn shim.Success(buffer.Bytes())\n}", "func (m *MemoryRewardStorage) Add(reward rewards.Reward) int {\n\treward.ID = len(m.rewards) + 1\n\tm.rewards = append(m.rewards, reward)\n\n\treturn reward.ID\n}", "func PurchasedRewardsAPIHandler(response http.ResponseWriter, request *http.Request) {\n\tt := time.Now()\n\tlogRequest := t.Format(\"2006/01/02 15:04:05\") + \" | Request:\" + request.Method + \" | Endpoint: purchasedrewards | \" //Connect to database\n\tfmt.Println(logRequest)\n\tdb, e := sql.Open(\"mysql\", dbConnectionURL)\n\tif e != nil {\n\t\tfmt.Print(e)\n\t}\n\n\t//set mime type to JSON\n\tresponse.Header().Set(\"Content-type\", \"application/json\")\n\n\terr := request.ParseForm()\n\tif err != nil {\n\t\thttp.Error(response, fmt.Sprintf(\"error parsing url %v\", err), 500)\n\t}\n\n\t//can't define dynamic slice in golang\n\tvar result = make([]string, 1000)\n\n\tswitch request.Method {\n\tcase GET:\n\t\tGroupId := strings.Replace(request.URL.Path, \"/api/purchasedrewards/\", \"\", -1)\n\n\t\t//fmt.Println(GroupId)\n\t\tst, getErr := db.Prepare(\"select * from PurchasedRewards where GroupId=?\")\n\t\tif err != nil {\n\t\t\tfmt.Print(getErr)\n\t\t}\n\t\trows, getErr := 
st.Query(GroupId)\n\t\tif getErr != nil {\n\t\t\tfmt.Print(getErr)\n\t\t}\n\t\ti := 0\n\t\tfor rows.Next() {\n\t\t\tvar RequestId int\n\t\t\tvar GroupId int\n\t\t\tvar RewardName string\n\t\t\tvar PointCost int\n\t\t\tvar RewardDescription string\n\t\t\tvar RewardedUser string\n\n\t\t\tgetErr := rows.Scan(&RequestId, &GroupId, &RewardName, &PointCost, &RewardDescription, &RewardedUser)\n\t\t\treward := &PurchasedReward{RequestId: RequestId, GroupId: GroupId, RewardName: RewardName, PointCost: PointCost, RewardDescription: RewardDescription, RewardedUser: RewardedUser}\n\t\t\tb, getErr := json.Marshal(reward)\n\t\t\tif getErr != nil {\n\t\t\t\tfmt.Println(getErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tresult[i] = fmt.Sprintf(\"%s\", string(b))\n\t\t\ti++\n\t\t}\n\t\tresult = result[:i]\n\n\tcase POST:\n\n\t\tGroupId := request.PostFormValue(\"GroupId\")\n\t\tRewardName := request.PostFormValue(\"RewardName\")\n\t\tPointCost := request.PostFormValue(\"PointCost\")\n\t\tRewardDescription := request.PostFormValue(\"RewardDescription\")\n\t\tRewardedUser := request.PostFormValue(\"RewardedUser\")\n\n\t\tvar UserBalance int\n\t\tuserBalanceQueryErr := db.QueryRow(\"SELECT TotalPoints FROM `Points` WHERE `EmailAddress`=? AND `GroupId`=?\", RewardedUser, GroupId).Scan(&UserBalance)\n\t\tswitch {\n\t\tcase userBalanceQueryErr == sql.ErrNoRows:\n\t\t\tlog.Printf(logRequest, \"Unable to find user and group: \\n\", RewardedUser, GroupId)\n\t\tcase userBalanceQueryErr != nil:\n\t\t\tlog.Fatal(userBalanceQueryErr)\n\t\tdefault:\n\t\t}\n\t\tcostInt, err := strconv.Atoi(PointCost)\n\t\tif UserBalance > costInt {\n\t\t\t// Update user's points\n\t\t\tUserBalance -= costInt\n\n\t\t\t// Update database row\n\t\t\tstBalanceUpdate, postBalanceUpdateErr := db.Prepare(\"UPDATE Points SET `totalpoints`=?, `emailaddress`=? WHERE `groupid`=?\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Print(err)\n\t\t\t}\n\t\t\tresBalanceUpdate, postBalanceUpdateErr := stBalanceUpdate.Exec(UserBalance, RewardedUser, GroupId)\n\t\t\tif postBalanceUpdateErr != nil {\n\t\t\t\tfmt.Print(postBalanceUpdateErr)\n\t\t\t}\n\t\t\tif resBalanceUpdate != nil {\n\t\t\t\tresult[0] = \"Points Subtracted\"\n\t\t\t}\n\t\t\tresult = result[:1]\n\n\t\t\t// Add purchase to record\n\t\t\tstPurchase, postPurchaseErr := db.Prepare(\"INSERT INTO PurchasedRewards(`requestid`, `groupid`, `rewardname`, `pointcost`, `rewarddescription`, `rewardeduser`) VALUES(NULL,?,?,?,?,?)\")\n\t\t\tif postPurchaseErr != nil {\n\t\t\t\tfmt.Print(postPurchaseErr)\n\t\t\t}\n\t\t\tresPurchase, postPurchaseErr := stPurchase.Exec(GroupId, RewardName, PointCost, RewardDescription, RewardedUser)\n\t\t\tif postPurchaseErr != nil {\n\t\t\t\tfmt.Print(postPurchaseErr)\n\t\t\t}\n\n\t\t\tif resPurchase != nil {\n\t\t\t\tresult[0] = \"Purchase Added\"\n\t\t\t}\n\n\t\t\tresult = result[:1]\n\t\t} else {\n\t\t\tresult[0] = \"Purchase Rejected\"\n\t\t\tresult = result[:1]\n\t\t}\n\n\tcase PUT:\n\t\tRequestId := request.PostFormValue(\"RequestId\")\n\t\tGroupId := request.PostFormValue(\"GroupId\")\n\t\tRewardName := request.PostFormValue(\"RewardName\")\n\t\tPointCost := request.PostFormValue(\"PointCost\")\n\t\tRewardDescription := request.PostFormValue(\"RewardDescription\")\n\t\tRewardedUser := request.PostFormValue(\"RewardedUser\")\n\n\t\tst, putErr := db.Prepare(\"UPDATE PurchasedRewards SET GroupId=?, RewardName=?, PointCost=?, RewardDescription=?, RewardedUser=? 
WHERE RequestId=?\")\n\t\tif err != nil {\n\t\t\tfmt.Print(putErr)\n\t\t}\n\t\tres, putErr := st.Exec(GroupId, RewardName, PointCost, RewardDescription, RewardedUser, RequestId)\n\t\tif putErr != nil {\n\t\t\tfmt.Print(putErr)\n\t\t}\n\n\t\tif res != nil {\n\t\t\tresult[0] = \"Reward Modified\"\n\t\t}\n\t\tresult = result[:1]\n\n\tcase DELETE:\n\t\tRequestId := strings.Replace(request.URL.Path, \"/api/purchasedrewards/\", \"\", -1)\n\t\tst, deleteErr := db.Prepare(\"DELETE FROM PurchasedRewards where RequestId=?\")\n\t\tif deleteErr != nil {\n\t\t\tfmt.Print(deleteErr)\n\t\t}\n\t\tres, deleteErr := st.Exec(RequestId)\n\t\tif deleteErr != nil {\n\t\t\tfmt.Print(deleteErr)\n\t\t}\n\n\t\tif res != nil {\n\t\t\tresult[0] = \"Reward Deleted\"\n\t\t}\n\t\tresult = result[:1]\n\n\tdefault:\n\t}\n\n\tjson, err := json.Marshal(result)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\t// Send the text diagnostics to the client. Clean backslashes from json\n\tfmt.Fprintf(response, \"%v\", CleanJSON(string(json)))\n\t//fmt.Fprintf(response, \" request.URL.Path '%v'\\n\", request.Method)\n\tdb.Close()\n}", "func ApplyRewardTx(tx *types.Transaction, statedb *state.Statedb) (*types.Receipt, error) {\n\tstatedb.CreateAccount(tx.Data.To)\n\tstatedb.AddBalance(tx.Data.To, tx.Data.Amount)\n\n\thash, err := statedb.Hash()\n\tif err != nil {\n\t\treturn nil, errors.NewStackedError(err, \"failed to get statedb root hash\")\n\t}\n\n\treceipt := &types.Receipt{\n\t\tTxHash: tx.Hash,\n\t\tPostState: hash,\n\t}\n\n\treturn receipt, nil\n}", "func ValidateRewardTx(tx *types.Transaction, header *types.BlockHeader) error {\n\tif tx.Data.Type != types.TxTypeReward || !tx.Data.From.IsEmpty() || tx.Data.AccountNonce != 0 || tx.Data.GasPrice.Cmp(common.Big0) != 0 || tx.Data.GasLimit != 0 || len(tx.Data.Payload) != 0 {\n\t\treturn errInvalidReward\n\t}\n\n\t// validate to address\n\tto := tx.Data.To\n\tif to.IsEmpty() {\n\t\treturn errEmptyToAddress\n\t}\n\n\tif !to.Equal(header.Creator) {\n\t\treturn errCoinbaseMismatch\n\t}\n\n\t// validate reward\n\tamount := tx.Data.Amount\n\tif err := validateReward(amount); err != nil {\n\t\treturn err\n\t}\n\n\treward := consensus.GetReward(header.Height)\n\tif reward == nil || reward.Cmp(amount) != 0 {\n\t\treturn fmt.Errorf(\"invalid reward Amount, block height %d, want %s, got %s\", header.Height, reward, amount)\n\t}\n\n\t// validate timestamp\n\tif tx.Data.Timestamp != header.CreateTimestamp.Uint64() {\n\t\treturn errTimestampMismatch\n\t}\n\n\treturn nil\n}", "func (_RandomBeacon *RandomBeaconFilterer) WatchRewardParametersUpdated(opts *bind.WatchOpts, sink chan<- *RandomBeaconRewardParametersUpdated) (event.Subscription, error) {\n\n\tlogs, sub, err := _RandomBeacon.contract.WatchLogs(opts, \"RewardParametersUpdated\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(RandomBeaconRewardParametersUpdated)\n\t\t\t\tif err := _RandomBeacon.contract.UnpackLog(event, \"RewardParametersUpdated\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn 
nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func saveAddHistoryTime(w http.ResponseWriter, r *http.Request, t map[string]interface{}) {\n\tf := firego.NewGAE(appengine.NewContext(r), FIRE_URL)\n\tf.Auth(FIRE_AUTH)\n\tif e := f.Update(t); e == nil {\n\t\tif _, e := f.Push(nil); e == nil {\n\t\t\tstatus(w, fmt.Sprintf(\"updated history: %v\", t[\"lastSave\"]), 200)\n\t\t} else {\n\t\t\ts := fmt.Sprintf(\"Firebase push error: %v\", e)\n\t\t\tstatus(w, s, 303)\n\t\t}\n\t} else {\n\t\ts := fmt.Sprintf(\"Firebase update command error: %v\", e)\n\t\tstatus(w, s, 304)\n\t}\n}", "func EstimateReward(reward, pr, gamma float64) float64 {\n\tret := reward / (pr + gamma)\n\tlog.Logf(MABLogLevel, \"MAB Estimate Reward: %v / (%v + %v) = %v\\n\",\n\t\treward, pr, gamma, ret)\n\treturn ret\n}", "func (m *RestaurantMutation) AddHistoryIDs(ids ...int) {\n\tif m.histories == nil {\n\t\tm.histories = make(map[int]struct{})\n\t}\n\tfor i := range ids {\n\t\tm.histories[ids[i]] = struct{}{}\n\t}\n}", "func (_Token *TokenCaller) BaseReward(opts *bind.CallOpts, index *big.Int) (*big.Int, *big.Int, *big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t\tret1 = new(*big.Int)\n\t\tret2 = new(*big.Int)\n\t)\n\tout := &[]interface{}{\n\t\tret0,\n\t\tret1,\n\t\tret2,\n\t}\n\terr := _Token.contract.Call(opts, out, \"baseReward\", index)\n\treturn *ret0, *ret1, *ret2, err\n}", "func (cra clawbackRewardAction) ProcessReward(ctx sdk.Context, reward sdk.Coins, rawAccount exported.VestingAccount) error {\n\tcva, ok := rawAccount.(*ClawbackVestingAccount)\n\tif !ok {\n\t\treturn sdkerrors.Wrapf(sdkerrors.ErrNotSupported, \"expected *ClawbackVestingAccount, got %T\", rawAccount)\n\t}\n\tcva.postReward(ctx, reward, cra.ak, cra.bk, cra.sk)\n\treturn nil\n}", "func (va ClawbackVestingAccount) distributeReward(ctx sdk.Context, ak AccountKeeper, bondDenom string, reward sdk.Coins) {\n\tnow := ctx.BlockTime().Unix()\n\tt := va.StartTime\n\tfirstUnvestedPeriod := 0\n\tunvestedTokens := sdk.ZeroInt()\n\tfor i, period := range va.VestingPeriods {\n\t\tt += period.Length\n\t\tif t <= now {\n\t\t\tfirstUnvestedPeriod = i + 1\n\t\t\tcontinue\n\t\t}\n\t\tunvestedTokens = unvestedTokens.Add(period.Amount.AmountOf(bondDenom))\n\t}\n\n\trunningTotReward := sdk.NewCoins()\n\trunningTotStaking := sdk.ZeroInt()\n\tfor i := firstUnvestedPeriod; i < len(va.VestingPeriods); i++ {\n\t\tperiod := va.VestingPeriods[i]\n\t\trunningTotStaking = runningTotStaking.Add(period.Amount.AmountOf(bondDenom))\n\t\trunningTotRatio := runningTotStaking.ToDec().Quo(unvestedTokens.ToDec())\n\t\ttargetCoins := scaleCoins(reward, runningTotRatio)\n\t\tthisReward := targetCoins.Sub(runningTotReward)\n\t\trunningTotReward = targetCoins\n\t\tperiod.Amount = period.Amount.Add(thisReward...)\n\t\tva.VestingPeriods[i] = period\n\t}\n\n\tva.OriginalVesting = va.OriginalVesting.Add(reward...)\n\tak.SetAccount(ctx, &va)\n}", "func (_Token *TokenSession) BaseRewardHistoryLength() (*big.Int, error) {\n\treturn _Token.Contract.BaseRewardHistoryLength(&_Token.CallOpts)\n}", "func (brutx *BotRecordUpdateTransaction) RevertBotRecordUpdate(record *BotRecord) error {\n\t// update the record expiration time in the most simple way possible,\n\t// should there have been a time jump, the caller might have to correct expiration time\n\trecord.Expiration -= BotMonth * CompactTimestamp(brutx.NrOfMonths)\n\n\t// remove all addresses that were added\n\terr := record.RemoveNetworkAddresses(brutx.Addresses.Add...) 
// passing a nil slice is valid\n\tif err != nil {\n\t\treturn err\n\t}\n\t// add all adderesses that were removed\n\terr = record.AddNetworkAddresses(brutx.Addresses.Remove...) // passing a nil slice is valid\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// remove all names that were added\n\terr = record.RemoveNames(brutx.Names.Add...) // passing a nil slice is valid\n\tif err != nil {\n\t\treturn err\n\t}\n\t// add all names that were removed\n\terr = record.AddNames(brutx.Names.Remove...) // passing a nil slice is valid\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// all good\n\treturn nil\n}", "func (h *changepointHeuristic) addToHistory(positionCounts counts) {\n\tif positionCounts.Runs > 0 {\n\t\th.addedUnexpectedRuns = positionCounts.HasUnexpected > 0\n\t\th.addedExpectedRuns = (positionCounts.Runs - positionCounts.HasUnexpected) > 0\n\t}\n\tif positionCounts.Retried > 0 {\n\t\th.addedUnexpectedAfterRetry = positionCounts.UnexpectedAfterRetry > 0\n\t\th.addedExpectedAfterRetry = (positionCounts.Retried - positionCounts.UnexpectedAfterRetry) > 0\n\t}\n}", "func (c RewardsController) CollectReward(id string) revel.Result {\n\tif !c.GetCurrentUser() {\n\t\treturn c.ForbiddenResponse()\n\t}\n\n\tif !bson.IsObjectIdHex(id) {\n\t\treturn c.ErrorResponse(nil, c.Message(\"error.invalid\", \"\"), core.ModelStatus[core.StatusInvalidID])\n\t}\n\n\tvar selector = []bson.M{\n\t\tbson.M{\"user_id\": c.CurrentUser.GetID().Hex()},\n\t\tbson.M{\"_id\": id},\n\t\tbson.M{\"multi\": false},\n\t}\n\tvar query = bson.M{\"$set\": []bson.M{\n\t\tbson.M{\"status.name\": core.StatusObtained},\n\t\tbson.M{\"status.code\": core.ValidationStatus[core.StatusObtained]},\n\t}}\n\n\t// Get pending Rewards for the user\n\tif Reward, ok := app.Mapper.GetModel(&models.Reward{}); ok {\n\t\tif err := Reward.UpdateQuery(selector, query, false); err != nil {\n\t\t\trevel.ERROR.Print(\"ERROR Find\")\n\t\t\treturn c.ErrorResponse(err, err.Error(), 400)\n\t\t}\n\t\treturn c.SuccessResponse(bson.M{\"data\": \"Reward collected successfully\"}, \"success\", core.ModelsType[core.ModelSimpleResponse], nil)\n\t}\n\n\treturn c.ServerErrorResponse()\n}", "func computeReward(epoch abi.ChainEpoch, prevTheta, currTheta, simpleTotal, baselineTotal big.Int) abi.TokenAmount {\n\tsimpleReward := big.Mul(simpleTotal, ExpLamSubOne) //Q.0 * Q.128 => Q.128\n\tepochLam := big.Mul(big.NewInt(int64(epoch)), Lambda) // Q.0 * Q.128 => Q.128\n\n\tsimpleReward = big.Mul(simpleReward, big.NewFromGo(math.ExpNeg(epochLam.Int))) // Q.128 * Q.128 => Q.256\n\tsimpleReward = big.Rsh(simpleReward, math.Precision128) // Q.256 >> 128 => Q.128\n\n\tbaselineReward := big.Sub(computeBaselineSupply(currTheta, baselineTotal), computeBaselineSupply(prevTheta, baselineTotal)) // Q.128\n\n\treward := big.Add(simpleReward, baselineReward) // Q.128\n\n\treturn big.Rsh(reward, math.Precision128) // Q.128 => Q.0\n}", "func (httpServer *HttpServer) handleListRewardAmount(params interface{}, closeChan <-chan struct{}) (interface{}, *rpcservice.RPCError) {\n\tresult := httpServer.databaseService.ListRewardAmount()\n\treturn result, nil\n}", "func InsertHistory(history *models.History , o orm.Ormer) (bool){\n\tnum , err:= o.Insert(history)\n\tif (err ==nil){\n\t\tfmt.Println(\"mysql row affected nums: \" , num)\n\t\treturn true\n\t}\n\treturn false;\n}", "func (s *BlocksService) Reward(ctx context.Context) (*BlocksReward, *http.Response, error) {\n\tvar responseStruct *BlocksReward\n\tresp, err := s.client.SendRequest(ctx, \"GET\", \"blocks/getReward\", nil, 
&responseStruct)\n\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn responseStruct, resp, err\n}", "func (_Token *TokenCallerSession) BaseRewardHistoryLength() (*big.Int, error) {\n\treturn _Token.Contract.BaseRewardHistoryLength(&_Token.CallOpts)\n}", "func (as AccountStorage) GetRewardHistory(\n\tctx sdk.Context, me types.AccountKey, bucketSlot int64) (*RewardHistory, sdk.Error) {\n\tstore := ctx.KVStore(as.key)\n\trewardHistoryBytes := store.Get(getRewardHistoryKey(me, bucketSlot))\n\tif rewardHistoryBytes == nil {\n\t\treturn nil, nil\n\t}\n\thistory := new(RewardHistory)\n\tif err := as.cdc.UnmarshalJSON(rewardHistoryBytes, history); err != nil {\n\t\treturn nil, ErrFailedToUnmarshalRewardHistory(err)\n\t}\n\treturn history, nil\n}", "func updateStoriesRank() error {\n\tsql := \"update stories set rank = 100 * points / POWER((select max(id) from stories) - id + 1,1.2)\"\n\t_, err := query.Exec(sql)\n\treturn err\n}", "func GetRewardEventsInfo(fromBlock *big.Int, toBlock *big.Int) []*RewardInfo {\n\n\tvar logEpochRewardSig = []byte(\"EpochRewardsDistributedToVoters(address,uint256)\")\n\tvar logEpochRewardSigHash = crypto.Keccak256Hash(logEpochRewardSig)\n\tvar TopicsFilter = [][]common.Hash{{logEpochRewardSigHash}}\n\n\tcontractAddress := common.HexToAddress(WrapperContractDeploymentAddress[NetActive][Election])\n\n\tquery := ethereum.FilterQuery{\n\t\tFromBlock: fromBlock,\n\t\tToBlock: toBlock,\n\t\tTopics: TopicsFilter,\n\n\t\tAddresses: []common.Address{\n\t\t\tcontractAddress,\n\t\t},\n\t}\n\n\tlogs, err := atlasEthClient.FilterLogs(context.Background(), query)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\trewards_info := make([]*RewardInfo, 0, len(logs))\n\n\tcontractAbi, err := abi.JSON(strings.NewReader(string(binding.ElectionABI)))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor _, vLog := range logs {\n\n\t\tvar epochRewardEvent EpochRewardEvent\n\t\terr := contractAbi.Unpack(&epochRewardEvent, \"EpochRewardsDistributedToVoters\", vLog.Data)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tri := &RewardInfo{Group: common.HexToAddress(vLog.Topics[1].Hex()).String(),\n\t\t\tGroupHash: vLog.Topics[1],\n\t\t\tRewardValue: epochRewardEvent.Value,\n\t\t\tBlockNumber: new(big.Int).SetUint64(vLog.BlockNumber)}\n\n\t\tAddAtlasToRewardInfo(ri)\n\n\t\trewards_info = append(rewards_info, ri)\n\t}\n\n\treturn rewards_info\n}", "func (ht *historyTable) add(m Move, delta int32) {\n\th := historyHash(m)\n\tif ht[h].move != m {\n\t\tht[h] = historyEntry{stat: delta, move: m}\n\t} else {\n\t\tht[h].stat += delta\n\t}\n}", "func (_RewardsDistributionRecipient *RewardsDistributionRecipientSession) NotifyRewardAmount(reward *big.Int) (*types.Transaction, error) {\n\treturn _RewardsDistributionRecipient.Contract.NotifyRewardAmount(&_RewardsDistributionRecipient.TransactOpts, reward)\n}", "func (x *XmpMM) AppendVersionHistory(action ActionType, modifier, changed string, date xmp.Date) {\n\t// append change to last version\n\tv := x.GetLastVersion()\n\n\t// make new version if none exists or if list does not contain\n\t// entry for the current version\n\tif v == nil || v.Version != x.VersionID {\n\t\tv = &StVersion{\n\t\t\tEvent: ResourceEvent{\n\t\t\t\tAction: action,\n\t\t\t\tChanged: xmpdm.NewPartList(changed),\n\t\t\t\tInstanceID: x.InstanceID,\n\t\t\t\tSoftwareAgent: xmp.Agent,\n\t\t\t\tWhen: date,\n\t\t\t},\n\t\t\tModifier: modifier,\n\t\t\tModifyDate: date,\n\t\t\tVersion: 
x.VersionID,\n\t\t}\n\t\tx.AddVersion(v)\n\t\treturn\n\t}\n\tv.Event.Changed.Add(changed)\n}", "func (pu *PatientrecordUpdate) AddHistorytaking(h ...*Historytaking) *PatientrecordUpdate {\n\tids := make([]int, len(h))\n\tfor i := range h {\n\t\tids[i] = h[i].ID\n\t}\n\treturn pu.AddHistorytakingIDs(ids...)\n}", "func (q querier) RewardWeight(c context.Context, req *types.QueryRewardWeightRequest) (*types.QueryRewardWeightResponse, error) {\n\tctx := sdk.UnwrapSDKContext(c)\n\treturn &types.QueryRewardWeightResponse{RewardWeight: q.GetRewardWeight(ctx)}, nil\n}", "func (mw *ShardedRDB) SaveRaftState(updates []pb.Update,\n\tctx raftio.IContext) error {\n\tif len(updates) == 0 {\n\t\treturn nil\n\t}\n\tpid := mw.getParititionID(updates)\n\treturn mw.shards[pid].saveRaftState(updates, ctx)\n}", "func (_XStaking *XStakingSession) Rewards(arg0 common.Address) (*big.Int, error) {\n\treturn _XStaking.Contract.Rewards(&_XStaking.CallOpts, arg0)\n}", "func (r *Raft) AppendToLog_Follower(request AppendEntriesReq) {\n\tterm := request.term\n\tcmd := request.entries\n\tindex := request.prevLogIndex + 1\n\tlogVal := LogVal{term, cmd, 0} //make object for log's value field\n\n\tif len(r.myLog) == index {\n\t\tr.myLog = append(r.myLog, logVal) //when trying to add a new entry\n\t} else {\n\t\tr.myLog[index] = logVal //overwriting in case of log repair\n\t\t//fmt.Println(\"Overwiriting!!\")\n\t}\n\t//fmt.Println(r.myId(), \"Append to log\", string(cmd))\n\t//modify metadata after appending\n\t//r.myMetaData.lastLogIndex = r.myMetaData.lastLogIndex + 1\n\t//r.myMetaData.prevLogIndex = r.myMetaData.lastLogIndex\n\t//\tif len(r.myLog) == 1 {\n\t//\t\tr.myMetaData.prevLogTerm = r.myMetaData.prevLogTerm + 1\n\t//\t} else if len(r.myLog) > 1 {\n\t//\t\tr.myMetaData.prevLogTerm = r.myLog[r.myMetaData.prevLogIndex].Term\n\t//\t}\n\n\t//Changed on 4th april, above is wrong in case of overwriting of log\n\tr.myMetaData.lastLogIndex = index\n\tr.myMetaData.prevLogIndex = index - 1\n\tif index == 0 {\n\t\tr.myMetaData.prevLogTerm = r.myMetaData.prevLogTerm + 1 //or simple -1\n\t} else if index >= 1 {\n\t\tr.myMetaData.prevLogTerm = r.myLog[index-1].Term\n\t}\n\n\t//Update commit index\n\tleaderCI := float64(request.leaderCommitIndex)\n\tmyLI := float64(r.myMetaData.lastLogIndex)\n\tif request.leaderCommitIndex > r.myMetaData.commitIndex {\n\t\tif myLI == -1 { //REDUNDANT since Append to log will make sure it is never -1,also must not copy higher CI if self LI is -1\n\t\t\tr.myMetaData.commitIndex = int(leaderCI)\n\t\t} else {\n\t\t\tr.myMetaData.commitIndex = int(math.Min(leaderCI, myLI))\n\t\t}\n\t}\n\t//fmt.Println(r.myId(), \"My CI is:\", r.myMetaData.commitIndex)\n\tr.WriteLogToDisk()\n}", "func (_XStaking *XStakingCaller) Rewards(opts *bind.CallOpts, arg0 common.Address) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _XStaking.contract.Call(opts, &out, \"rewards\", arg0)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_Lmc *LmcSession) GetUserAccumulatedReward(_userAddress common.Address, tokenIndex *big.Int) (*big.Int, error) {\n\treturn _Lmc.Contract.GetUserAccumulatedReward(&_Lmc.CallOpts, _userAddress, tokenIndex)\n}", "func (k Keeper) ClaimSwapReward(ctx sdk.Context, owner, receiver sdk.AccAddress, denom string, multiplierName string) error {\n\tmultiplier, found := k.GetMultiplierByDenom(ctx, denom, multiplierName)\n\tif !found {\n\t\treturn errorsmod.Wrapf(types.ErrInvalidMultiplier, 
\"denom '%s' has no multiplier '%s'\", denom, multiplierName)\n\t}\n\n\tclaimEnd := k.GetClaimEnd(ctx)\n\n\tif ctx.BlockTime().After(claimEnd) {\n\t\treturn errorsmod.Wrapf(types.ErrClaimExpired, \"block time %s > claim end time %s\", ctx.BlockTime(), claimEnd)\n\t}\n\n\tsyncedClaim, found := k.GetSynchronizedSwapClaim(ctx, owner)\n\tif !found {\n\t\treturn errorsmod.Wrapf(types.ErrClaimNotFound, \"address: %s\", owner)\n\t}\n\n\tamt := syncedClaim.Reward.AmountOf(denom)\n\n\tclaimingCoins := sdk.NewCoins(sdk.NewCoin(denom, amt))\n\trewardCoins := sdk.NewCoins(sdk.NewCoin(denom, sdk.NewDecFromInt(amt).Mul(multiplier.Factor).RoundInt()))\n\tif rewardCoins.IsZero() {\n\t\treturn types.ErrZeroClaim\n\t}\n\tlength := k.GetPeriodLength(ctx.BlockTime(), multiplier.MonthsLockup)\n\n\terr := k.SendTimeLockedCoinsToAccount(ctx, types.IncentiveMacc, receiver, rewardCoins, length)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// remove claimed coins (NOT reward coins)\n\tsyncedClaim.Reward = syncedClaim.Reward.Sub(claimingCoins...)\n\tk.SetSwapClaim(ctx, syncedClaim)\n\n\tctx.EventManager().EmitEvent(\n\t\tsdk.NewEvent(\n\t\t\ttypes.EventTypeClaim,\n\t\t\tsdk.NewAttribute(types.AttributeKeyClaimedBy, owner.String()),\n\t\t\tsdk.NewAttribute(types.AttributeKeyClaimAmount, claimingCoins.String()),\n\t\t\tsdk.NewAttribute(types.AttributeKeyClaimType, syncedClaim.GetType()),\n\t\t),\n\t)\n\treturn nil\n}", "func (t *truth) updateRedoRule(n *node) error {\n\tqid := n.event.QueryID\n\tbyQuery := t.byQuery[n.event.ParentID]\n\tbyQuery[len(byQuery)-1].AddEdge(n)\n\tt.byTime = n\n\tt.byQuery[qid] = append(t.byQuery[qid], n)\n\treturn nil\n}", "func (va ClawbackVestingAccount) postReward(ctx sdk.Context, reward sdk.Coins, ak AccountKeeper, bk BankKeeper, sk StakingKeeper) {\n\t// Find the scheduled amount of vested and unvested staking tokens\n\tbondDenom := sk.BondDenom(ctx)\n\tvested := ReadSchedule(va.StartTime, va.EndTime, va.VestingPeriods, va.OriginalVesting, ctx.BlockTime().Unix()).AmountOf(bondDenom)\n\tunvested := va.OriginalVesting.AmountOf(bondDenom).Sub(vested)\n\n\tif unvested.IsZero() {\n\t\t// no need to adjust the vesting schedule\n\t\treturn\n\t}\n\n\tif vested.IsZero() {\n\t\t// all staked tokens must be unvested\n\t\tva.distributeReward(ctx, ak, bondDenom, reward)\n\t\treturn\n\t}\n\n\t// Find current split of account balance on staking axis\n\tbonded := sk.GetDelegatorBonded(ctx, va.GetAddress())\n\tunbonding := sk.GetDelegatorUnbonding(ctx, va.GetAddress())\n\tdelegated := bonded.Add(unbonding)\n\n\t// discover what has been slashed and remove from delegated amount\n\toldDelegated := va.DelegatedVesting.AmountOf(bondDenom).Add(va.DelegatedFree.AmountOf(bondDenom))\n\tslashed := oldDelegated.Sub(intMin(oldDelegated, delegated))\n\tdelegated = delegated.Sub(intMin(delegated, slashed))\n\n\t// Prefer delegated tokens to be unvested\n\tunvested = intMin(unvested, delegated)\n\tvested = delegated.Sub(unvested)\n\n\t// Compute the unvested amount of reward and add to vesting schedule\n\tif unvested.IsZero() {\n\t\treturn\n\t}\n\tif vested.IsZero() {\n\t\tva.distributeReward(ctx, ak, bondDenom, reward)\n\t\treturn\n\t}\n\tunvestedRatio := unvested.ToDec().QuoTruncate(bonded.ToDec()) // round down\n\tunvestedReward := scaleCoins(reward, unvestedRatio)\n\tva.distributeReward(ctx, ak, bondDenom, unvestedReward)\n}", "func (e *TarantoolEngine) addHistory(chID ChannelID, message Message, size, lifetime int64) (err error) {\n\t// not implemented\n\treturn\n}", "func (d *Dao) HasReward(c context.Context, 
uid int64) (r int64, err error) {\n\trst, err := d.GetHasReward(c, uid)\n\tif err != nil {\n\t\tif err == memcache.ErrNotFound {\n\t\t\treward, err2 := d.findByUid(uid, true)\n\t\t\tif err2 != nil {\n\t\t\t\treturn rst, err2\n\t\t\t}\n\t\t\tif reward != nil {\n\t\t\t\trst = int64(1)\n\t\t\t\td.SetHasReward(c, uid, rst)\n\t\t\t} else {\n\t\t\t\trst = int64(0)\n\t\t\t\td.SetHasReward(c, uid, rst)\n\t\t\t}\n\t\t\treturn rst, err\n\t\t}\n\t\tlog.Error(\"HasReward(%v) error(%v)\", uid, err)\n\t\treturn rst, err\n\t}\n\treturn rst, err\n}", "func (o *Post) AddPostHistories(ctx context.Context, exec boil.ContextExecutor, insert bool, related ...*PostHistory) error {\n\tvar err error\n\tfor _, rel := range related {\n\t\tif insert {\n\t\t\trel.PostID = o.ID\n\t\t\tif err = rel.Insert(ctx, exec, boil.Infer()); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"failed to insert into foreign table\")\n\t\t\t}\n\t\t} else {\n\t\t\tupdateQuery := fmt.Sprintf(\n\t\t\t\t\"UPDATE \\\"post_histories\\\" SET %s WHERE %s\",\n\t\t\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 1, []string{\"post_id\"}),\n\t\t\t\tstrmangle.WhereClause(\"\\\"\", \"\\\"\", 2, postHistoryPrimaryKeyColumns),\n\t\t\t)\n\t\t\tvalues := []interface{}{o.ID, rel.ID}\n\n\t\t\tif boil.DebugMode {\n\t\t\t\tfmt.Fprintln(boil.DebugWriter, updateQuery)\n\t\t\t\tfmt.Fprintln(boil.DebugWriter, values)\n\t\t\t}\n\n\t\t\tif _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"failed to update foreign table\")\n\t\t\t}\n\n\t\t\trel.PostID = o.ID\n\t\t}\n\t}\n\n\tif o.R == nil {\n\t\to.R = &postR{\n\t\t\tPostHistories: related,\n\t\t}\n\t} else {\n\t\to.R.PostHistories = append(o.R.PostHistories, related...)\n\t}\n\n\tfor _, rel := range related {\n\t\tif rel.R == nil {\n\t\t\trel.R = &postHistoryR{\n\t\t\t\tPost: o,\n\t\t\t}\n\t\t} else {\n\t\t\trel.R.Post = o\n\t\t}\n\t}\n\treturn nil\n}", "func (bbo *TabularBBO) LastUpdate(s []float64, a int, r float64, rng *mathlib.Random) {\n\t// If ready to update, update and wipe the states, actions, and rewards.\n\tif bbo.ep.LastUpdate(s, a, r) {\n\t\tbbo.episodeLimitReached(rng)\n\t}\n}", "func (brutx *BotRecordUpdateTransaction) UpdateBotRecord(blockTime types.Timestamp, record *BotRecord) error {\n\tvar err error\n\n\t// if the record indicate the bot is expired, we ensure to reset the names,\n\t// and also make sure the NrOfMonths is greater than 0\n\tif record.IsExpired(blockTime) {\n\t\tif brutx.NrOfMonths == 0 {\n\t\t\treturn errors.New(\"record update Tx does not make bot active, while bot is already expired\")\n\t\t}\n\t\trecord.ResetNames()\n\t}\n\n\t// update the expiration time\n\tif brutx.NrOfMonths != 0 {\n\t\terr = record.ExtendExpirationDate(blockTime, brutx.NrOfMonths)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// remove all addresses first, afterwards add the new addresses.\n\t// By removing first we ensure that we can add addresses that were removed by this Tx,\n\t// but more importantly it ensures that we don't invalidly report that an overflow has happened.\n\terr = record.RemoveNetworkAddresses(brutx.Addresses.Remove...) // passing a nil slice is valid\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = record.AddNetworkAddresses(brutx.Addresses.Add...) 
// passing a nil slice is valid\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// remove all names first, afterwards add the new names.\n\t// By removing first we ensure that we can add names that were removed by this Tx,\n\t// but more importantly it ensures that we don't invalidly report that an overflow has happened.\n\terr = record.RemoveNames(brutx.Names.Remove...) // passing a nil slice is valid\n\tif err != nil {\n\t\t// an error will also occur here, in case names are removed from a bot that was previously inactive,\n\t\t// as our earlier logic has already reset the names of the revord, making this step implicitly invalid,\n\t\t// which is what we want, as an inative revord no longer owns any names, no matter what was last known about the record.\n\t\treturn err\n\t}\n\terr = record.AddNames(brutx.Names.Add...) // passing a nil slice is valid\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// all good\n\treturn nil\n}", "func MeanReward(r []*Rollout) float64 {\n\tvar sum float64\n\tfor _, x := range r {\n\t\tsum += x.Reward\n\t}\n\treturn sum / float64(len(r))\n}", "func (va *ClawbackVestingAccount) PostReward(ctx sdk.Context, reward sdk.Coins, action exported.RewardAction) error {\n\treturn action.ProcessReward(ctx, reward, va)\n}", "func (s *Server) History(w http.ResponseWriter, r *http.Request) {\n\tteam, err := s.currentTeam(w, r)\n\tif err != nil {\n\t\ts.unauthorized(w, err)\n\t\treturn\n\t}\n\n\tscores, err := models.NewScore().History(team.ID)\n\tif err != nil {\n\t\tError(w, http.StatusNotFound, err, \"failed to get history\")\n\t}\n\tJSON(w, http.StatusOK, scores)\n}", "func (_RewardsDistributionRecipient *RewardsDistributionRecipientTransactorSession) NotifyRewardAmount(reward *big.Int) (*types.Transaction, error) {\n\treturn _RewardsDistributionRecipient.Contract.NotifyRewardAmount(&_RewardsDistributionRecipient.TransactOpts, reward)\n}", "func (_XStaking *XStakingFilterer) WatchRewardAdded(opts *bind.WatchOpts, sink chan<- *XStakingRewardAdded) (event.Subscription, error) {\n\n\tlogs, sub, err := _XStaking.contract.WatchLogs(opts, \"RewardAdded\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(XStakingRewardAdded)\n\t\t\t\tif err := _XStaking.contract.UnpackLog(event, \"RewardAdded\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func (path *Path) Rewards() map[*Reward]int {\n\treturn path.rewards\n}", "func (w *Wallet) applyHistory(cc modules.ConsensusChange) {\n\tfor _, block := range cc.AppliedBlocks {\n\t\tw.consensusSetHeight++\n\t\t// Apply the miner payout transaction if applicable.\n\t\tminerPT := modules.ProcessedTransaction{\n\t\t\tTransaction: types.Transaction{},\n\t\t\tTransactionID: types.TransactionID(block.ID()),\n\t\t\tConfirmationHeight: w.consensusSetHeight,\n\t\t\tConfirmationTimestamp: block.Timestamp,\n\t\t}\n\t\trelevant := false\n\t\tfor i, mp := range block.MinerPayouts {\n\t\t\t_, exists := w.keys[mp.UnlockHash]\n\t\t\tif exists {\n\t\t\t\trelevant = true\n\t\t\t}\n\t\t\tminerPT.Outputs = 
append(minerPT.Outputs, modules.ProcessedOutput{\n\t\t\t\tFundType: types.SpecifierMinerPayout,\n\t\t\t\tMaturityHeight: w.consensusSetHeight + w.chainCts.MaturityDelay,\n\t\t\t\tWalletAddress: exists,\n\t\t\t\tRelatedAddress: mp.UnlockHash,\n\t\t\t\tValue: mp.Value,\n\t\t\t})\n\t\t\tw.historicOutputs[types.OutputID(block.MinerPayoutID(uint64(i)))] = historicOutput{\n\t\t\t\tUnlockHash: mp.UnlockHash,\n\t\t\t\tValue: mp.Value,\n\t\t\t}\n\t\t}\n\t\tif relevant {\n\t\t\tw.processedTransactions = append(w.processedTransactions, minerPT)\n\t\t\tw.processedTransactionMap[minerPT.TransactionID] = &w.processedTransactions[len(w.processedTransactions)-1]\n\t\t}\n\n\t\tblockheight, blockexists := w.cs.BlockHeightOfBlock(block)\n\t\tif !blockexists {\n\t\t\tbuild.Critical(\"Block wherer ubs is used to respent, does not yet exist as processedblock\")\n\t\t}\n\n\t\tfor ti, txn := range block.Transactions {\n\t\t\trelevant := false\n\t\t\tpt := modules.ProcessedTransaction{\n\t\t\t\tTransaction: txn,\n\t\t\t\tTransactionID: txn.ID(),\n\t\t\t\tConfirmationHeight: w.consensusSetHeight,\n\t\t\t\tConfirmationTimestamp: block.Timestamp,\n\t\t\t}\n\t\t\tfor _, sci := range txn.CoinInputs {\n\t\t\t\toutput := w.historicOutputs[types.OutputID(sci.ParentID)]\n\t\t\t\t_, exists := w.keys[output.UnlockHash]\n\t\t\t\tif exists {\n\t\t\t\t\trelevant = true\n\t\t\t\t} else if _, exists = w.multiSigCoinOutputs[sci.ParentID]; exists {\n\t\t\t\t\t// Since we know about every multisig output that is still open and releated,\n\t\t\t\t\t// any relevant multisig input must have a parent ID present in the multisig\n\t\t\t\t\t// output map.\n\t\t\t\t\trelevant = true\n\t\t\t\t\t// set \"exists\" to false since the output is not owned by the wallet.\n\t\t\t\t\texists = false\n\t\t\t\t}\n\t\t\t\tpt.Inputs = append(pt.Inputs, modules.ProcessedInput{\n\t\t\t\t\tFundType: types.SpecifierCoinInput,\n\t\t\t\t\tWalletAddress: exists,\n\t\t\t\t\tRelatedAddress: output.UnlockHash,\n\t\t\t\t\tValue: output.Value,\n\t\t\t\t})\n\t\t\t}\n\t\t\tfor i, sco := range txn.CoinOutputs {\n\t\t\t\t_, exists := w.keys[sco.Condition.UnlockHash()]\n\t\t\t\tif exists {\n\t\t\t\t\trelevant = true\n\t\t\t\t} else if _, exists = w.multiSigCoinOutputs[txn.CoinOutputID(uint64(i))]; exists {\n\t\t\t\t\t// If the coin output is a relevant multisig output, it's ID will already\n\t\t\t\t\t// be present in the multisigCoinOutputs map\n\t\t\t\t\trelevant = true\n\t\t\t\t\t// set \"exists\" to false since the output is not owned by the wallet.\n\t\t\t\t\texists = false\n\t\t\t\t}\n\t\t\t\tuh := sco.Condition.UnlockHash()\n\t\t\t\tpt.Outputs = append(pt.Outputs, modules.ProcessedOutput{\n\t\t\t\t\tFundType: types.SpecifierCoinOutput,\n\t\t\t\t\tMaturityHeight: w.consensusSetHeight,\n\t\t\t\t\tWalletAddress: exists,\n\t\t\t\t\tRelatedAddress: uh,\n\t\t\t\t\tValue: sco.Value,\n\t\t\t\t})\n\t\t\t\tw.historicOutputs[types.OutputID(txn.CoinOutputID(uint64(i)))] = historicOutput{\n\t\t\t\t\tUnlockHash: uh,\n\t\t\t\t\tValue: sco.Value,\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, sfi := range txn.BlockStakeInputs {\n\t\t\t\toutput := w.historicOutputs[types.OutputID(sfi.ParentID)]\n\t\t\t\t_, exists := w.keys[output.UnlockHash]\n\t\t\t\tif exists {\n\t\t\t\t\trelevant = true\n\t\t\t\t} else if _, exists = w.multiSigBlockStakeOutputs[sfi.ParentID]; exists {\n\t\t\t\t\t// Since we know about every multisig output that is still open and releated,\n\t\t\t\t\t// any relevant multisig input must have a parent ID present in the multisig\n\t\t\t\t\t// output map.\n\t\t\t\t\trelevant = 
true\n\t\t\t\t\t// set \"exists\" to false since the output is not owned by the wallet.\n\t\t\t\t\texists = false\n\t\t\t\t}\n\t\t\t\tpt.Inputs = append(pt.Inputs, modules.ProcessedInput{\n\t\t\t\t\tFundType: types.SpecifierBlockStakeInput,\n\t\t\t\t\tWalletAddress: exists,\n\t\t\t\t\tRelatedAddress: output.UnlockHash,\n\t\t\t\t\tValue: output.Value,\n\t\t\t\t})\n\t\t\t}\n\t\t\tfor i, sfo := range txn.BlockStakeOutputs {\n\t\t\t\t_, exists := w.keys[sfo.Condition.UnlockHash()]\n\t\t\t\tif exists {\n\t\t\t\t\trelevant = true\n\t\t\t\t} else if _, exists = w.multiSigBlockStakeOutputs[txn.BlockStakeOutputID(uint64(i))]; exists {\n\t\t\t\t\t// If the block stake output is a relevant multisig output, it's ID will already\n\t\t\t\t\t// be present in the multisigBlockStakeOutputs map\n\t\t\t\t\trelevant = true\n\t\t\t\t\t// set \"exists\" to false since the output is not owned by the wallet.\n\t\t\t\t\texists = false\n\t\t\t\t}\n\t\t\t\tuh := sfo.Condition.UnlockHash()\n\t\t\t\tpt.Outputs = append(pt.Outputs, modules.ProcessedOutput{\n\t\t\t\t\tFundType: types.SpecifierBlockStakeOutput,\n\t\t\t\t\tMaturityHeight: w.consensusSetHeight,\n\t\t\t\t\tWalletAddress: exists,\n\t\t\t\t\tRelatedAddress: uh,\n\t\t\t\t\tValue: sfo.Value,\n\t\t\t\t})\n\t\t\t\tbsoid := txn.BlockStakeOutputID(uint64(i))\n\t\t\t\t_, exists = w.blockstakeOutputs[bsoid]\n\t\t\t\tif exists {\n\t\t\t\t\tw.unspentblockstakeoutputs[bsoid] = types.UnspentBlockStakeOutput{\n\t\t\t\t\t\tBlockStakeOutputID: bsoid,\n\t\t\t\t\t\tIndexes: types.BlockStakeOutputIndexes{\n\t\t\t\t\t\t\tBlockHeight: blockheight,\n\t\t\t\t\t\t\tTransactionIndex: uint64(ti),\n\t\t\t\t\t\t\tOutputIndex: uint64(i),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValue: sfo.Value,\n\t\t\t\t\t\tCondition: sfo.Condition,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tw.historicOutputs[types.OutputID(bsoid)] = historicOutput{\n\t\t\t\t\tUnlockHash: uh,\n\t\t\t\t\tValue: sfo.Value,\n\t\t\t\t}\n\t\t\t}\n\t\t\tif relevant {\n\t\t\t\tw.processedTransactions = append(w.processedTransactions, pt)\n\t\t\t\tw.processedTransactionMap[pt.TransactionID] = &w.processedTransactions[len(w.processedTransactions)-1]\n\t\t\t}\n\t\t}\n\t}\n\t// Reset spent outputs map\n\tw.spentOutputs = make(map[types.OutputID]types.BlockHeight)\n}", "func (s *SimpleChaincode) pat_gethistory(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\tif len(args) < 2 {\n\t\treturn shim.Error(\"Incorrect number of arguments. Expecting 3\")\n\t}\n\n\tpatKey := args[1]\n\tfmt.Printf(\"##### start History of Record: %s\\n\", patKey)\n\n\tresultsIterator, err := stub.GetHistoryForKey(patKey)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\tdefer resultsIterator.Close()\n\n\t// buffer is a JSON array containing historic values for the marble\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"[\")\n\n\tbArrayMemberAlreadyWritten := false\n\tfor resultsIterator.HasNext() {\n\t\tresponse, err := resultsIterator.Next()\n\t\tif err != nil {\n\t\t\treturn shim.Error(err.Error())\n\t\t}\n\t\t// Add a comma before array members, suppress it for the first array member\n\t\tif bArrayMemberAlreadyWritten == true {\n\t\t\tbuffer.WriteString(\",\")\n\t\t}\n\t\tbuffer.WriteString(\"{\\\"TxId\\\":\")\n\t\tbuffer.WriteString(\"\\\"\")\n\t\tbuffer.WriteString(response.TxId)\n\t\tbuffer.WriteString(\"\\\"\")\n\n\t\tbuffer.WriteString(\", \\\"Value\\\":\")\n\t\t// if it was a delete operation on given key, then we need to set the\n\t\t//corresponding value null. 
Else, we will write the response.Value\n\t\t//as-is (as the Value itself a JSON marble)\n\t\tif response.IsDelete {\n\t\t\tbuffer.WriteString(\"null\")\n\t\t} else {\n\t\t\tbuffer.WriteString(string(response.Value))\n\t\t}\n\n\t\tbuffer.WriteString(\", \\\"Timestamp\\\":\")\n\t\tbuffer.WriteString(\"\\\"\")\n\t\tbuffer.WriteString(time.Unix(response.Timestamp.Seconds, int64(response.Timestamp.Nanos)).String())\n\t\tbuffer.WriteString(\"\\\"\")\n\n\t\tbuffer.WriteString(\", \\\"IsDelete\\\":\")\n\t\tbuffer.WriteString(\"\\\"\")\n\t\tbuffer.WriteString(strconv.FormatBool(response.IsDelete))\n\t\tbuffer.WriteString(\"\\\"\")\n\n\t\tbuffer.WriteString(\"}\")\n\t\tbArrayMemberAlreadyWritten = true\n\t}\n\tbuffer.WriteString(\"]\")\n\n\tfmt.Printf(\"- getHistoryForPatient returning:\\n%s\\n\", buffer.String())\n\n\treturn shim.Success(buffer.Bytes())\n}", "func getAccumulatedRewards(ctx sdk.Context, distKeeper types.DistributionKeeper, delegation stakingtypes.Delegation) ([]wasmvmtypes.Coin, error) {\n\t// Try to get *delegator* reward info!\n\tparams := distributiontypes.QueryDelegationRewardsRequest{\n\t\tDelegatorAddress: delegation.DelegatorAddress,\n\t\tValidatorAddress: delegation.ValidatorAddress,\n\t}\n\tcache, _ := ctx.CacheContext()\n\tqres, err := distKeeper.DelegationRewards(sdk.WrapSDKContext(cache), &params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// now we have it, convert it into wasmvm types\n\trewards := make([]wasmvmtypes.Coin, len(qres.Rewards))\n\tfor i, r := range qres.Rewards {\n\t\trewards[i] = wasmvmtypes.Coin{\n\t\t\tDenom: r.Denom,\n\t\t\tAmount: r.Amount.TruncateInt().String(),\n\t\t}\n\t}\n\treturn rewards, nil\n}", "func (routingHandler *RoutingHandler) updateRoutingTable(whisperStatus *gossiper.WhisperStatus, address *net.UDPAddr) {\n\n\troutingHandler.mutex.Lock()\n\tdefer routingHandler.mutex.Unlock()\n\n\t// if new packet with higher id, updateEnvelopes table\n\tif routingHandler.updateLastOriginID(whisperStatus.Origin, whisperStatus.ID) {\n\n\t\tstatus, loaded := routingHandler.peerStatus[address.String()]\n\t\tif !loaded {\n\t\t\troutingHandler.peerStatus[address.String()] = &Status{}\n\t\t\tstatus, _ = routingHandler.peerStatus[address.String()]\n\t\t}\n\n\t\tif whisperStatus.Code == bloomFilterExCode || whisperStatus.Code == statusCode {\n\t\t\tif whisperStatus.Bloom != nil && len(whisperStatus.Bloom) == BloomFilterSize {\n\t\t\t\tif loaded {\n\t\t\t\t\tstatus.Bloom = AggregateBloom(whisperStatus.Bloom, status.Bloom)\n\t\t\t\t} else {\n\t\t\t\t\tstatus.Bloom = whisperStatus.Bloom\n\t\t\t\t}\n\t\t\t\t//fmt.Println(\"\\nWhisper: routing table updated for BloomFilter, peer entry \" + address.String())\n\t\t\t}\n\t\t}\n\n\t\tif whisperStatus.Code == powRequirementCode || whisperStatus.Code == statusCode {\n\t\t\tif !(math.IsInf(whisperStatus.Pow, 0) || math.IsNaN(whisperStatus.Pow) || whisperStatus.Pow < 0.0) {\n\t\t\t\tif loaded {\n\t\t\t\t\tstatus.Pow = math.Min(status.Pow, whisperStatus.Pow)\n\t\t\t\t} else {\n\t\t\t\t\tstatus.Pow = whisperStatus.Pow\n\t\t\t\t}\n\t\t\t\t//fmt.Println(\"\\nWhisper: routing table updated for PoW, peer entry \" + address.String())\n\t\t\t}\n\t\t}\n\t\t//fmt.Println(\"\\nWhisper: routing table updated, peer entry \" + address.String())\n\t}\n}", "func (o InstanceOutput) UpgradeHistory() UpgradeHistoryEntryResponseArrayOutput {\n\treturn o.ApplyT(func(v *Instance) UpgradeHistoryEntryResponseArrayOutput { return v.UpgradeHistory }).(UpgradeHistoryEntryResponseArrayOutput)\n}", "func appendUFATransactionHistory(stub 
shim.ChaincodeStubInterface, ufanumber string, payload string) error {\r\n\tvar recordList []string\r\n\r\n\tlogger.Info(\"Appending to transaction history \" + ufanumber)\r\n\trecBytes, _ := stub.GetState(UFA_TRXN_PREFIX + ufanumber)\r\n\r\n\tif recBytes == nil {\r\n\t\tlogger.Info(\"Updating the transaction history for the first time\")\r\n\t\trecordList = make([]string, 0)\r\n\t} else {\r\n\t\terr := json.Unmarshal(recBytes, &recordList)\r\n\t\tif err != nil {\r\n\t\t\treturn errors.New(\"Failed to unmarshal appendUFATransactionHistory \")\r\n\t\t}\r\n\t}\r\n\trecordList = append(recordList, payload)\r\n\tbytesToStore, _ := json.Marshal(recordList)\r\n\tlogger.Info(\"After updating the transaction history\" + string(bytesToStore))\r\n\tstub.PutState(UFA_TRXN_PREFIX+ufanumber, bytesToStore)\r\n\tlogger.Info(\"Appending to transaction history \" + ufanumber + \" Done!!\")\r\n\treturn nil\r\n}", "func rewardRate(pool sdk.Coins, blocks int64) sdk.Coins {\n\tcoins := make([]sdk.Coin, 0)\n\tif blocks > 0 {\n\t\tfor _, coin := range pool {\n\t\t\tif coin.IsZero() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// divide by blocks, rounding fractions up\n\t\t\t// (coin.Amount - 1)/blocks + 1\n\t\t\trate := coin.Amount.SubRaw(1).QuoRaw(blocks).AddRaw(1)\n\t\t\tcoins = append(coins, sdk.NewCoin(coin.GetDenom(), rate))\n\t\t}\n\t}\n\treturn sdk.NewCoins(coins...)\n}", "func updateLoanState(ls *models.LoanState, db *gorm.DB) error {\n\tomitList := []string{\"id\", \"deleted_at\"}\n\terr := db.Model(ls).Omit(omitList...).Save(ls).Error\n\treturn err\n}", "func (t *Transaction) Reward() string {\n\treturn t.reward\n}", "func (s *PublicSfcAPI) GetRewardWeights(ctx context.Context, stakerID hexutil.Uint) (map[string]interface{}, error) {\n\tbaseRewardWeight, txRewardWeight, err := s.b.GetRewardWeights(ctx, idx.StakerID(stakerID))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif baseRewardWeight == nil || txRewardWeight == nil {\n\t\treturn nil, nil\n\t}\n\treturn map[string]interface{}{\n\t\t\"baseRewardWeight\": (*hexutil.Big)(baseRewardWeight),\n\t\t\"txRewardWeight\": (*hexutil.Big)(txRewardWeight),\n\t}, nil\n}", "func (puo *PatientrecordUpdateOne) AddHistorytaking(h ...*Historytaking) *PatientrecordUpdateOne {\n\tids := make([]int, len(h))\n\tfor i := range h {\n\t\tids[i] = h[i].ID\n\t}\n\treturn puo.AddHistorytakingIDs(ids...)\n}", "func (_Lmc *LmcCallerSession) GetUserAccumulatedReward(_userAddress common.Address, tokenIndex *big.Int) (*big.Int, error) {\n\treturn _Lmc.Contract.GetUserAccumulatedReward(&_Lmc.CallOpts, _userAddress, tokenIndex)\n}" ]
[ "0.60458004", "0.5921379", "0.5813828", "0.5810859", "0.5760177", "0.5708256", "0.55621254", "0.5539769", "0.5532216", "0.5519636", "0.54246104", "0.54080886", "0.53458893", "0.53254765", "0.53176856", "0.5306303", "0.527214", "0.52710384", "0.5270779", "0.52167344", "0.52020156", "0.519777", "0.5185662", "0.5153718", "0.5133926", "0.51125324", "0.5074409", "0.50686735", "0.50390923", "0.5005286", "0.49994683", "0.49931285", "0.49490663", "0.48490855", "0.48377857", "0.48355532", "0.4807257", "0.4791437", "0.4788366", "0.4766619", "0.4763956", "0.47606927", "0.47573155", "0.47008535", "0.46892628", "0.46884656", "0.46807548", "0.464973", "0.46467662", "0.46101218", "0.46069148", "0.460262", "0.45972082", "0.45884174", "0.45785868", "0.45712173", "0.4571051", "0.45468858", "0.454088", "0.45397553", "0.4518378", "0.45065743", "0.45047334", "0.4489074", "0.44792497", "0.4476978", "0.4462474", "0.44492346", "0.44427308", "0.4438635", "0.44327915", "0.44317624", "0.44269827", "0.4425374", "0.44243112", "0.44235653", "0.44193897", "0.4414898", "0.44132087", "0.4410489", "0.43958464", "0.43925664", "0.4391789", "0.4384897", "0.43841484", "0.43781507", "0.43696457", "0.4366985", "0.43651843", "0.43622646", "0.43598148", "0.43512934", "0.43504715", "0.4348658", "0.434649", "0.43428314", "0.43399417", "0.4332627", "0.4328184", "0.43269345" ]
0.8028916
0
stringToBigInt transforms a string to a big int
func stringToBigInt(estr string) (*big.Int, error) {
	ret, ok := big.NewInt(0).SetString(estr, 10)
	if !ok {
		return nil, errors.New("failed to parse string to big int")
	}
	return ret, nil
}
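A minimal usage sketch for the function above. The main wrapper and the sample values are illustrative assumptions, not part of the dataset entry; the function body is repeated only so the snippet compiles on its own.

package main

import (
	"errors"
	"fmt"
	"math/big"
)

// Copy of the document function: parses a base-10 string into a *big.Int.
func stringToBigInt(estr string) (*big.Int, error) {
	ret, ok := big.NewInt(0).SetString(estr, 10)
	if !ok {
		return nil, errors.New("failed to parse string to big int")
	}
	return ret, nil
}

func main() {
	// Values larger than int64 parse without loss of precision.
	n, err := stringToBigInt("123456789012345678901234567890")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(n.String()) // 123456789012345678901234567890

	// Base-10 parsing rejects hex-prefixed input, so this exercises the error path.
	if _, err := stringToBigInt("0x1f"); err != nil {
		fmt.Println(err) // failed to parse string to big int
	}
}

SetString reports failure only through its boolean result, so the wrapper converts that into an error value, which is easier to propagate through Go call chains.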
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func stringToBigInt(stringValue string) *big.Int {\n\n\tintToReturn := big.NewInt(0)\n\tintToReturn.SetString(stringValue, 10)\n\n\treturn intToReturn\n}", "func HexStrToBigInt(s string) (*big.Int, error) {\n\tregstr := \"^0[xX][0-9a-fA-F]+$\"\n\tif matched, err := regexp.Match(regstr, []byte(s)); err != nil || !matched {\n\t\treturn nil, errors.New(\"Invalid hex string\")\n\t}\n\n\ti, b := new(big.Int).SetString(s[2:], 16)\n\tif !b {\n\t\treturn nil, errors.New(\"Invalid hex string\")\n\t}\n\n\treturn i, nil\n}", "func HexStrToBigInt(hexString string) (*big.Int, error) {\n\tvalue := new(big.Int)\n\t_, ok := value.SetString(Trim0x(hexString), 16)\n\tif !ok {\n\t\treturn value, fmt.Errorf(\"Could not transform hex string to big int: %s\", hexString)\n\t}\n\n\treturn value, nil\n}", "func ParseBigInt(s string, base int) (*big.Int, error) {\n\tif base < 0 || base > 16 {\n\t\treturn nil, errors.New(\"ParseBigInt: invalid base\")\n\t}\n\ts = strings.Replace(s, \"\\n\", \"\", -1)\n\tz, ok := new(big.Int).SetString(s, base)\n\tif !ok {\n\t\treturn nil, errors.New(\"ParseBigInt: invalid string\")\n\t}\n\treturn z, nil\n}", "func ParseBigInt(s string, base int) (*big.Int, error) {\n\tif base < 0 || base > 16 {\n\t\treturn nil, errors.New(\"ParseBigInt: invalid base\")\n\t}\n\ts = strings.Replace(s, \"\\n\", \"\", -1)\n\tz, ok := new(big.Int).SetString(s, base)\n\tif !ok {\n\t\treturn nil, errors.New(\"ParseBigInt: invalid string\")\n\t}\n\treturn z, nil\n}", "func ParseBigInt(str string) (_ *cells.BinaryCell, err error) {\n\tbytes, err := hex.DecodeString(str)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn cells.New(OpUint64, bytes), nil\n}", "func DecodeBigInt(txt string) (*big.Int, error) {\n\tif txt == \"\" {\n\t\treturn new(big.Int), nil // Defaults to 0\n\t}\n\tres, success := new(big.Int).SetString(txt, 10)\n\tif !success {\n\t\treturn nil, fmt.Errorf(\"cannot decode %v into big.Int\", txt)\n\t}\n\treturn res, nil\n}", "func B64ToBigInt(in string, b *big.Int) (err error) {\n\tlength := base64.StdEncoding.DecodedLen(len(in))\n\tbuff := make([]byte, length)\n\tn, err := base64.StdEncoding.Decode(buff, bytes.NewBufferString(in).Bytes())\n\t//neg := false\n\tif err == nil {\n\t\tbuff = buff[0:n]\n\t\t//if buff[0]&0x80 == 0x80 {\n\t\t//\tneg = true\n\t\t//\tbuff[0] &= 0x7f\n\t\t//}\n\t\tb.SetBytes(buff)\n\t\t// In case the passed in big was negative...\n\t\t//b.Abs(b)\n\t\t//if neg {\n\t\t//\tb.Neg(b)\n\t\t//}\n\t}\n\treturn\n}", "func mustBigInt(v string) *big.Int {\n\tvar x big.Int\n\tif err := x.UnmarshalText([]byte(v)); err != nil {\n\t\tpanic(err)\n\t}\n\treturn &x\n}", "func NewBigInt(v string, base int) *big.Int {\n b := big.NewInt(0)\n b.SetString(v, base)\n return b\n}", "func chunkIDAsBigInt(chunkID string) (*big.Int, error) {\n\tif chunkID == \"\" {\n\t\t// \"\" indicates start of table. This is one before\n\t\t// ID 00000 .... 
00000.\n\t\treturn big.NewInt(-1), nil\n\t}\n\tidBytes, err := hex.DecodeString(chunkID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tid := big.NewInt(0)\n\tid.SetBytes(idBytes)\n\treturn id, nil\n}", "func decodeBigInt(s *Stream, val reflect.Value) error {\n\t// get the content of the string\n\tb, err := s.Bytes()\n\tif err != nil {\n\t\treturn wrapStreamError(err, val.Type())\n\t}\n\n\t// assign i to *big.Int type (its' original type instead of the reflect.val type)\n\ti := val.Interface().(*big.Int)\n\n\t// this means the storage prepared to store the data has some flaws, it pointed nil\n\t// therefore, we need to fix this storage, and make this storage be able to store the data\n\tif i == nil {\n\t\t// allocated space and let i pointed to it\n\t\ti = new(big.Int)\n\t\t// pass the address pointed by i (ValuOf(i)) to val (data synchronization)\n\t\tval.Set(reflect.ValueOf(i))\n\t}\n\n\t// no leading 0s\n\tif len(b) > 0 && b[0] == 0 {\n\t\treturn wrapStreamError(ErrCanonInt, val.Type())\n\t}\n\n\t// assigning values\n\ti.SetBytes(b)\n\treturn nil\n}", "func lettersToBigInt(seq alphabet.Letters) (*big.Int, error) {\n\tout := big.NewInt(0)\n\twords := make([]big.Word, len(seq)/33+1)\n\tfor i := range seq {\n\t\tindex := alphabet.DNA.IndexOf(seq[len(seq)-i-1])\n\t\tif index < 0 {\n\t\t\treturn out, fmt.Errorf(\"Sequence is not a valid DNA sequence at position %d\\n\", i+1)\n\t\t} else {\n\t\t\twordIndex := i / 32\n\t\t\tshiftDist := uint(i-wordIndex*32) * 2\n\t\t\twords[wordIndex] |= big.Word(index << shiftDist)\n\t\t}\n\t}\n\treturn out.SetBits(words), nil\n}", "func toInt(str string) (int64, error) {\n\tres, err := strconv.ParseInt(str, 0, 64)\n\tif err != nil {\n\t\tres = 0\n\t}\n\treturn res, err\n}", "func ToInt(x string) *big.Int {\n\tvar i big.Int\n\ti.SetBytes([]byte(x[:]))\n\treturn &i\n}", "func (b64) FromBigInt(b *big.Int, size int) string {\n\tdata := b.Bytes()\n\tif size > 0 {\n\t\tdata = bytesPadding(data, size)\n\t}\n\n\treturn base64.RawURLEncoding.EncodeToString(data)\n}", "func Txt2int(s string) uint64 {\n\tx := uint64(len(s)) * Prime\n\tfor i := len(s) - 1; i >= 0; i-- {\n\t\tx ^= uint64(s[i])\n\t\tx *= Prime\n\t}\n\treturn x\n}", "func (t TokenID) BigInt() *big.Int {\n\treturn utils.ByteSliceToBigInt(t[:])\n}", "func DecodeBig(input string) (*big.Int, error) {\n\traw, err := checkNumber(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(raw) > 64 {\n\t\treturn nil, ErrBig256Range\n\t}\n\twords := make([]big.Word, len(raw)/bigWordNibbles+1)\n\tend := len(raw)\n\tfor i := range words {\n\t\tstart := end - bigWordNibbles\n\t\tif start < 0 {\n\t\t\tstart = 0\n\t\t}\n\t\tfor ri := start; ri < end; ri++ {\n\t\t\tnib := decodeNibble(raw[ri])\n\t\t\tif nib == badNibble {\n\t\t\t\treturn nil, ErrSyntax\n\t\t\t}\n\t\t\twords[i] *= 16\n\t\t\twords[i] += big.Word(nib)\n\t\t}\n\t\tend = start\n\t}\n\tdec := new(big.Int).SetBits(words)\n\treturn dec, nil\n}", "func StringToInt(s string) (int64, error) {\n\treturn strconv.ParseInt(s, 10, 64)\n}", "func (z *Element22) ToBigInt(res *big.Int) *big.Int {\n\tbits := (*[22]big.Word)(unsafe.Pointer(z))\n\treturn res.SetBits(bits[:])\n}", "func base64ToInt(s string) *big.Int {\n\tvar tmp big.Int\n\tsb := []byte(s)\n\tfor i := len(sb) - 1; i >= 0; i-- {\n\t\tb := big.NewInt(base64de[sb[i]])\n\t\ttmp.Lsh(&tmp, 6).Or(&tmp, b)\n\t}\n\treturn &tmp\n}", "func IntSetString(z *big.Int, s string, base int) (*big.Int, bool)", "func Hex(s string) Integer { return integer{x: bigint.MustHex(s)} }", "func IntString(x *big.Int,) string", "func 
byteArrayToBigInt(bytes []byte) *big.Int {\n\ttotalPower := big.NewInt(1)\n\tpower := big.NewInt(256)\n\tbigInt := big.NewInt(0)\n\tfor _, value := range bytes {\n\t\tbigValue := big.NewInt(int64(value))\n\t\tbigInt.Add(bigInt, bigValue.Mul(totalPower, bigValue))\n\t\ttotalPower.Mul(totalPower, power)\n\t}\n\treturn bigInt\n}", "func ByteToBigInt(in []byte) *big.Int {\n\tout := new(big.Int)\n\tout.SetBytes(in)\n\treturn out\n}", "func NewIntFromString(s string) *big.Int {\n\tv, ok := new(big.Int).SetString(s, 10) //nolint:gomnd\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"Bad base 10 string %s\", s))\n\t}\n\treturn v\n}", "func MustParseBigInt(str string) *cells.BinaryCell {\n\tc, err := ParseBigInt(str)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn c\n}", "func IntText(x *big.Int, base int) string", "func AddBigInt(a, b string) (*big.Int, error) {\n\taInt, bInt, err := DecodeTwoBigInts(a, b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn aInt.Add(aInt, bInt), nil\n}", "func (n *Node) SetBigInt(x *big.Int)", "func main() {\r\n\t\r\n//\tn, _ := new(big.Int).SetString(\"16329805957987392833\", 10)\r\n//\tfmt.Println(n)\r\n\r\n\tstrs := []string{ \"0\", \"15\", \"53\", \"193\", \"456\", \"46340\", \"4200000000\",\r\n\t\t\t\t\t\t\t\"12345678901234567\", \"1111111111111\", \"9223372036854775807\", \"18446744073709551615\",\r\n\t\t\t\t\t\t\t\"1844674407370955161518446744073709551615\",\r\n\t\t\t\t\t\t\t\"999999999999999999999999999999999999999\",\r\n\t\t\t\t\t\t\t\"340282366920938463426481119284349108225\", \t\t// 18446744073709551615 * 18446744073709551615 => 340282366920938463426481119284349108225\r\n\t\t\t\t\t\t\t };\r\n\r\n\tfor _, v := range strs {\r\n\t bigInt := &big.Int{}\r\n\t value, _ := bigInt.SetString(v, 10)\r\n\t sqrt := bigInt.Sqrt( value )\r\n\t fmt.Println( v, \" : sqrt( \", v , \") = \", sqrt)\r\n\t}\r\n\r\n\r\n}", "func HashString(elt string) *big.Int {\n\thasher := sha1.New()\n\thasher.Write([]byte(elt))\n\treturn new(big.Int).SetBytes(hasher.Sum(nil))\n}", "func (d Decimal) BigInt() *big.Int {\n\tscaledD := d.rescale(0)\n\ti := &big.Int{}\n\ti.SetString(scaledD.String(), 10)\n\treturn i\n}", "func atoi(s string) int64 {\n\tsCopy := s\n\tif sCopy[0] == '-' {\n\t\tsCopy = s[1:]\n\t}\n\n\tres := 0\n\tfor _, myrune := range sCopy {\n\t\tres = res*10 + (int(myrune) - 48)\n\t}\n\n\tif s[0] == '-' {\n\t\tres = res * -1\n\t}\n\n\tvar result int64\n\tresult = int64(res)\n\n\treturn result\n}", "func str2dec(what string) uint64 {\n\twhat = strings.TrimLeft(what, \"0\")\n\tif len(what) == 0 {\n\t\treturn 0\n\t}\n\tout, err := strconv.ParseUint(what, 10, 64)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\treturn out\n}", "func StringToTokenValue(input string, decimals uint8) (*big.Int, error) {\n\toutput := big.NewInt(0)\n\tif input == \"\" {\n\t\treturn output, nil\n\t}\n\n\t// Count the number of items after the decimal point.\n\tparts := strings.Split(input, \".\")\n\tvar additionalZeros int\n\tif len(parts) == 2 {\n\t\t// There is a decimal place.\n\t\tadditionalZeros = int(decimals) - len(parts[1])\n\t} else {\n\t\t// There is not a decimal place.\n\t\tadditionalZeros = int(decimals)\n\t}\n\t// Remove the decimal point.\n\ttmp := strings.ReplaceAll(input, \".\", \"\")\n\t// Add zeros to ensure that there are an appropriate number of decimals.\n\ttmp += strings.Repeat(\"0\", additionalZeros)\n\n\t// Set the output\n\toutput.SetString(tmp, 10)\n\n\treturn output, nil\n}", "func SetString(z *big.Int, s string, base int) (*big.Int, bool) {\n\treturn z.SetString(s, base)\n}", 
"func ProtoToBigInt(pb *BigInt, bi *big.Int) error {\n\tif pb == nil {\n\t\treturn errors.New(\"nil proto big_int\")\n\t}\n\tbi.SetBytes(pb.Raw)\n\treturn nil\n}", "func ParseInteger(s string) (Number, error) {\n\tif res, ok := new(big.Int).SetString(s, 0); ok {\n\t\tif res.IsInt64() {\n\t\t\treturn Integer(res.Int64()), nil\n\t\t}\n\t\treturn (*BigInt)(res), nil\n\t}\n\treturn nil, fmt.Errorf(ErrExpectedInteger, s)\n}", "func atoi(b []byte) (int, error) {\n\tif len(b) > len(powers) {\n\t\treturn 0, fmt.Errorf(\"sam: integer overflow: %q\", b)\n\t}\n\tvar n int64\n\tk := len(b) - 1\n\tfor i, v := range b {\n\t\tn += int64(v-'0') * powers[k-i]\n\t\tif int64(int(n)) != n {\n\t\t\treturn 0, fmt.Errorf(\"sam: integer overflow: %q at %d\", b, i)\n\t\t}\n\t}\n\treturn int(n), nil\n}", "func parseEthereumBig256(s string) (*big.Int, bool) {\n\tif s == \"\" {\n\t\treturn new(big.Int), true\n\t}\n\tvar bigint *big.Int\n\tvar ok bool\n\tif len(s) >= 2 && (s[:2] == \"0x\" || s[:2] == \"0X\") {\n\t\tbigint, ok = new(big.Int).SetString(s[2:], 16)\n\t} else {\n\t\tbigint, ok = new(big.Int).SetString(s, 10)\n\t}\n\tif ok && bigint.BitLen() > 256 {\n\t\tbigint, ok = nil, false\n\t}\n\treturn bigint, ok\n}", "func ReadBigInt(r io.Reader) (*big.Int, error) {\n\td, err := ReadBytes(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn new(big.Int).SetBytes(d), nil\n}", "func parseInt64FromString(content string, aggErr *AggregateError) int64 {\n result, err := strconv.ParseInt(content, 10, 64)\n if err != nil {\n aggErr.Append(err)\n }\n return result\n}", "func ConvertToDec(numS string, fromBase int) (string, error) {\n\tif fromBase < 2 {\n\t\treturn \"\", fmt.Errorf(\"Invalid base: %v\", fromBase)\n\t}\n\tif numS == \"\" {\n\t\treturn \"\", fmt.Errorf(\"Nothing to convert\")\n\t}\n\n\t// If the number is negative: flagNeg = 1 and \"saving\" the negative sign.\n\tnum := []rune(numS)\n\tif num[0] == rune('-') {\n\t\tflagNeg = 1\n\t\tnum = append(num[1:])\n\t}\n\t// After removing \"-\" we have to check our number again.\n\tif len(num) == 0 {\n\t\treturn \"\", fmt.Errorf(\"Nothing to convert\")\n\t}\n\t// Converting the number to decimal base using math/big library in order to get\n\t// numbers bigger than int64.\n\tbDecNum := big.NewInt(0)\n\tiFromEnd := len(num) - 1\n\tfor _, vRune := range num {\n\t\tvInt, err := letterToNum(vRune, fromBase)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tbVint := big.NewInt(int64(vInt))\n\t\tbFromBase := big.NewInt(int64(fromBase))\n\t\tbIfromEnd := big.NewInt(int64(iFromEnd))\n\t\tbPow := big.NewInt(0)\n\t\tmod := big.NewInt(0)\n\t\tbRight := big.NewInt(0)\n\t\tbDecNum.Add(bDecNum, bRight.Mul(bPow.Exp(bFromBase, bIfromEnd, mod), bVint))\n\t\tiFromEnd--\n\t}\n\tif flagNeg == 1 {\n\t\t// If original number was negative - making new one negative too.\n\t\tbDecNum.Neg(bDecNum)\n\t}\n\n\treturn bDecNum.String(), nil\n}", "func BigIntToHexStr(i *big.Int) string {\n\th := i.Text(16)\n\tif len(h)%2 == 1 {\n\t\th = \"0\" + h // make sure that the length is even\n\t}\n\treturn \"0x\" + h\n}", "func stringToInt(s string) (int64, NumberType) {\n\tn, f, tp := StringToNumber(s)\n\tswitch tp {\n\tcase IsInt:\n\t\treturn n, IsInt\n\tcase IsFloat:\n\t\treturn FloatToInt(f)\n\t}\n\treturn 0, NaN\n}", "func String2Int(shortURL string) (seq uint64) {\n\tshortURL = reverse(shortURL)\n\tfor index, char := range shortURL {\n\t\tbase := uint64(math.Pow(float64(BaseStringLength), float64(index)))\n\t\tseq += uint64(strings.Index(BaseString, string(char))) * base\n\t}\n\treturn\n}", 
"func (d LegacyDec) BigInt() *big.Int {\n\tif d.IsNil() {\n\t\treturn nil\n\t}\n\n\tcp := new(big.Int)\n\treturn cp.Set(d.i)\n}", "func BigIntToProto(x *big.Int) *BigInt {\n\tif x == nil {\n\t\treturn nil\n\t}\n\tpb := new(BigInt)\n\tpb.Raw = x.Bytes()\n\treturn pb\n}", "func ToI(str string) int64 {\n\tval, err := strconv.ParseInt(str, 10, 64)\n\tL.IsError(err, str)\n\treturn val\n}", "func (z Element22) ToBigIntRegular(res *big.Int) *big.Int {\n\tz.FromMont()\n\tbits := (*[22]big.Word)(unsafe.Pointer(&z))\n\treturn res.SetBits(bits[:])\n}", "func ToInt(str string) (int64, error) {\n\tres, err := strconv.ParseInt(str, 0, 64)\n\tif err != nil {\n\t\tres = 0\n\t}\n\treturn res, err\n}", "func ToInt(str string) (int64, error) {\n\tres, err := strconv.ParseInt(str, 0, 64)\n\tif err != nil {\n\t\tres = 0\n\t}\n\treturn res, err\n}", "func SetBigInt(gauge prometheus.Gauge, arg *big.Int) {\n\tgauge.Set(float64(arg.Int64()))\n}", "func (z *Element22) SetBigInt(v *big.Int) *Element22 {\n\tz.SetZero()\n\n\tzero := big.NewInt(0)\n\tq := element22ModulusBigInt()\n\n\t// copy input\n\tvv := new(big.Int).Set(v)\n\n\t// while v < 0, v+=q\n\tfor vv.Cmp(zero) == -1 {\n\t\tvv.Add(vv, q)\n\t}\n\t// while v > q, v-=q\n\tfor vv.Cmp(q) == 1 {\n\t\tvv.Sub(vv, q)\n\t}\n\t// if v == q, return 0\n\tif vv.Cmp(q) == 0 {\n\t\treturn z\n\t}\n\t// v should\n\tvBits := vv.Bits()\n\tfor i := 0; i < len(vBits); i++ {\n\t\tz[i] = uint64(vBits[i])\n\t}\n\treturn z.ToMont()\n}", "func StringToInt64(str string) (i int64, err error) {\n\ti, err = strconv.ParseInt(str, 10, 64)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn i, nil\n}", "func strToInt64Point(s string) (int64, error) {\n\treturn strconv.ParseInt(s, 10, 64)\n}", "func MaxBigInt(a, b *big.Int) *big.Int {\n\tif a.Cmp(b) > 0 {\n\t\treturn a\n\t}\n\treturn b\n}", "func stringToInt64(s string) (int64, error) {\n\tvar v int64\n\tif strings.Contains(s, \".\") {\n\t\tfv, err := strconv.ParseFloat(s, 64)\n\t\tif err != nil {\n\t\t\treturn v, err\n\t\t}\n\t\tv = int64(fv)\n\t} else {\n\t\tiv, err := strconv.ParseInt(s, 10, 64)\n\t\tif err != nil {\n\t\t\treturn v, err\n\t\t}\n\t\tv = iv\n\t}\n\treturn v, nil\n}", "func hashString(x string) uint64 {\n\thash := offset64\n\tfor _, codepoint := range x {\n\t\thash ^= uint64(codepoint)\n\t\thash *= prime64\n\t}\n\treturn hash\n}", "func StringToInt64(s string) int64 {\n\ti, _ := strconv.ParseInt(s, 10, 64)\n\treturn i\n}", "func asciiToNumber(c byte) (uint8, error) {\n\tif c < one || c > nine {\n\t\treturn uint8(c), ErrParseInvalidNumber\n\t}\n\treturn uint8(c) - zero, nil\n}", "func StringToHashID(str string) (string, error) {\n\th, err := newHashID()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Convert string to []int\n\tvar ss []int\n\tfor _, v := range str {\n\t\tss = append(ss, int(v))\n\t}\n\n\t// Encode\n\te, err := h.Encode(ss)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn e, nil\n}", "func TestLipmaaDecompositionWithBigInteger(t *testing.T) {\n\tbigInt, _ := 
new(big.Int).SetString(\"16714772973240639959372252262788596420406994288943442724185217359247384753656472309049760952976644136858333233015922583099687128195321947212684779063190875332970679291085543110146729439665070418750765330192961290161474133279960593149307037455272278582955789954847238104228800942225108143276152223829168166008095539967222363070565697796008563529948374781419181195126018918350805639881625937503224895840081959848677868603567824611344898153185576740445411565094067875133968946677861528581074542082733743513314354002186235230287355796577107626422168586230066573268163712626444511811717579062108697723640288393001520781671\", 10)\n\troots, _ := lipmaaDecompose(bigInt)\n\tif squareRootsAndSum(roots).Cmp(bigInt) != 0 {\n\t\tt.Errorf(\"decomposition does not work correctly for a large integer\")\n\t}\n\n}", "func StrintToInt64(s string) int64 {\n\ti, err := strconv.ParseInt(s, 10, 64)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn i\n}", "func parseInt(s string, base int) (Value, error) {\n\tvar n int64\n\tvar err error\n\tvar cutoff, maxVal int64\n\tvar sign bool\n\ti := 0\n\n\tif len(s) < 1 {\n\t\terr = strconv.ErrSyntax\n\t\tgoto Error\n\t}\n\n\tswitch s[0] {\n\tcase '-':\n\t\tsign = true\n\t\ts = s[1:]\n\tcase '+':\n\t\ts = s[1:]\n\t}\n\n\tif len(s) < 1 {\n\t\terr = strconv.ErrSyntax\n\t\tgoto Error\n\t}\n\n\t// Look for hex prefix.\n\tif s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X') {\n\t\tif base == 0 || base == 16 {\n\t\t\tbase = 16\n\t\t\ts = s[2:]\n\t\t}\n\t}\n\n\tswitch {\n\tcase len(s) < 1:\n\t\terr = strconv.ErrSyntax\n\t\tgoto Error\n\n\tcase 2 <= base && base <= 36:\n\t// valid base; nothing to do\n\n\tcase base == 0:\n\t\t// Look for hex prefix.\n\t\tswitch {\n\t\tcase s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):\n\t\t\tif len(s) < 3 {\n\t\t\t\terr = strconv.ErrSyntax\n\t\t\t\tgoto Error\n\t\t\t}\n\t\t\tbase = 16\n\t\t\ts = s[2:]\n\t\tdefault:\n\t\t\tbase = 10\n\t\t}\n\n\tdefault:\n\t\terr = errors.New(\"invalid base \" + strconv.Itoa(base))\n\t\tgoto Error\n\t}\n\n\t// Cutoff is the smallest number such that cutoff*base > maxInt64.\n\t// Use compile-time constants for common cases.\n\tswitch base {\n\tcase 10:\n\t\tcutoff = math.MaxInt64/10 + 1\n\tcase 16:\n\t\tcutoff = math.MaxInt64/16 + 1\n\tdefault:\n\t\tcutoff = math.MaxInt64/int64(base) + 1\n\t}\n\n\tmaxVal = math.MaxInt64\n\tfor ; i < len(s); i++ {\n\t\tif n >= cutoff {\n\t\t\t// n*base overflows\n\t\t\treturn parseLargeInt(float64(n), s[i:], base, sign)\n\t\t}\n\t\tv := digitVal(s[i])\n\t\tif v >= base {\n\t\t\tbreak\n\t\t}\n\t\tn *= int64(base)\n\n\t\tn1 := n + int64(v)\n\t\tif n1 < n || n1 > maxVal {\n\t\t\t// n+v overflows\n\t\t\treturn parseLargeInt(float64(n)+float64(v), s[i+1:], base, sign)\n\t\t}\n\t\tn = n1\n\t}\n\n\tif i == 0 {\n\t\terr = strconv.ErrSyntax\n\t\tgoto Error\n\t}\n\n\tif sign {\n\t\tn = -n\n\t}\n\treturn intToValue(n), nil\n\nError:\n\treturn _NaN, err\n}", "func (l *BigInt) String() string {\n\treturn (*big.Int)(l).String()\n}", "func dtoi(s string) (n int, i int, ok bool) {\n\tn = 0\n\tfor i = 0; i < len(s) && '0' <= s[i] && s[i] <= '9'; i++ {\n\t\tn = n*10 + int(s[i]-'0')\n\t\tif n >= bigInt {\n\t\t\treturn bigInt, i, false\n\t\t}\n\t}\n\tif i == 0 {\n\t\treturn 0, 0, false\n\t}\n\treturn n, i, true\n}", "func BigIntToB64(i *big.Int) string {\n\tb := BigBytes(i)\n\tbuff := make([]byte, base64.StdEncoding.EncodedLen(len(b)))\n\tbase64.StdEncoding.Encode(buff, b)\n\treturn string(buff)\n}", "func ConvertSizeToBytes(s string) (string, error) {\n\ts = 
strings.TrimSpace(strings.ToLower(s))\n\n\t// spin until we find a match, if no match return original string\n\tfor _, k := range units {\n\t\tvar y int = lookupTable[k]\n\t\tif strings.HasSuffix(s, k) {\n\t\t\ts = s[:len(s)-len(k)]\n\t\t\ti, err := strconv.Atoi(s)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\ti = i * Pow(1024, y)\n\t\t\ts = strconv.Itoa(i)\n\t\t\treturn s, nil\n\t\t}\n\t}\n\n\treturn s, nil\n}", "func (s VerbatimString) ToInt64() (int64, error) { return _verbatimString(s).ToInt64() }", "func BigNumStrToHex(s string) string {\n\tbignum := ethutil.Big(s)\n\tbignum_bytes := ethutil.BigToBytes(bignum, 16)\n\treturn ethutil.Bytes2Hex(bignum_bytes)\n}", "func StringToSuperint(s string) Superint {\n\tvar si Superint\n\t//TODO add validation later\n\tif s[0] == '-' {\n\t\tsi.negative = true\n\t\ts = s[1:]\n\t}\n\t// \"12345\" would be stored as [5, 4, 3, 2, 1]\n\tfor i := len(s) - 1; i >= 0; i-- {\n\t\tnextDigit, _ := strconv.Atoi(string(s[i]))\n\t\tsi.digits = append(si.digits, nextDigit)\n\t}\n\treturn si\n}", "func FloatIntStr(valInt string) (out *big.Float, err error) {\n\ti, err := DecodeBigInt(valInt)\n\tif err != nil {\n\t\treturn\n\t}\n\tout, err = FloatInt(i)\n\treturn\n}", "func SplitBigIntToChars(num *big.Int) []string {\n\treturn strings.Split(num.String(), \"\")\n}", "func ParseStringToInt(str string) (int, error) {\n\ti64, err := strconv.ParseInt(str, 10, 0)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn int(i64), nil\n}", "func ConvertFromDec(numS string, toBase int) (string, error) {\n\tif toBase < 2 {\n\t\treturn \"\", fmt.Errorf(\"Invalid base: %v\", toBase)\n\t}\n\tif numS == \"\" {\n\t\treturn \"\", fmt.Errorf(\"Nothing to convert\")\n\t}\n\t// Converting string to the number bigger than int64.\n\tbNum := big.NewInt(0)\n\tbNum, ok := bNum.SetString(numS, 10)\n\tif ok != true {\n\t\treturn \"\", fmt.Errorf(\"Invalid number: %v\", numS)\n\t}\n\tbZero := big.NewInt(0)\n\t// If original number is equal to \"0...0\".\n\tif bNum.Cmp(bZero) == 0 {\n\t\treturn \"0\", nil\n\t}\n\t// If the number is negativ: flagNeg = 1 and \"saving\" the negative sign.\n\tif bNum.Cmp(bZero) == -1 {\n\t\tflagNeg = 1\n\t\tbNum.Neg(bNum)\n\t}\n\t// Converting the number to needed base (toBase).\n\tvar newNum string\n\tbToBase := big.NewInt(int64(toBase))\n\tbReminder := big.NewInt(0)\n\tfor bNum.Cmp(bZero) != 0 {\n\t\tbNum.DivMod(bNum, bToBase, bReminder)\n\t\treminderHex := numToLetter(bReminder.Int64(), toBase)\n\t\tnewNum += reminderHex\n\t}\n\n\tif flagNeg != 0 {\n\t\t// If original number was negative - making new one negative too.\n\t\tnewNum += \"-\"\n\t\tflagNeg = 0\n\t}\n\t// Reversing the number.\n\tnumRunes := []rune(newNum)\n\tleft := 0\n\tright := len(numRunes) - 1\n\tfor left < len(numRunes)/2 {\n\t\tnumRunes[left], numRunes[right] = numRunes[right], numRunes[left]\n\t\tleft++\n\t\tright--\n\t}\n\n\treturn string(numRunes), nil\n}", "func (t *Table) BigInt(colNm string) *Table {\n\tt.columns = append(t.columns, &column{Name: colNm, ColumnType: TypeBigInt})\n\treturn t\n}", "func StringToUint(s string) (uint64, error) {\n\treturn strconv.ParseUint(s, 10, 64)\n}", "func ToCodepoint(s string) (int64, error) {\n\ts = strings.ToUpper(s)\n\tvar base = 16\n\tswitch {\n\tcase strings.HasPrefix(s, \"0X\"), strings.HasPrefix(s, \"U+\"):\n\t\ts = s[2:]\n\tcase strings.HasPrefix(s, \"U\"):\n\t\ts = s[1:]\n\tcase strings.HasPrefix(s, \"0O\"):\n\t\ts = s[2:]\n\t\tbase = 8\n\tcase strings.HasPrefix(s, \"0B\"):\n\t\ts = s[2:]\n\t\tbase = 2\n\t}\n\treturn 
strconv.ParseInt(s, base, 64)\n}", "func toBase(bi *big.Int, destBase []string) string {\n\t// Hack in order to \"clone\" the big.Int and avoid changing it.\n\tsrc := big.NewInt(0)\n\tsrc.Add(bi, big.NewInt(0))\n\n\tif big.NewInt(0).Cmp(src) == 0 {\n\t\treturn destBase[0]\n\t}\n\n\tvar digits []string\n\tnumericBase := big.NewInt(int64(len(destBase)))\n\n\t// Keep going while bi is greater than 0.\n\tfor src.Cmp(big.NewInt(0)) > 0 {\n\t\tremainder := big.NewInt(0).Rem(src, numericBase)\n\t\tsrc.Div(src, numericBase)\n\t\tdigits = append(digits, destBase[remainder.Int64()])\n\t}\n\n\treturn strings.Join(digits, \" \")\n}", "func FromInterface(i1 interface{}) big.Int {\n\tvar val big.Int\n\n\tswitch c1 := i1.(type) {\n\tcase big.Int:\n\t\tval.Set(&c1)\n\tcase *big.Int:\n\t\tval.Set(c1)\n\tcase uint64:\n\t\tval.SetUint64(c1)\n\tcase int:\n\t\tval.SetInt64(int64(c1))\n\tcase string:\n\t\tif _, ok := val.SetString(c1, 10); !ok {\n\t\t\tpanic(\"unable to set big.Int from base10 string\")\n\t\t}\n\tcase []byte:\n\t\tval.SetBytes(c1)\n\tdefault:\n\t\tif v, ok := i1.(toBigIntInterface); ok {\n\t\t\tv.ToBigIntRegular(&val)\n\t\t\treturn val\n\t\t} else if reflect.ValueOf(i1).Kind() == reflect.Ptr {\n\t\t\tvv := reflect.ValueOf(i1).Elem()\n\t\t\tif vv.CanInterface() {\n\t\t\t\tif v, ok := vv.Interface().(toBigIntInterface); ok {\n\t\t\t\t\tv.ToBigIntRegular(&val)\n\t\t\t\t\treturn val\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tpanic(\"unsupported type\")\n\t}\n\n\treturn val\n}", "func Int(s string) int {\n\tif s == \"\" {\n\t\treturn 0\n\t}\n\n\tresult, err := strconv.ParseInt(s, 10, 64)\n\n\tif err != nil {\n\t\treturn 0\n\t}\n\n\treturn int(result)\n}", "func HexStrToInt64(hexString string) (int64, error) {\n\tif !strings.HasPrefix(hexString, \"0x\") {\n\t\thexString = \"0x\" + hexString\n\t}\n\treturn strconv.ParseInt(hexString, 0, 64)\n}", "func StringAsInteger(s string) int {\n\tif s == \"\" {\n\t\treturn 0\n\t}\n\n\tif i, err := strconv.ParseInt(s, 10, 32); err == nil {\n\t\treturn int(i)\n\t}\n\n\treturn 0\n}", "func parseInt(b []byte, t reflect.Type) (int64, []byte, error) {\n\tvar value int64\n\tvar count int\n\n\tif len(b) == 0 {\n\t\treturn 0, b, syntaxError(b, \"cannot decode integer from an empty input\")\n\t}\n\n\tif b[0] == '-' {\n\t\tconst max = math.MinInt64\n\t\tconst lim = max / 10\n\n\t\tif len(b) == 1 {\n\t\t\treturn 0, b, syntaxError(b, \"cannot decode integer from '-'\")\n\t\t}\n\n\t\tif len(b) > 2 && b[1] == '0' && '0' <= b[2] && b[2] <= '9' {\n\t\t\treturn 0, b, syntaxError(b, \"invalid leading character '0' in integer\")\n\t\t}\n\n\t\tfor _, d := range b[1:] {\n\t\t\tif !(d >= '0' && d <= '9') {\n\t\t\t\tif count == 0 {\n\t\t\t\t\tb, err := inputError(b, t)\n\t\t\t\t\treturn 0, b, err\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif value < lim {\n\t\t\t\treturn 0, b, unmarshalOverflow(b, t)\n\t\t\t}\n\n\t\t\tvalue *= 10\n\t\t\tx := int64(d - '0')\n\n\t\t\tif value < (max + x) {\n\t\t\t\treturn 0, b, unmarshalOverflow(b, t)\n\t\t\t}\n\n\t\t\tvalue -= x\n\t\t\tcount++\n\t\t}\n\n\t\tcount++\n\t} else {\n\t\tconst max = math.MaxInt64\n\t\tconst lim = max / 10\n\n\t\tif len(b) > 1 && b[0] == '0' && '0' <= b[1] && b[1] <= '9' {\n\t\t\treturn 0, b, syntaxError(b, \"invalid leading character '0' in integer\")\n\t\t}\n\n\t\tfor _, d := range b {\n\t\t\tif !(d >= '0' && d <= '9') {\n\t\t\t\tif count == 0 {\n\t\t\t\t\tb, err := inputError(b, t)\n\t\t\t\t\treturn 0, b, err\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tx := int64(d - '0')\n\n\t\t\tif value > lim {\n\t\t\t\treturn 0, b, 
unmarshalOverflow(b, t)\n\t\t\t}\n\n\t\t\tif value *= 10; value > (max - x) {\n\t\t\t\treturn 0, b, unmarshalOverflow(b, t)\n\t\t\t}\n\n\t\t\tvalue += x\n\t\t\tcount++\n\t\t}\n\t}\n\n\tif count < len(b) {\n\t\tswitch b[count] {\n\t\tcase '.', 'e', 'E': // was this actually a float?\n\t\t\tv, r, err := parseNumber(b)\n\t\t\tif err != nil {\n\t\t\t\tv, r = b[:count+1], b[count+1:]\n\t\t\t}\n\t\t\treturn 0, r, unmarshalTypeError(v, t)\n\t\t}\n\t}\n\n\treturn value, b[count:], nil\n}", "func StrToInt64(s string) int64 {\n\treturn int64(StrToInt(s))\n}", "func decodeToBigLE(src []byte) big.Int {\n\tn := len(src)\n\ttt := make([]byte, n)\n\tfor i := 0; i < n; i ++ {\n\t\ttt[i] = src[n - 1 - i]\n\t}\n\tvar x big.Int\n\tx.SetBytes(tt)\n\treturn x\n}", "func String2Int(v string) int {\n\treturn int(String2Int64(v))\n}", "func (z *Element22) SetString(s string) *Element22 {\n\tx, ok := new(big.Int).SetString(s, 10)\n\tif !ok {\n\t\tpanic(\"Element22.SetString failed -> can't parse number in base10 into a big.Int\")\n\t}\n\treturn z.SetBigInt(x)\n}", "func parseInt64(content []byte, aggErr *AggregateError) int64 {\n result, err := strconv.ParseInt(string(content), 10, 64)\n if err != nil {\n aggErr.Append(err)\n }\n return result\n}", "func hashString(text string) int64 {\n\thasher := fnv.New64()\n\thasher.Write([]byte(text))\n\tvalue := int64(hasher.Sum64())\n\t// Flip the sign if we wrapped\n\tif value < 0 {\n\t\treturn -value\n\t}\n\treturn value\n}", "func StringToHash(s string) int {\n\tv := int(crc32.ChecksumIEEE([]byte(s)))\n\tif v >= 0 {\n\t\treturn v\n\t}\n\tif -v >= 0 {\n\t\treturn -v\n\t}\n\t// v == MinInt\n\treturn 0\n}", "func parseIntEx(s string, bitSize int) (int64, error) {\n\tif s[0:2] == \"0b\" {\n\t\treturn strconv.ParseInt(s[2:], 2, bitSize)\n\t} else {\n\t\treturn strconv.ParseInt(s, 0, bitSize)\n\t}\n}", "func strToInt(input string) int {\n\toutput, _ := strconv.Atoi(input)\n\treturn output\n}", "func ConvertStringToInt(str string) int {\n\ts, err := strconv.Atoi(str)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn s\n}", "func StringToInteger(s string) (r int) {\n\tr, err := strconv.Atoi(s)\n\tif err != nil {\n\t\tlog.Fatal(\"ERROR: Invalid data type! Please use an integer instead of a string. Exiting!\\n\\n\")\n\t}\n\treturn r\n}", "func String2Int64(v string) int64 {\n\ti, err := strconv.ParseInt(v, 10, 64)\n\tif err != nil {\n\t\t// Log conversion error\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"string\": v,\n\t\t\t\"error\": err.Error(),\n\t\t}).Warn(\"Error converting string to int64\")\n\n\t\treturn 0\n\t}\n\n\treturn i\n}" ]
[ "0.8503275", "0.8007486", "0.7599278", "0.74680173", "0.74680173", "0.73421395", "0.70222", "0.6859304", "0.68342394", "0.6643473", "0.6609426", "0.6585258", "0.6492842", "0.63858724", "0.6366537", "0.63351583", "0.631695", "0.6280062", "0.62524414", "0.6239087", "0.6191838", "0.6183829", "0.6172", "0.6140018", "0.6129758", "0.61147565", "0.60841566", "0.60381263", "0.6008889", "0.5995458", "0.5953314", "0.5913333", "0.5842075", "0.5825297", "0.5805031", "0.5783098", "0.57770115", "0.57747436", "0.5766547", "0.5729747", "0.5689134", "0.56739086", "0.56576693", "0.5651874", "0.56484884", "0.56146324", "0.56138724", "0.5603463", "0.56016946", "0.55855817", "0.55754566", "0.55749583", "0.55706525", "0.55561525", "0.55561525", "0.5517805", "0.55085194", "0.5496422", "0.54901713", "0.5477559", "0.5468399", "0.546378", "0.5459693", "0.54415613", "0.54383665", "0.54225695", "0.5421711", "0.541823", "0.5417489", "0.541675", "0.5412852", "0.54041797", "0.5403946", "0.5402474", "0.5389868", "0.53876793", "0.53819954", "0.5376196", "0.5369262", "0.5367784", "0.5359723", "0.5347965", "0.533819", "0.5337022", "0.53360385", "0.5332845", "0.5331582", "0.5322452", "0.53211886", "0.5320343", "0.53182405", "0.53137726", "0.53125757", "0.531059", "0.5306465", "0.5302368", "0.52936673", "0.5289738", "0.52811134", "0.52772725" ]
0.8299093
1
NewStorageReader creates a new instance of StorageReader
func NewStorageReader(accountName, accountKey, containerName string) (*StorageReader, error) { logp.Debug( "storage_reader", "Creating new instance of storage reader", ) c := &Config{ accountName: accountName, accountKey: accountKey, containerName: containerName, } sr := &StorageReader{ config: c, } err := sr.initialize() if err != nil { return nil, err } return sr, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func StorageReader(storage StorageAPI, volume, path string, offset int64) io.Reader {\n\treturn &storageReader{storage, volume, path, offset}\n}", "func newStorage() *storage {\n\tr := make(map[string][]byte)\n\treturn &storage{\n\t\trepository: r,\n\t}\n}", "func NewReader(r io.Reader) io.Reader {\n return reader{r}\n}", "func NewRawStorage(config *storagebackend.ConfigForResource, newFunc func() runtime.Object) (storage.Interface, factory.DestroyFunc, error) {\n\treturn factory.Create(*config, newFunc)\n}", "func NewReader() Reader {\n\treturn reader{}\n}", "func (s *StandAloneStorage) Reader(ctx *kvrpcpb.Context) (storage.StorageReader, error) {\n\treturn &standAloneStorageReader{txn: s.db.NewTransaction(false)}, nil\n}", "func NewStorage(namespace, name string) (*Storage, error) {\n\tif err := k8sutil.CreateCRD(name); err != nil {\n\t\treturn nil, err\n\t}\n\tcli := k8sutil.NewRESTClient()\n\treturn &Storage{\n\t\tNamespace: namespace,\n\t\tName: strings.ToLower(name),\n\t\trestcli: cli,\n\t}, nil\n}", "func newStorage(account *account, prov provider.Account, cfg *config.Storage) (*storage, error) {\n\tlog.Debug(\"Initializing Storage\")\n\n\t// Validate the config.Storage object.\n\tif cfg.Buckets == nil {\n\t\treturn nil, fmt.Errorf(\"The buckets element is missing from the storage configuration\")\n\t}\n\n\ts := &storage{\n\t\tResources: resource.NewResources(),\n\t\tStorage: cfg,\n\t\taccount: account,\n\t}\n\n\tvar err error\n\ts.providerStorage, err = prov.NewStorage(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts.buckets, err = newBuckets(s, prov, cfg.Buckets)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ts.Append(s.buckets)\n\treturn s, nil\n}", "func NewStorage() (*Storage, error) {\n\tvar err error\n\n\ts := new(Storage)\n\n\t_, filename, _, _ := runtime.Caller(0)\n\tp := path.Dir(filename)\n\n\ts.db, err = scribble.New(p+dir, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s, nil\n}", "func NewStorageReadClient(c datatypes.Storage_ReadClient) *StorageReadClient {\n\treturn &StorageReadClient{c: c}\n}", "func newStorageObject(URL string, source interface{}, fileInfo os.FileInfo) storage.Object {\n\tabstract := storage.NewAbstractStorageObject(URL, source, fileInfo)\n\tresult := &object{\n\t\tAbstractObject: abstract,\n\t}\n\tresult.AbstractObject.Object = result\n\treturn result\n}", "func newStorage() *storage {\n\treturn &storage{\n\t\tsto: make(map[uint16]mqtt.Message),\n\t\tmux: new(sync.RWMutex),\n\t}\n}", "func (lm *SimpleManager) NewReader(r io.Reader) *Reader {\n\tlr := NewReader(r)\n\tlm.Manage(lr)\n\treturn lr\n}", "func NewStorage() Storage {\n\treturn &storage{}\n}", "func NewReader(r io.Reader) *Reader { return &Reader{r: r} }", "func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager) *Storage {\n\tif l == nil {\n\t\tl = log.NewNopLogger()\n\t}\n\tlogger := logging.Dedupe(l, 1*time.Minute)\n\n\ts := &Storage{\n\t\tlogger: logger,\n\t\tlocalStartTimeCallback: stCallback,\n\t}\n\ts.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm)\n\treturn s\n}", "func NewStorage() *Storage {\r\n\treturn new(Storage)\r\n}", "func NewStorage(config StorageConfig) (spec.Storage, error) {\n\tnewStorage := &storage{\n\t\tStorageConfig: config,\n\n\t\tID: id.MustNew(),\n\t\tShutdownOnce: sync.Once{},\n\t\tType: ObjectType,\n\t}\n\n\t// Dependencies.\n\tif newStorage.Log == nil {\n\t\treturn nil, maskAnyf(invalidConfigError, \"logger must not be 
empty\")\n\t}\n\tif newStorage.Pool == nil {\n\t\treturn nil, maskAnyf(invalidConfigError, \"connection pool must not be empty\")\n\t}\n\t// Settings.\n\tif newStorage.BackOffFactory == nil {\n\t\treturn nil, maskAnyf(invalidConfigError, \"backoff factory must not be empty\")\n\t}\n\tif newStorage.Prefix == \"\" {\n\t\treturn nil, maskAnyf(invalidConfigError, \"prefix must not be empty\")\n\t}\n\n\tnewStorage.Log.Register(newStorage.GetType())\n\n\treturn newStorage, nil\n}", "func NewStorage(cfg *api.Config, rootPath string, syncFrequency time.Duration) (storage.Interface, error) {\n\tcfg.WaitTime = syncFrequency\n\n\t// Get a new client\n\tclient, err := api.NewClient(cfg)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"creating consul client\")\n\t}\n\n\treturn &Client{\n\t\tv1: &v1client{\n\t\t\tupstreams: &upstreamsClient{\n\t\t\t\tbase: base.NewConsulStorageClient(rootPath+\"/upstreams\", client),\n\t\t\t},\n\t\t\tvirtualHosts: &virtualHostsClient{\n\t\t\t\tbase: base.NewConsulStorageClient(rootPath+\"/virtualhosts\", client),\n\t\t\t},\n\t\t},\n\t}, nil\n}", "func NewStorage(cfg *Config) *Storage {\n\tif cfg.Engine == nil {\n\t\tlog.Fatalln(\"Cannot create a ops proxy without an engine\")\n\t}\n\tif cfg.App == nil {\n\t\tnrConfig := newrelic.NewConfig(\"widget\", \"\")\n\t\tnrConfig.Enabled = false\n\t\tapp, err := newrelic.NewApplication(nrConfig)\n\t\tif err != nil {\n\t\t\tlogrus.WithField(\"error\", err).Fatalln(\"could not create dummy new relic app\")\n\t\t}\n\t\tcfg.App = app\n\t}\n\treturn &Storage{engine: cfg.Engine, newrelic: cfg.App}\n}", "func NewStorage(lgr *log.Logger) *Storage {\n\treturn &Storage{\n\t\tdataStack: graph.NewStack(),\n\t\tdataStorage: NewKVStorage(),\n\t\tlgr: lgr,\n\t}\n}", "func NewStorage() (*Storage, error) {\n\tvar err error\n\ts := new(Storage)\n\tcwd, _ := os.Getwd()\n\ts.db, err = scribble.New(cwd+viper.GetString(\"storage.basedir\"), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn s, nil\n}", "func NewStorage() *Storage {\n\treturn &Storage{}\n}", "func NewStorage(t mockConstructorTestingTNewStorage) *Storage {\n\tmock := &Storage{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewStorage(t mockConstructorTestingTNewStorage) *Storage {\n\tmock := &Storage{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewStorage(client *clientv3.Client, codec codec.Codec) storage.Store {\n\treturn &Storage{\n\t\tclient: client,\n\t\tcodec: codec,\n\t}\n}", "func NewStorage(opts ...StorageOption) *Storage {\n\ts := &Storage{\n\t\tcr: config.DefaultManager,\n\t\tmu: sync.RWMutex{},\n\t}\n\tfor _, opt := range opts {\n\t\tif opt != nil {\n\t\t\topt(s)\n\t\t}\n\t}\n\treturn s\n}", "func New(path string) (Storage, error) {\n\tif path == \":memory:\" {\n\t\treturn newMemoryStorage(), nil\n\t}\n\n\treturn newFileStorage(path)\n}", "func NewReader(dev *Device, readerName string) *Reader {\n\tr := &Reader{\n\t\tdev: dev,\n\t\treaderName: readerName,\n\t}\n\treturn r\n}", "func NewStorage(mds MetadataStore, tr track.Tracker, chunks *chunk.Storage, opts ...StorageOption) *Storage {\n\ts := &Storage{\n\t\tstore: mds,\n\t\ttracker: tr,\n\t\tchunks: chunks,\n\t\tidxCache: index.NewCache(chunks, DefaultIndexCacheSize),\n\t\tmemThreshold: DefaultMemoryThreshold,\n\t\tshardConfig: &index.ShardConfig{\n\t\t\tNumFiles: index.DefaultShardNumThreshold,\n\t\t\tSizeBytes: index.DefaultShardSizeThreshold,\n\t\t},\n\t\tcompactionConfig: 
&CompactionConfig{\n\t\t\tLevelFactor: DefaultCompactionLevelFactor,\n\t\t},\n\t\tfilesetSem: semaphore.NewWeighted(math.MaxInt64),\n\t\tprefetchLimit: DefaultPrefetchLimit,\n\t}\n\tfor _, opt := range opts {\n\t\topt(s)\n\t}\n\tif s.compactionConfig.LevelFactor < 1 {\n\t\tpanic(\"level factor cannot be < 1\")\n\t}\n\treturn s\n}", "func NewStorage() *Storage {\n\tstorageHandler = new(Storage)\n\n\treturn storageHandler\n}", "func FakeNewStorage() *fakeStorage {\n\treturn &fakeStorage{}\n}", "func newFileFromReader(upload *Upload, name string, reader io.Reader) *File {\n\treturn newFileFromReadCloser(upload, name, io.NopCloser(reader))\n}", "func NewStorage(first, second storage.Storage) (*Storage, error) {\n\treturn &Storage{first, second}, nil\n}", "func NewReader(store EntryStore) Reader {\n\treturn defaultReader{\n\t\tstore: store,\n\t}\n}", "func New(timeout time.Duration) (*Storage, error) {\n\tif timeout <= 0 {\n\t\treturn nil, errors.New(\"timeout must be positive\")\n\t}\n\treturn &Storage{\n\t\tdata: make(map[string]interface{}),\n\t\tcancelFuncs: make(map[string]context.CancelFunc),\n\t\ttimeout: timeout,\n\t}, nil\n}", "func New(o *Options) *Storage {\n\ts := &Storage{}\n\tif o.GraphiteAddress != \"\" {\n\t\tc := graphite.NewClient(\n\t\t\to.GraphiteAddress, o.GraphiteTransport,\n\t\t\to.StorageTimeout, o.GraphitePrefix)\n\t\ts.queues = append(s.queues, NewStorageQueueManager(c, defaultConfig))\n\t}\n\tif o.OpentsdbURL != \"\" {\n\t\tc := opentsdb.NewClient(o.OpentsdbURL, o.StorageTimeout)\n\t\ts.queues = append(s.queues, NewStorageQueueManager(c, defaultConfig))\n\t}\n\tif o.InfluxdbURL != nil {\n\t\tconf := influx.Config{\n\t\t\tURL: *o.InfluxdbURL,\n\t\t\tUsername: o.InfluxdbUsername,\n\t\t\tPassword: o.InfluxdbPassword,\n\t\t\tTimeout: o.StorageTimeout,\n\t\t}\n\t\tc := influxdb.NewClient(conf, o.InfluxdbDatabase, o.InfluxdbRetentionPolicy)\n\t\tprometheus.MustRegister(c)\n\t\ts.queues = append(s.queues, NewStorageQueueManager(c, defaultConfig))\n\t}\n\tif o.GenericURL != \"\" {\n\t\theaders := http.Header{}\n\t\tif o.GenericHeaderName != \"\" {\n\t\t\theaders.Add(o.GenericHeaderName, o.GenericHeaderValue)\n\t\t}\n\t\tc := generic.NewClient(o.GenericURL, headers, o.StorageTimeout)\n\t\ts.queues = append(s.queues, NewStorageQueueManager(c, defaultConfig))\n\t}\n\tif len(s.queues) == 0 {\n\t\treturn nil\n\t}\n\treturn s\n}", "func New(configs ...Configurator) (*Storage, error) {\n\tinstance := &Storage{}\n\tfor _, configure := range configs {\n\t\tif err := configure(instance); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn instance, nil\n}", "func New(ctx context.Context, storageConfig *config.StorageConfig) *Storage {\n\tctx, cancel := context.WithCancel(ctx)\n\treturn &Storage{\n\t\tctx: ctx,\n\t\tconfig: storageConfig,\n\t\tcancel: cancel,\n\t\tstate: Created,\n\t\tlog: logger.GetLogger(),\n\t}\n}", "func NewReader(r io.Reader, rd int) (*Reader, error) {\n\tbg, err := bgzf.NewReader(r, rd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\th, _ := sam.NewHeader(nil, nil)\n\tbr := &Reader{\n\t\tr: bg,\n\t\th: h,\n\n\t\treferences: int32(len(h.Refs())),\n\t}\n\terr = br.h.DecodeBinary(br.r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbr.lastChunk.End = br.r.LastChunk().End\n\treturn br, nil\n}", "func NewStorage(path string) (*Storage, error) {\n\tdb, err := bolt.Open(path, 0600, nil)\n\tif err != nil {\n\t\treturn nil, common.Error(common.InitFailed, err)\n\t}\n\tstore := &Storage{db}\n\treturn store, nil\n}", "func NewReader(sourceKey, filepath, dbFilename 
string) *Reader {\n\treturn &Reader{\n\t\tSourceKey: sourceKey,\n\t\tPath: filepath,\n\t\tFilename: dbFilename,\n\t}\n}", "func (s *MockQueueService) NewReader(topic string) queue.Reader {\n\treturn &mockReader{\n\t\tService: s,\n\t\tTopic: topic,\n\t}\n}", "func NewReader() Reader {\n\treturn &readerImpl{}\n}", "func NewStorage(size int, hash HashFunc) *Storage {\n\ts := &Storage{n: size, h: hash}\n\ts.Reset()\n\treturn s\n}", "func NewReader(r io.Reader) *Reader {\n\treturn &Reader{\n\t\tMaxObjectsLimit: DefaultMaxObjectsLimit,\n\n\t\tr: &trackingReader{r: r},\n\t\toffsets: make(map[int64]core.Hash, 0),\n\t}\n}", "func NewStorage(db *sql.DB) *Storage {\n\treturn &Storage{db}\n}", "func NewStorage(cat *repository.MongoCatalog, cache *rediscache.Redis) *Storage {\n\treturn &Storage{\n\t\tcat,\n\t\tcache,\n\t}\n}", "func NewStorage(typ string) (*Storage, error) {\n\tswitch typ {\n\tcase \"\", \"postgres\":\n\t\tpostgresStore, err := NewPostgresStorage()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tinst := Storage(postgresStore)\n\t\treturn &inst, nil\n\tcase \"inmem\":\n\t\tinst := Storage(new(InMemStorage))\n\t\treturn &inst, nil\n\tdefault:\n\t\treturn nil, errors.New(\"Invalid storage type provided.\")\n\t}\n}", "func NewStorage() (s *Storage, err error) {\n\ts = new(Storage)\n\n\terr = s.initDB()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tc := make(chan time.Time, 10)\n\ts.C = c\n\ts.c = c\n\n\treturn\n}", "func NewMockReaderStorage(ctrl *gomock.Controller) *MockReaderStorage {\n\tmock := &MockReaderStorage{ctrl: ctrl}\n\tmock.recorder = &MockReaderStorageMockRecorder{mock}\n\treturn mock\n}", "func New(ioReader io.Reader) (r KafkalogReader, err error) {\n\tr = &reader{\n\t\tioReader: ioReader,\n\t}\n\treturn\n}", "func New(config *config.ConfYaml) *Storage {\n\treturn &Storage{\n\t\tconfig: config,\n\t}\n}", "func NewReader(path string) (*Reader, error) {\n\tvar err error\n\tr := new(Reader)\n\n\tr.v, err = ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn r, nil\n}", "func New() *Storage {\n\treturn &Storage{\n\t\tstat: &statApp{},\n\t}\n}", "func (c *S3StorageConfig) NewStorage() (Storage, error) {\n\treturn NewS3Storage(c, nil)\n}", "func NewStorage() Storage {\n\tmemtable := memtable.NewRollingMemtable()\n\n\tstorage := Storage{memtable: memtable, wal: wal.Wal{Memtable: &memtable}}\n\tstorage.wal.Start()\n\treturn storage\n}", "func NewReader(r UnderlyingReader) Reader {\n\treturn Reader{\n\t\trequest: r,\n\t}\n}", "func newFileFromReadCloser(upload *Upload, name string, reader io.ReadCloser) *File {\n\tfile := &File{}\n\tfile.upload = upload\n\tfile.Name = name\n\tfile.reader = reader\n\treturn file\n}", "func newStorageLayer(disk string) (storage StorageAPI, err error) {\n\tif !strings.ContainsRune(disk, ':') || filepath.VolumeName(disk) != \"\" {\n\t\t// Initialize filesystem storage API.\n\t\treturn newPosix(disk)\n\t}\n\t// Initialize rpc client storage API.\n\treturn newRPCClient(disk)\n}", "func newTestModel() *Storage {\n\treturn &Storage{}\n}", "func NewStorage(vol string) *Storage {\n\tloc := vol\n\n\tif vol[len(vol)-1:] != \"/\" {\n\t\tloc = fmt.Sprintf(\"%s/\", vol)\n\t}\n\n\treturn &Storage{\n\t\tloc,\n\t}\n}", "func New(ctx context.Context, bucket string) (fs.Interface, error) {\n\tclient, err := storage.NewClient(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &gcs{\n\t\tbucket: client.Bucket(bucket),\n\t}, nil\n}", "func NewReader(ctx context.Context, client pb.RoutedJournalClient, req pb.ReadRequest) *Reader 
{\n\tvar r = &Reader{\n\t\tRequest: req,\n\t\tctx: ctx,\n\t\tclient: client,\n\t}\n\treturn r\n}", "func NewReader(clients []drive.Client, t *time.Ticker) (*Reader, error) {\n\tc := &Reader{\n\t\tclients: clients,\n\t\tnodes: map[string]Node{\n\t\t\t\"/\": {\n\t\t\t\tFilename: \"/\",\n\t\t\t\tChildren: make(map[string]bool),\n\t\t\t}},\n\t}\n\tif err := c.refresh(); err != nil {\n\t\treturn nil, fmt.Errorf(\"initializing cache: %s\", err)\n\t}\n\tgo c.periodicRefresh(t)\n\treturn c, nil\n}", "func NewStorage() *Storage {\n\treturn &Storage{\n\t\tkv: btree.NewKVTree(),\n\t}\n}", "func NewReader(r io.Reader) *Reader {\n\treturn s2.NewReader(r, s2.ReaderMaxBlockSize(maxBlockSize))\n}", "func newOfferStorage() *offerStorage {\n\treturn &offerStorage{\n\t\tcidMap: make(map[string]*digestOffer),\n\t\tlock: sync.RWMutex{},\n\t}\n}", "func (r *Reference) NewReader(ctx context.Context, path iosrc.URI, readspan nano.Span) (*Reader, error) {\n\tsegspan := r.Span()\n\tspan := segspan.Intersect(readspan)\n\tobjectPath := r.RowObjectPath(path)\n\tif span.Dur == 0 {\n\t\treturn nil, fmt.Errorf(\"segment reader: segment does not intersect provided span: %s chunkspan %v readspan %v\", path, segspan, readspan)\n\t}\n\treader, err := iosrc.NewReader(ctx, objectPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsr := &Reader{\n\t\tReader: reader,\n\t\tCloser: reader,\n\t\tTotalBytes: r.Size,\n\t\tReadBytes: r.Size,\n\t}\n\tif span == segspan {\n\t\treturn sr, nil\n\t}\n\ts, err := seekindex.Open(ctx, r.SeekObjectPath(path))\n\tif err != nil {\n\t\tif zqe.IsNotFound(err) {\n\t\t\treturn sr, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\tdefer s.Close()\n\trg, err := s.Lookup(ctx, span)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trg = rg.TrimEnd(sr.TotalBytes)\n\tsr.ReadBytes = rg.Size()\n\tsr.Reader, err = rg.LimitReader(reader)\n\treturn sr, err\n}", "func NewStorageAPI(g func(ctx context.Context) (*grpc.ClientConn, error)) *StorageAPI {\n\treturn &StorageAPI{g}\n}", "func newStorage(\n\tmachineName,\n\ttablename,\n\tdatabase,\n\tretentionPolicy,\n\tusername,\n\tpassword,\n\tinfluxdbHost string,\n\tisSecure bool,\n\tbufferDuration time.Duration,\n) (*influxdbStorage, error) {\n\turl := &url.URL{\n\t\tScheme: \"http\",\n\t\tHost: influxdbHost,\n\t}\n\tif isSecure {\n\t\turl.Scheme = \"https\"\n\t}\n\n\tconfig := &influxdb.Config{\n\t\tURL: *url,\n\t\tUsername: username,\n\t\tPassword: password,\n\t\tUserAgent: fmt.Sprintf(\"%v/%v\", \"cAdvisor\", version.Info[\"version\"]),\n\t}\n\tclient, err := influxdb.NewClient(*config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tret := &influxdbStorage{\n\t\tclient: client,\n\t\tmachineName: machineName,\n\t\tdatabase: database,\n\t\tretentionPolicy: retentionPolicy,\n\t\tbufferDuration: bufferDuration,\n\t\tlastWrite: time.Now(),\n\t\tpoints: make([]*influxdb.Point, 0),\n\t}\n\tret.readyToFlush = ret.defaultReadyToFlush\n\treturn ret, nil\n}", "func NewStorage(DBName string) *Storage {\n\ts := &Storage{}\n\tmutex.Lock()\n\tif db == nil {\n\t\ts.checkMakeDatabase(DBName)\n\t\tdb = s.db\n\t\tif chQueryArgs == nil {\n\t\t\tchQueryArgs = make(chan *queryArgs, 100)\n\t\t\tgo executeStatements()\n\t\t}\n\n\t\ts.setupTables()\n\t}\n\tmutex.Unlock()\n\treturn s\n}", "func NewStorage(db *gorm.DB) *Storage {\n\treturn &Storage{\n\t\tdb: db,\n\t}\n}", "func NewStorage(cfg *configuration.Storage, timeout time.Duration) (*Storage, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), timeout)\n\tdefer cancel()\n\n\tclient, err := mongo.Connect(ctx, 
options.Client().ApplyURI(cfg.URI))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"connect to mongo %s: %s\", cfg.URI, err)\n\t}\n\n\terr = client.Ping(ctx, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"ping mongo %s: %s\", cfg.URI, err)\n\t}\n\n\treturn &Storage{\n\t\tclient: client,\n\t\tcollection: client.Database(cfg.DBName).Collection(cfg.CollectionName),\n\t}, nil\n}", "func NewStorage(s map[string]interface{}) (Storage, error) {\n\tstype, ok := s[\"Type\"].(string)\n\tif !ok || stype == \"\" {\n\t\treturn nil, errors.New(\"Template do not have Storage type\")\n\t}\n\n\tswitch stype {\n\tcase \"Local\":\n\t\treturn newStorageLocal(s), nil\n\tcase \"S3\":\n\t\treturn newStorageS3(s)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"Unexecepted Storage type: %v\", stype)\n\t}\n}", "func NewReader(rd io.Reader) *Reader {\n\treturn &Reader{rd: rd}\n}", "func NewReader(cfg Config, plannerCfg PlannerConfig) (*Reader, error) {\n\tplanner, err := NewPlanner(plannerCfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tscanner, err := storage.NewChunkScanner(cfg.StorageType, cfg.StorageConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tid := fmt.Sprintf(\"%d_%d\", plannerCfg.FirstShard, plannerCfg.LastShard)\n\n\t// Default to one worker if none is set\n\tif cfg.NumWorkers < 1 {\n\t\tcfg.NumWorkers = 1\n\t}\n\n\treturn &Reader{\n\t\tcfg: cfg,\n\t\tid: id,\n\t\tplanner: planner,\n\t\tscanner: scanner,\n\t\tscanRequestsChan: make(chan chunk.ScanRequest),\n\t\tquit: make(chan struct{}),\n\t}, nil\n}", "func newReader(filePath string) (*Reader, func(), error) {\n\tf, err := os.Open(filePath)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tfz, err := gzip.NewReader(f)\n\tif err != nil {\n\t\tf.Close()\n\t\treturn nil, nil, err\n\t}\n\n\tcleanup := func() {\n\t\tf.Close()\n\t\tfz.Close()\n\t}\n\treturn &Reader{r: fz}, cleanup, nil\n}", "func NewReader(r io.Reader) *Reader {\n\treturn &Reader{\n\t\tr: bufio.NewReader(r),\n\t\tcurrentSection: \"default\",\n\t}\n}", "func NewReader(s string) *Reader { return &Reader{s, 0, -1} }", "func makeStorage(name string) ds.Storage {\n\tswitch name {\n\tcase \"skiplist\":\n\t\treturn ds.NewSkipList()\n\tcase \"dict\":\n\t\treturn ds.NewDict()\n\tcase \"b-tree\":\n\t\treturn ds.InitBTree(10)\n\t}\n\treturn ds.NewDict()\n}", "func New(db *badger.DB) *Storage {\n\treturn &Storage{DB: db}\n}", "func NewReader(r io.ReaderAt, size int64) (*Reader, error) {\n\tszr := new(Reader)\n\tif err := szr.init(r, size, false); err != nil {\n\t\treturn nil, err\n\t}\n\treturn szr, nil\n}", "func NewReader(reader io.Reader) (*Reader, error) {\n\treturn NewReaderMode(reader, DefaultMode)\n}", "func NewStorage() SafeMap {\n\tsm := make(safeMap)\n\tgo sm.run()\n\treturn sm\n}", "func NewGenericStorage(rawStorage RawStorage, serializer serializer.Serializer) Storage {\n\treturn &GenericStorage{rawStorage, serializer, patchutil.NewPatcher(serializer)}\n}", "func New(r io.Reader) *Reader {\n\treturn &Reader{reader: r}\n}", "func newStorage(\n\tmachineName,\n\ttablename,\n\tdatabase,\n\tusername,\n\tpassword,\n\tinfluxdbHost string,\n\tisSecure bool,\n\tbufferDuration time.Duration,\n) (*influxdbStorage, error) {\n\tconfig := &influxdb.ClientConfig{\n\t\tHost: influxdbHost,\n\t\tUsername: username,\n\t\tPassword: password,\n\t\tDatabase: database,\n\t\tIsSecure: isSecure,\n\t}\n\tclient, err := influxdb.NewClient(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// TODO(monnand): With go 1.3, we cannot compress data now.\n\tclient.DisableCompression()\n\n\tret := 
&influxdbStorage{\n\t\tclient: client,\n\t\tmachineName: machineName,\n\t\ttableName: tablename,\n\t\tbufferDuration: bufferDuration,\n\t\tlastWrite: time.Now(),\n\t\tseries: make([]*influxdb.Series, 0),\n\t}\n\tret.readyToFlush = ret.defaultReadyToFlush\n\treturn ret, nil\n}", "func NewStorage(address common.Address, backend bind.ContractBackend) (*Storage, error) {\n\tcontract, err := bindStorage(address, backend, backend, backend)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Storage{StorageCaller: StorageCaller{contract: contract}, StorageTransactor: StorageTransactor{contract: contract}, StorageFilterer: StorageFilterer{contract: contract}}, nil\n}", "func NewStorage(address common.Address, backend bind.ContractBackend) (*Storage, error) {\n\tcontract, err := bindStorage(address, backend, backend, backend)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Storage{StorageCaller: StorageCaller{contract: contract}, StorageTransactor: StorageTransactor{contract: contract}, StorageFilterer: StorageFilterer{contract: contract}}, nil\n}", "func NewStorage(address common.Address, backend bind.ContractBackend) (*Storage, error) {\n\tcontract, err := bindStorage(address, backend, backend, backend)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Storage{StorageCaller: StorageCaller{contract: contract}, StorageTransactor: StorageTransactor{contract: contract}, StorageFilterer: StorageFilterer{contract: contract}}, nil\n}", "func New(b string) *Storage {\n\treturn &Storage{new(atomic.Value), []byte(b), new(sync.RWMutex)}\n}", "func New(b string) *Storage {\n\treturn &Storage{new(atomic.Value), []byte(b), new(sync.RWMutex)}\n}", "func NewReader(base io.Reader) *Reader {\n\tbr, ok := base.(byteReader)\n\tif !ok {\n\t\tbr = simpleByteReader{Reader: base}\n\t}\n\treturn &Reader{base: br}\n}", "func NewReader(r io.ReaderAt, size int64) (*File, error) {\n\tf, err := odf.NewReader(r, size)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn newFile(f)\n}", "func NewReader(r io.Reader) *Reader {\n\treturn NewReaderConfig(r, ReaderConfig{})\n\n}", "func NewReader(rd io.Reader) io.ReadCloser {\n\tif rd == nil {\n\t\treturn nil\n\t}\n\n\tret, err := NewReaderSize(rd, DefaultBuffers, DefaultBufferSize)\n\n\t// Should not be possible to trigger from other packages.\n\tif err != nil {\n\t\tpanic(\"unexpected error:\" + err.Error())\n\t}\n\treturn ret\n}", "func Open(backend Backend) (*Storage, error) {\n\ts := new(Storage)\n\ts.backend = backend\n\terr := s.readHeader()\n\tif err != nil {\n\t\tlog.Errorf(\"error reading storage header: %s\", err)\n\t\treturn nil, err\n\t}\n\treturn s, nil\n}", "func NewStorage() *Storage {\n\tConfig := util.NewConfig()\n\tses, err := mgo.Dial(string(Config.Mongo.Addr))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn &Storage{database: Config.Mongo.DB, table: Config.Mongo.Table, session: ses}\n}", "func newStorageImpl(db orm.DB) *storageImpl {\n\treturn &storageImpl{\n\t\tdb: db,\n\t}\n}" ]
[ "0.7486464", "0.6991443", "0.6769339", "0.6694343", "0.6630447", "0.6618453", "0.65807104", "0.65728444", "0.65710235", "0.6547296", "0.6508507", "0.6461586", "0.64510155", "0.6447975", "0.64442575", "0.643639", "0.64106154", "0.64028615", "0.64025176", "0.6393443", "0.63858974", "0.63664305", "0.63573116", "0.6268965", "0.6268965", "0.62672997", "0.6261094", "0.62588423", "0.62455034", "0.62413496", "0.6234098", "0.6219892", "0.62139815", "0.62067163", "0.62032855", "0.6193568", "0.619092", "0.6188907", "0.6151001", "0.61491066", "0.61483735", "0.6146575", "0.6143666", "0.6133681", "0.613246", "0.6127565", "0.6117868", "0.6116097", "0.6115075", "0.6087051", "0.60844755", "0.6077219", "0.6069329", "0.6059845", "0.6058852", "0.6040307", "0.6037868", "0.60370004", "0.6027065", "0.60243344", "0.60218644", "0.60163265", "0.6016274", "0.60111576", "0.60083246", "0.60046273", "0.60022444", "0.59985024", "0.5997143", "0.599365", "0.5992232", "0.59888196", "0.59869707", "0.59835976", "0.5976019", "0.5971808", "0.59702164", "0.5965017", "0.59582406", "0.5953995", "0.59468544", "0.59449583", "0.59434426", "0.59276044", "0.59089047", "0.5903224", "0.5901446", "0.5891993", "0.5889064", "0.5889064", "0.58883977", "0.58864605", "0.58864605", "0.5884565", "0.5881899", "0.5878274", "0.5862856", "0.5862103", "0.5860604", "0.5859196" ]
0.77779114
0
ListBlobsModifiedBetween Get list of blobs modified between two specified timestamps
func (sr *StorageReader) ListBlobsModifiedBetween(startTime, endTime int64) *[]BlobDetails { logp.Debug("storage_reader", "Listing blobs modified between %v and %v.", startTime, endTime) ctx := context.Background() var blobItems []BlobDetails i := 0 for marker := (azblob.Marker{}); marker.NotDone(); { listBlob, err := sr.container.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{}) marker = listBlob.NextMarker if err != nil { logp.Error(err) continue } for _, blobInfo := range listBlob.Segment.BlobItems { i++ lastModified := blobInfo.Properties.LastModified.UTC().Unix() if lastModified > startTime && lastModified < endTime { length := *blobInfo.Properties.ContentLength if length == int64(0) { continue } blobItems = append(blobItems, NewBlobDetails(blobInfo.Name, string(blobInfo.Properties.Etag), length, lastModified)) } } } logp.Info("Found %v blobs in container. Found %v blobs modified between %v and %v.", i, len(blobItems), startTime, endTime, ) return &blobItems }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (w *Wallet) ListSinceBlock(start, end, syncHeight int32) ([]btcjson.ListTransactionsResult, er.R) {\n\ttxList := []btcjson.ListTransactionsResult{}\n\terr := walletdb.View(w.db, func(tx walletdb.ReadTx) er.R {\n\t\ttxmgrNs := tx.ReadBucket(wtxmgrNamespaceKey)\n\n\t\trangeFn := func(details []wtxmgr.TxDetails) (bool, er.R) {\n\t\t\tfor _, detail := range details {\n\t\t\t\tjsonResults := listTransactions(tx, &detail,\n\t\t\t\t\tw.Manager, syncHeight, w.chainParams)\n\t\t\t\ttxList = append(txList, jsonResults...)\n\t\t\t}\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn w.TxStore.RangeTransactions(txmgrNs, start, end, rangeFn)\n\t})\n\treturn txList, err\n}", "func (bbcblr BlockBlobsCommitBlockListResponse) LastModified() time.Time {\n\ts := bbcblr.rawResponse.Header.Get(\"Last-Modified\")\n\tif s == \"\" {\n\t\treturn time.Time{}\n\t}\n\tt, err := time.Parse(time.RFC1123, s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn t\n}", "func (a *App) ListBlobs(w http.ResponseWriter, r *http.Request) {\n\tresource := a.eventID\n\tblobs, err := a.Blob.List(resource)\n\tif err != nil {\n\t\trespondWithError(err, http.StatusNotFound, w)\n\t\treturn\n\t}\n\tw.Header().Set(types.ContentType, types.ContentTypeApplicationJSON)\n\terr = json.NewEncoder(w).Encode(blobs)\n\tif err != nil {\n\t\trespondWithError(err, http.StatusInternalServerError, w)\n\t\treturn\n\t}\n}", "func ReadTimestamps(bkt *bolt.Bucket, created, updated *time.Time) error {\n\tfor _, f := range []struct {\n\t\tb []byte\n\t\tt *time.Time\n\t}{\n\t\t{bucketKeyCreatedAt, created},\n\t\t{bucketKeyUpdatedAt, updated},\n\t} {\n\t\tv := bkt.Get(f.b)\n\t\tif v != nil {\n\t\t\tif err := f.t.UnmarshalBinary(v); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (b *BucketManager) ListFilesInTimeSlot(property string, ts string) ([]string, error) {\n\tsvc := s3.New(b.session)\n\tlogFiles := regexp.MustCompile(`([a-zA-Z0-9]+)\\.([0-9]{4}-[0-9]{2}-[0-9]{2}-[0-9]{2})\\.([a-z0-9]+)\\.gz`)\n\tresp, _ := svc.ListObjects(&s3.ListObjectsInput{\n\t\tBucket: &b.bucket,\n\t\tPrefix: aws.String(fmt.Sprintf(\"%s.%s.\", property, ts)),\n\t})\n\tif len(resp.Contents) == 0 {\n\t\treturn nil, errors.New(\"no log files matching the requested parameters in the bucket\")\n\t}\n\tvar files []string\n\tfor _, key := range resp.Contents {\n\t\tif logFiles.MatchString(*key.Key) {\n\t\t\tfiles = append(files, *key.Key)\n\t\t}\n\t}\n\treturn files, nil\n}", "func (ti *TimeIndex) Notify(blobs ...*blob.Blob) {\n ti.lock.Lock()\n defer ti.lock.Unlock()\n\n for _, b := range blobs {\n tp := b.Type()\n if tp == blob.NoType || tp == blob.Object {\n continue\n }\n\n t, err := b.Timestamp()\n if err != nil {\n continue\n }\n\n ti.entries = append(ti.entries, &timeEntry{tm: t, ref: b.Ref()})\n }\n}", "func (a *Account) ListSinceBlock(since, curBlockHeight int32,\n\tminconf int) ([]btcjson.ListTransactionsResult, error) {\n\n\ttxList := []btcjson.ListTransactionsResult{}\n\tfor _, txRecord := range a.TxStore.Records() {\n\t\t// Transaction records must only be considered if they occur\n\t\t// after the block height since.\n\t\tif since != -1 && txRecord.BlockHeight <= since {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Transactions that have not met minconf confirmations are to\n\t\t// be ignored.\n\t\tif !txRecord.Confirmed(minconf, curBlockHeight) {\n\t\t\tcontinue\n\t\t}\n\n\t\tjsonResults, err := txRecord.ToJSON(a.name, curBlockHeight,\n\t\t\ta.KeyStore.Net())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttxList = append(txList, 
jsonResults...)\n\t}\n\n\treturn txList, nil\n}", "func (a *Account) ListSinceBlock(since, curBlockHeight int32, minconf int) ([]map[string]interface{}, error) {\n\tvar txInfoList []map[string]interface{}\n\tfor _, txRecord := range a.TxStore.SortedRecords() {\n\t\t// check block number.\n\t\tif since != -1 && txRecord.Height() <= since {\n\t\t\tcontinue\n\t\t}\n\n\t\ttxInfoList = append(txInfoList,\n\t\t\ttxRecord.TxInfo(a.name, curBlockHeight, a.Net())...)\n\t}\n\n\treturn txInfoList, nil\n}", "func (am *AccountManager) ListSinceBlock(since, curBlockHeight int32,\n\tminconf int) ([]btcjson.ListTransactionsResult, error) {\n\n\t// Create and fill a map of account names and their balances.\n\tvar txList []btcjson.ListTransactionsResult\n\tfor _, a := range am.AllAccounts() {\n\t\ttxTmp, err := a.ListSinceBlock(since, curBlockHeight, minconf)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttxList = append(txList, txTmp...)\n\t}\n\treturn txList, nil\n}", "func (fmd *FakeMysqlDaemon) ReadBinlogFilesTimestamps(ctx context.Context, req *mysqlctlpb.ReadBinlogFilesTimestampsRequest) (*mysqlctlpb.ReadBinlogFilesTimestampsResponse, error) {\n\treturn nil, nil\n}", "func BlobTime(v interface{}) (t time.Time) {\n\tvar (\n\t\tf *os.File\n\t\tfn string\n\t\terr error\n\t\tok bool\n\t)\n\tif fn, ok = v.(string); ok {\n\t\tif f, err = os.Open(fn); err != nil {\n\t\t\treturn\n\t\t}\n\t\tdefer f.Close()\n\t} else if f, ok = v.(*os.File); !ok {\n\t\treturn\n\t}\n\tf.Seek(BlobTimeOff, os.SEEK_SET)\n\t(NBOReader{f}).ReadNBO(&t)\n\treturn\n}", "func BlobTime(v interface{}) (t time.Time) {\n\tvar (\n\t\tf *os.File\n\t\tfn string\n\t\terr error\n\t\tok bool\n\t)\n\tif fn, ok = v.(string); ok {\n\t\tif f, err = os.Open(fn); err != nil {\n\t\t\treturn\n\t\t}\n\t\tdefer f.Close()\n\t} else if f, ok = v.(*os.File); !ok {\n\t\treturn\n\t}\n\tf.Seek(BlobTimeOff, os.SEEK_SET)\n\t(NBOReader{f}).ReadNBO(&t)\n\treturn\n}", "func (t *Tag) LastModified() (lastModified time.Time) {\n\tfor _, history := range t.History {\n\t\tif history.Created.After(lastModified) {\n\t\t\tlastModified = history.Created\n\t\t}\n\t}\n\treturn lastModified\n}", "func getRecentlyModified(match []string, modified int, verbose bool) []string {\n\tvar matches []string // slice to hold the matching file paths\n\tvar paths []string // slice to hold the file paths\n\tvar modTimes []time.Time // slice to hold the modification times of the files\n\n\t// Loop through the provided slice of file names\n\tfor _, file := range match {\n\t\t// Get the file info and handle any errors\n\t\tinfo, err := os.Stat(file)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\t// Append the file path and modification time to the corresponding slices\n\t\tpaths = append(paths, file)\n\t\tmodTimes = append(modTimes, info.ModTime())\n\t}\n\n\t// Sort the slices by modification time\n\tsort.SliceStable(paths, func(i, j int) bool {\n\t\treturn modTimes[i].After(modTimes[j])\n\t})\n\n\t// Get the current time\n\tnow := time.Now()\n\n\t// Loop through the sorted slice of file paths\n\tfor i, path := range paths {\n\t\t// Check if the file was modified within the last modified hours\n\t\tif now.Sub(modTimes[i]) < (time.Duration(modified) * time.Hour) {\n\t\t\t// If it was, append the file path to the matches slice\n\t\t\tmatches = append(matches, path)\n\t\t} else {\n\t\t\tif verbose {\n\t\t\t\tlog.Printf(\"[IGNORING] Last modified time: %s older than configured timeframe (%d hours): %s.\", modTimes[i], modified, path)\n\t\t\t}\n\t\t}\n\t}\n\n\t// 
Return the slice of matching file paths\n\treturn matches\n}", "func SplitChangesFetchRaw(since int64) ([]byte, error) {\n\n\tvar bufferQuery bytes.Buffer\n\tbufferQuery.WriteString(\"/splitChanges\")\n\n\tif since >= -1 {\n\t\tbufferQuery.WriteString(\"?since=\")\n\t\tbufferQuery.WriteString(strconv.FormatInt(since, 10))\n\t}\n\n\tdata, err := sdkFetch(bufferQuery.String())\n\tif err != nil {\n\t\tlog.Error.Println(\"Error fetching split changes \", err)\n\t\treturn nil, err\n\t}\n\n\treturn data, nil\n}", "func (s *TaskService) ListWithLastModifiedDate(modifiedDate time.Time) ([]Task, *http.Response, error) {\n\tfilterDateType := \"DATE_TASK_MODIFIED\"\n\tfilterDateOperator := 3 // Is Later Than Or On\n\tmodifiedDateStr := modifiedDate.Format(\"2006-01-02\")\n\tresObj := new(TaskResponse)\n\tresp, err := s.sling.New().\n\t\tQueryStruct(CreateFunctionParam(\"gettasks\")).\n\t\tQueryStruct(&GetTasksParam{\n\t\t\tFilterFirstDate: &filterDateType,\n\t\t\tFilterFirstDateOperator: &filterDateOperator,\n\t\t\tFilterFirstDateValue: &modifiedDateStr,\n\t\t}).\n\t\tReceiveSuccess(resObj)\n\tif resObj != nil && len(resObj.Results) > 0 {\n\t\tif resObj.Results[0].ErrorDesc != nil {\n\t\t\treturn nil, resp, Error{*resObj.Results[0].ErrorDesc}\n\t\t}\n\t\treturn *(&resObj.Results), resp, err\n\t}\n\treturn make([]Task, 0), resp, err\n}", "func historyBetween(since string, until string) ([]gitobject.Commit, error) {\n\tall, err := history()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Filter the commits\n\tvar between []gitobject.Commit\n\tsawSince := false\n\tfor _, c := range all {\n\t\tif sawSince {\n\t\t\tbetween = append(between, c)\n\t\t}\n\n\t\tif c.Hash.String() == since {\n\t\t\tsawSince = true\n\t\t}\n\n\t\tif c.Hash.String() == until {\n\t\t\tbreak\n\t\t}\n\n\t}\n\n\treturn between, nil\n}", "func (db *pg) GetLogsBetweenInterval(ctx context.Context, start time.Time, end time.Time, userID string) ([]*Log, error) {\n\tconst stmt = `SELECT * FROM audit.\"Logs\" WHERE \"UserId\" = $1 AND \"Timestamp\" >= $2 AND \"Timestamp\" <= $3;`\n\trows, err := db.QueryContext(ctx, stmt, userID, start, end)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlst := make([]*Log, 0, 100)\n\tfor rows.Next() {\n\t\tl := Log{}\n\t\tif err := rows.Scan(&l.Timestamp, &l.UserID, &l.Action); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tlst = append(lst, &l)\n\t}\n\treturn lst, nil\n}", "func (pb PageBlobURL) GetPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot string, ac BlobAccessConditions) (*PageList, error) {\n\tifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()\n\treturn pb.pbClient.GetPageRangesDiff(ctx, nil, nil, &prevSnapshot,\n\t\tnil, // Get managed disk diff\n\t\thttpRange{offset: offset, count: count}.pointers(),\n\t\tac.LeaseAccessConditions.pointers(),\n\t\tifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,\n\t\tnil, // Blob ifTags\n\t\tnil)\n}", "func RecentBlockTargets(db *types.DB, size, length int) []string {\n\tif length == 0 {\n\t\tlength = db.Length\n\t}\n\n\tstart := length - size\n\tif start < 0 {\n\t\tstart = 0\n\t}\n\n\tvar ts []string\n\tfor index := start; index < length; index++ {\n\t\t// if not index in storage:\n\t\t// storage[index] = db_get(index, db)[\"target\"]\n\t\t_, ok := targets[index]\n\t\tif !ok {\n\t\t\ttargets[index] = db.GetBlock(index).Target\n\t\t}\n\n\t\tts = append(ts, targets[index])\n\t}\n\n\treturn ts\n}", "func (c *containerdCAS) ListBlobs() ([]string, error) 
{\n\tinfos, err := getContentInfoList()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"ListBlobs: Exception while getting blob list. %s\", err.Error())\n\t}\n\tblobDigests := make([]string, 0)\n\tfor _, info := range infos {\n\t\tblobDigests = append(blobDigests, info.Digest.String())\n\t}\n\treturn blobDigests, nil\n}", "func (sto *overlayStorage) StatBlobs(ctx context.Context, blobs []blob.Ref, f func(blob.SizedRef) error) error {\n\texists := make([]blob.Ref, 0, len(blobs))\n\tfor _, br := range blobs {\n\t\tif !sto.isDeleted(br) {\n\t\t\texists = append(exists, br)\n\t\t}\n\t}\n\n\tseen := make(map[blob.Ref]struct{}, len(exists))\n\n\terr := sto.upper.StatBlobs(ctx, exists, func(sbr blob.SizedRef) error {\n\t\tseen[sbr.Ref] = struct{}{}\n\t\treturn f(sbr)\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlowerBlobs := make([]blob.Ref, 0, len(exists))\n\tfor _, br := range exists {\n\t\tif _, s := seen[br]; !s {\n\t\t\tlowerBlobs = append(lowerBlobs, br)\n\t\t}\n\t}\n\n\treturn sto.lower.StatBlobs(ctx, lowerBlobs, f)\n}", "func GetMessagesBetweenTimeStamps(context *gin.Context) {\n\trequest := context.Request\n\twriter := context.Writer\n\n\t// Check for required headers\n\ttoken, appID, isOK := auth.GetAuthData(request)\n\n\tif !isOK {\n\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Validate token\n\tidentity, isOK := auth.VerifyToken(token)\n\n\t// If not valid return\n\tif !isOK || !identity.CanUseAppID(appID) {\n\t\twriter.WriteHeader(http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tchannelID := context.Params.ByName(\"channelID\")\n\n\tif channelID == \"\" {\n\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tfirstTimeStampStr := context.Params.ByName(\"firstTimeStamp\")\n\tsecondTimeStampStr := context.Params.ByName(\"secondTimeStamp\")\n\n\tif firstTimeStampStr == \"\" || secondTimeStampStr == \"\" {\n\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tfirstTimeStamp, err := strconv.ParseInt(firstTimeStampStr, 10, 64)\n\tsecondTimeStamp, err := strconv.ParseInt(secondTimeStampStr, 10, 64)\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"HTTP Get messages between timestamps: failed convert timestamp %v\\n\", err)\n\t}\n\n\t// Check if channel exists\n\texists, err := GetEngine().GetChannelRepository().ExistsAppChannel(appID, channelID)\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"HTTP Get messages between timestamps: failed to check app channel existence %v\\n\", err)\n\t\twriter.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif !exists {\n\t\twriter.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\n\t// Get events\n\tevents, err := GetEngine().GetChannelRepository().GetChannelEventsAfterAndBefore(appID, channelID, firstTimeStamp, secondTimeStamp)\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"HTTP Get messages between timestamps: failed fetch events %v\\n\", err)\n\t\twriter.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// Prepare response\n\tresponse := getChannelEventsResponse{Events: events}\n\n\tvar json = jsoniter.ConfigCompatibleWithStandardLibrary\n\tdata, err := json.Marshal(response)\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"HTTP Get messages since timestamp: failed to marshal response %v\\n\", err)\n\t\twriter.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\twriter.WriteHeader(http.StatusOK)\n\twriter.Write(data)\n}", "func bookChanged(timestamps1, timestamps2 []float64) bool {\n\tfor i := 0; i < 40; i++ {\n\t\tif 
math.Abs(timestamps1[i]-timestamps2[i]) > .5 {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (dao *DAOName) GetUpdatedAfter(timestamp time.Time) ([]ReferenceModel, error) {\n\tm := []ReferenceModel{}\n\tif err := dao.db.Where(\"updated_at > ?\", timestamp).Find(&m).Error; err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}", "func (tl *TombstonesLoader) GetPendingTombstonesForInterval(userID string, from, to model.Time) (*TombstonesSet, error) {\n\tallTombstones, err := tl.GetPendingTombstones(userID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !allTombstones.HasTombstonesForInterval(from, to) {\n\t\treturn &TombstonesSet{}, nil\n\t}\n\n\tfilteredSet := TombstonesSet{oldestTombstoneStart: model.Now()}\n\n\tfor _, tombstone := range allTombstones.tombstones {\n\t\tif !intervalsOverlap(model.Interval{Start: from, End: to}, model.Interval{Start: tombstone.StartTime, End: tombstone.EndTime}) {\n\t\t\tcontinue\n\t\t}\n\n\t\tfilteredSet.tombstones = append(filteredSet.tombstones, tombstone)\n\n\t\tif tombstone.StartTime < filteredSet.oldestTombstoneStart {\n\t\t\tfilteredSet.oldestTombstoneStart = tombstone.StartTime\n\t\t}\n\n\t\tif tombstone.EndTime > filteredSet.newestTombstoneEnd {\n\t\t\tfilteredSet.newestTombstoneEnd = tombstone.EndTime\n\t\t}\n\t}\n\n\treturn &filteredSet, nil\n}", "func (c *doubleCacheItem[T]) ListAfter(ts uint64) []T {\n\tc.mut.RLock()\n\tdefer c.mut.RUnlock()\n\tidx := sort.Search(len(c.data), func(idx int) bool {\n\t\treturn c.data[idx].Timestamp() >= ts\n\t})\n\t// not found\n\tif idx == len(c.data) {\n\t\treturn nil\n\t}\n\treturn c.data[idx:]\n}", "func (i *IndexBuilder) listModifiedChartDirs(c *git.Commit, tree *git.Tree) ([]string, error) {\n\tmodified, err := i.g.ModifiedFiles(c, tree)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trelativeDir := i.relativeDir()\n\tchartDirs := []string{}\n\tfor _, entry := range modified {\n\t\tabsEntryPath := fmt.Sprintf(\"/%s\", entry)\n\t\tif !strings.HasPrefix(absEntryPath, relativeDir) {\n\t\t\tcontinue\n\t\t}\n\n\t\tchartRelativePath := strings.TrimPrefix(absEntryPath, relativeDir)\n\t\tpathElements := strings.Split(chartRelativePath, string(os.PathSeparator))\n\t\tif len(pathElements) <= 1 {\n\t\t\tcontinue\n\t\t}\n\n\t\tchartRootDir := pathElements[0]\n\t\tif !ContainsStringSlice(chartDirs, chartRootDir) {\n\t\t\tchartDirs = append(chartDirs, pathElements[0])\n\t\t}\n\t}\n\treturn chartDirs, nil\n}", "func (p *Bucket) Ls() (m map[time.Time]string, err error) {\n\tresp, err := p.service.ListObjectsV2(&s3.ListObjectsV2Input{\n\t\tBucket: aws.String(p.name),\n\t\tPrefix: aws.String(p.path),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tm = make(map[time.Time]string)\n\tfor _, item := range resp.Contents {\n\t\tm[*item.LastModified] = *item.Key\n\t}\n\treturn\n}", "func (p *commentsPlugin) saveTimestamps(token []byte, ts map[uint32]comments.CommentTimestamp) error {\n\t// Setup the blob entries\n\tblobs := make(map[string][]byte, len(ts))\n\tkeys := make([]string, 0, len(ts))\n\tfor cid, v := range ts {\n\t\tk, err := getTimestampKey(token, cid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tb, err := json.Marshal(v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tblobs[k] = b\n\t\tkeys = append(keys, k)\n\t}\n\n\t// Delete exisiting digests\n\terr := p.tstore.CacheDel(keys)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Save the blob entries\n\treturn p.tstore.CachePut(blobs, false)\n}", "func (ar *accountRepository) RetrieveTransferredTokensInBlockAfter(\n\taccountId 
int64,\n\tconsensusTimestamp int64,\n) ([]types.Token, *rTypes.Error) {\n\ttokens := make([]pTypes.Token, 0)\n\tif err := ar.dbClient.Raw(\n\t\tselectTransferredTokensInBlockAfterTimestamp,\n\t\tsql.Named(\"account_id\", accountId),\n\t\tsql.Named(\"consensus_timestamp\", consensusTimestamp),\n\t).Scan(&tokens).Error; err != nil {\n\t\tlog.Errorf(databaseErrorFormat, hErrors.ErrDatabaseError.Message, err)\n\t\treturn nil, hErrors.ErrDatabaseError\n\t}\n\n\treturn getDomainTokens(tokens)\n}", "func (c *Client) ListChangedBlocks(ctx context.Context, params *ListChangedBlocksInput, optFns ...func(*Options)) (*ListChangedBlocksOutput, error) {\n\tif params == nil {\n\t\tparams = &ListChangedBlocksInput{}\n\t}\n\n\tresult, metadata, err := c.invokeOperation(ctx, \"ListChangedBlocks\", params, optFns, addOperationListChangedBlocksMiddlewares)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tout := result.(*ListChangedBlocksOutput)\n\tout.ResultMetadata = metadata\n\treturn out, nil\n}", "func FilterUnchanged(c cookoo.Context, p *cookoo.Params) (interface{}, cookoo.Interrupt) {\n\tfiles := p.Get(\"files\", []string{}).([]string)\n\tsince := p.Get(\"since\", time.Now().Add(time.Second * -10)).(time.Time)\n\n\tmodified := []string{}\n\tfor _, file := range files {\n\t\tstat, err := os.Stat(file)\n\t\tif err == nil && stat.ModTime().After(since) {\n\t\t\tmodified = append(modified, file)\n\t\t}\n\t}\n\n\treturn modified, nil\n}", "func fetchChanges(c context.Context, b *buildbot.Build) error {\n\tmemcache.Set(c, buildRevCache(c, b))\n\n\t// initialize the slice so that when serialized to JSON, it is [], not null.\n\tb.Sourcestamp.Changes = []buildbot.Change{}\n\n\thost, project, err := gitiles.ParseRepoURL(b.Sourcestamp.Repository)\n\tif err != nil {\n\t\tlogging.Warningf(\n\t\t\tc,\n\t\t\t\"build %q does not have a valid Gitiles repository URL, %q. Skipping blamelist computation\",\n\t\t\tb.ID(), b.Sourcestamp.Repository)\n\t\treturn nil\n\t}\n\n\tif !commitHashRe.MatchString(b.Sourcestamp.Revision) {\n\t\tlogging.Warningf(\n\t\t\tc,\n\t\t\t\"build %q revision %q is not a commit hash. Skipping blamelist computation\",\n\t\t\tb.Sourcestamp.Revision, b.ID())\n\t\treturn nil\n\t}\n\n\tprevRev, err := getPrevRev(c, b, 100)\n\tswitch {\n\tcase err != nil:\n\t\treturn errors.Annotate(err, \"failed to get prev revision for build %q\", b.ID()).Err()\n\tcase prevRev == \"\":\n\t\tlogging.Warningf(c, \"prev rev of build %q is unknown. Skipping blamelist computation\", b.ID())\n\t\treturn nil\n\t}\n\n\t// Note that prev build may be coming from buildbot and having commit different\n\t// from the true previous _LUCI_ build, which may cause blamelist to have\n\t// extra or missing commits. 
This matters only for the first build after\n\t// next build number bump.\n\n\t// we don't really need a blamelist with a length > 50\n\tcommits, err := git.Get(c).Log(c, host, project, b.Sourcestamp.Revision, &git.LogOptions{Limit: 50})\n\tswitch status.Code(err) {\n\tcase codes.OK:\n\t\tfor _, commit := range commits {\n\t\t\tif commit.Id == prevRev {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tchange := changeFromGitiles(b.Sourcestamp.Repository, \"master\", commit)\n\t\t\tb.Sourcestamp.Changes = append(b.Sourcestamp.Changes, change)\n\t\t}\n\t\treturn nil\n\n\tcase codes.NotFound:\n\t\tlogging.WithError(err).Warningf(\n\t\t\tc,\n\t\t\t\"gitiles.log returned 404 %s/+/%s\",\n\t\t\tb.Sourcestamp.Repository, b.Sourcestamp.Revision)\n\t\tb.Sourcestamp.Changes = nil\n\t\treturn nil\n\n\tdefault:\n\t\treturn err\n\t}\n}", "func cacheSnapshotBlobs(p *Progress, s Server, c *Cache, id backend.ID) (*Map, error) {\n\tdebug.Log(\"CacheSnapshotBlobs\", \"create cache for snapshot %v\", id.Str())\n\n\tsn, err := LoadSnapshot(s, id)\n\tif err != nil {\n\t\tdebug.Log(\"CacheSnapshotBlobs\", \"unable to load snapshot %v: %v\", id.Str(), err)\n\t\treturn nil, err\n\t}\n\n\tm := NewMap()\n\n\t// add top-level node\n\tm.Insert(sn.Tree)\n\n\tp.Report(Stat{Trees: 1})\n\n\t// start walker\n\tvar wg sync.WaitGroup\n\tch := make(chan WalkTreeJob)\n\n\twg.Add(1)\n\tgo func() {\n\t\tWalkTree(s, sn.Tree, nil, ch)\n\t\twg.Done()\n\t}()\n\n\tfor i := 0; i < maxConcurrencyPreload; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tfor job := range ch {\n\t\t\t\tif job.Tree == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tp.Report(Stat{Trees: 1})\n\t\t\t\tdebug.Log(\"CacheSnapshotBlobs\", \"got job %v\", job)\n\t\t\t\tm.Merge(job.Tree.Map)\n\t\t\t}\n\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\twg.Wait()\n\n\t// save blob list for snapshot\n\treturn m, c.StoreMap(id, m)\n}", "func getAllDescChanges(\n\tctx context.Context,\n\tdb *client.DB,\n\tstartTime, endTime hlc.Timestamp,\n\tpriorIDs map[sqlbase.ID]sqlbase.ID,\n) ([]BackupDescriptor_DescriptorRevision, error) {\n\tstartKey := roachpb.Key(keys.MakeTablePrefix(keys.DescriptorTableID))\n\tendKey := startKey.PrefixEnd()\n\n\tallRevs, err := getAllRevisions(ctx, db, startKey, endKey, startTime, endTime)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar res []BackupDescriptor_DescriptorRevision\n\n\tfor _, revs := range allRevs {\n\t\tid, err := keys.DecodeDescMetadataID(revs.Key)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, rev := range revs.Values {\n\t\t\tr := BackupDescriptor_DescriptorRevision{ID: sqlbase.ID(id), Time: rev.Timestamp}\n\t\t\tif len(rev.RawBytes) != 0 {\n\t\t\t\tvar desc sqlbase.Descriptor\n\t\t\t\tif err := rev.GetProto(&desc); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tr.Desc = &desc\n\t\t\t\tif t := desc.GetTable(); t != nil && t.ReplacementOf.ID != sqlbase.InvalidID {\n\t\t\t\t\tpriorIDs[t.ID] = t.ReplacementOf.ID\n\t\t\t\t}\n\t\t\t}\n\t\t\tres = append(res, r)\n\t\t}\n\t}\n\treturn res, nil\n}", "func GetWatchdogsViaTimestamp(offset int, limit int, Timestamp_ int, field string) (*[]*Watchdog, error) {\n\tvar _Watchdog = new([]*Watchdog)\n\terr := Engine.Table(\"watchdog\").Where(\"timestamp = ?\", Timestamp_).Limit(limit, offset).Desc(field).Find(_Watchdog)\n\treturn _Watchdog, err\n}", "func (that *Stream) ListSince(timestamp int64, pageModel *PageModel) (*StreamResponseModelList, error) {\n\n\tpath := fmt.Sprintf(\"%s/%s/%s/%s/%d\", lib.DefaultAPIURL, lib.DefaultAPIVersion,\n\t\t\"streams\", \"since\", timestamp/1e6)\n\tpath = 
paramsUtil(path, pageModel)\n\n\tdata, err := that.client.Get(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponseList := new(StreamResponseModelList)\n\terr = json.Unmarshal(data, &responseList)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn responseList, nil\n}", "func (s *Server) List(ctx context.Context, in *proto.GetBlockRequest) (*proto.GetBlockResponse, error) {\n\ti, err := metrics.Gauge(\"List\", func() (interface{}, error) {\n\t\tresp := new(proto.GetBlockResponse)\n\n\t\tfor _, b := range s.Blockchain.Blocks {\n\t\t\tresp.Blocks = append(resp.Blocks, &proto.Block{\n\t\t\t\tPrevBlockHash: b.PrevBlockHash,\n\t\t\t\tData: b.Data,\n\t\t\t\tHash: b.Hash,\n\t\t\t})\n\t\t}\n\n\t\treturn resp, nil\n\t})\n\treturn i.(*proto.GetBlockResponse), err\n}", "func ImportBlobs(filepath string) ([]string, int64, error) {\n\n\tf, err := os.Open(filepath)\n\tif err != nil {\n\t\tlogger.Errorf(\"file of %s not exist\\n\", filepath)\n\t\treturn nil, 0, err\n\t}\n\tdefer f.Close()\n\n\tvar fileLength int64\n\thashList := make([]string, 0)\n\tfor {\n\t\tbuffer := make([]byte, configuration.BlobSize)\n\t\tn, err := f.Read(buffer)\n\t\tif err == io.EOF {\n\t\t\tif n > 0 {\n\t\t\t\tdgst := writeBlob(buffer[:n])\n\t\t\t\thashList = append(hashList, dgst)\n\t\t\t\tfileLength += int64(n)\n\t\t\t}\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\tlogger.Errorf(\"read file error %s, %s bytes already read\\n\", err, filepath)\n\t\t\treturn nil, fileLength, err\n\t\t}\n\n\t\tdgst := writeBlob(buffer[:n])\n\t\tfileLength += int64(n)\n\t\thashList = append(hashList, dgst)\n\t}\n\n\treturn hashList, fileLength, nil\n}", "func (ts TombstonesSet) GetDeletedIntervals(lbls labels.Labels, from, to model.Time) []model.Interval {\n\tif len(ts.tombstones) == 0 || to < ts.oldestTombstoneStart || from > ts.newestTombstoneEnd {\n\t\treturn nil\n\t}\n\n\tvar deletedIntervals []model.Interval\n\trequestedInterval := model.Interval{Start: from, End: to}\n\n\tfor i := range ts.tombstones {\n\t\toverlaps, overlappingInterval := getOverlappingInterval(requestedInterval,\n\t\t\tmodel.Interval{Start: ts.tombstones[i].StartTime, End: ts.tombstones[i].EndTime})\n\n\t\tif !overlaps {\n\t\t\tcontinue\n\t\t}\n\n\t\tmatches := false\n\t\tfor _, matchers := range ts.tombstones[i].Matchers {\n\t\t\tif labels.Selector(matchers).Matches(lbls) {\n\t\t\t\tmatches = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !matches {\n\t\t\tcontinue\n\t\t}\n\n\t\tif overlappingInterval == requestedInterval {\n\t\t\t// whole interval deleted\n\t\t\treturn []model.Interval{requestedInterval}\n\t\t}\n\n\t\tdeletedIntervals = append(deletedIntervals, overlappingInterval)\n\t}\n\n\tif len(deletedIntervals) == 0 {\n\t\treturn nil\n\t}\n\n\treturn mergeIntervals(deletedIntervals)\n}", "func (c *SequenceClockImpl) findModified(other SequenceClock) (modified []uint16) {\n\tif c.hashEquals(other.GetHashedValue()) {\n\t\treturn nil\n\t}\n\tfor vb, sequence := range other.Value() {\n\t\tif sequence > c.value[vb] {\n\t\t\tmodified = append(modified, uint16(vb))\n\t\t}\n\t}\n\treturn modified\n}", "func TestGetStatusByIPAddressAtTimestamp2(t *testing.T) {\n\n\tbefore(t, dbStorage, dbSchema)\n\n\tprivateIPs := []string{\"44.33.22.11\"}\n\tpublicIPs := []string{\"88.77.66.55\"}\n\thostnames := []string{\"yahoo.com\"} // nolint\n\ttimestamp, _ := time.Parse(time.RFC3339, \"2019-08-09T08:29:35+00:00\")\n\n\tfakeCloudAssetChange := newFakeCloudAssetChange(privateIPs, publicIPs, hostnames, timestamp, `arn`, `rtype`, `aid`, `region`, nil, true)\n\tif err := 
dbStorage.Store(ctx, fakeCloudAssetChange); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\t// just reuse the existing struct\n\tfakeCloudAssetChange.ARN = \"arn2\"\n\ttimestamp2, _ := time.Parse(time.RFC3339, \"2019-08-11T08:29:35+00:00\") // August 11\n\tfakeCloudAssetChange.ChangeTime = timestamp2\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tipAddress := \"88.77.66.55\"\n\tat, _ := time.Parse(time.RFC3339, \"2019-08-10T08:29:35+00:00\") // query is for status on August 10\n\tnetworkChangeEvents, err := dbStorage.FetchByIP(ctx, at, ipAddress)\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tassert.Equal(t, 1, len(networkChangeEvents))\n\n\texpected := []domain.CloudAssetDetails{\n\t\tdomain.CloudAssetDetails{nil, []string{\"88.77.66.55\"}, []string{\"yahoo.com\"}, \"rtype\", \"aid\", \"region\", \"arn\", nil, domain.AccountOwner{}}, // nolint\n\t}\n\n\tassertArrayEqualIgnoreOrder(t, expected, networkChangeEvents)\n\n}", "func cephRBDVolumeListSnapshots(clusterName string, poolName string,\n\tvolumeName string, volumeType string,\n\tuserName string) ([]string, error) {\n\tmsg, err := shared.RunCommand(\n\t\t\"rbd\",\n\t\t\"--id\", userName,\n\t\t\"--format\", \"json\",\n\t\t\"--cluster\", clusterName,\n\t\t\"--pool\", poolName,\n\t\t\"snap\",\n\t\t\"ls\", fmt.Sprintf(\"%s_%s\", volumeType, volumeName))\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\n\tvar data []map[string]interface{}\n\terr = json.Unmarshal([]byte(msg), &data)\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\n\tsnapshots := []string{}\n\tfor _, v := range data {\n\t\t_, ok := v[\"name\"]\n\t\tif !ok {\n\t\t\treturn []string{}, fmt.Errorf(\"No \\\"name\\\" property found\")\n\t\t}\n\n\t\tname, ok := v[\"name\"].(string)\n\t\tif !ok {\n\t\t\treturn []string{}, fmt.Errorf(\"\\\"name\\\" property did not have string type\")\n\t\t}\n\n\t\tname = strings.TrimSpace(name)\n\t\tsnapshots = append(snapshots, name)\n\t}\n\n\tif len(snapshots) == 0 {\n\t\treturn []string{}, db.ErrNoSuchObject\n\t}\n\n\treturn snapshots, nil\n}", "func (client ContainerAppsRevisionsClient) ListRevisionsSender(req *http.Request) (*http.Response, error) {\n\treturn client.Send(req, azure.DoRetryWithRegistration(client.Client))\n}", "func (b *Client) GetModifiedFiles(repo models.Repo, pull models.PullRequest) ([]string, error) {\n\tvar files []string\n\n\tnextPageURL := fmt.Sprintf(\"%s/2.0/repositories/%s/pullrequests/%d/diffstat\", b.BaseURL, repo.FullName, pull.Num)\n\t// We'll only loop 1000 times as a safety measure.\n\tmaxLoops := 1000\n\tfor i := 0; i < maxLoops; i++ {\n\t\tresp, err := b.makeRequest(\"GET\", nextPageURL, nil)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvar diffStat DiffStat\n\t\tif err := json.Unmarshal(resp, &diffStat); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"Could not parse response %q\", string(resp))\n\t\t}\n\t\tif err := validator.New().Struct(diffStat); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"API response %q was missing fields\", string(resp))\n\t\t}\n\t\tfor _, v := range diffStat.Values {\n\t\t\tif v.Old != nil {\n\t\t\t\tfiles = append(files, *v.Old.Path)\n\t\t\t}\n\t\t\tif v.New != nil {\n\t\t\t\tfiles = append(files, *v.New.Path)\n\t\t\t}\n\t\t}\n\t\tif diffStat.Next == nil || *diffStat.Next == \"\" {\n\t\t\tbreak\n\t\t}\n\t\tnextPageURL = *diffStat.Next\n\t}\n\n\t// Now ensure all files are unique.\n\thash := make(map[string]bool)\n\tvar unique []string\n\tfor _, f := range files {\n\t\tif !hash[f] 
{\n\t\t\tunique = append(unique, f)\n\t\t\thash[f] = true\n\t\t}\n\t}\n\treturn unique, nil\n}", "func (p *commentsPlugin) cachedTimestamps(token []byte, commentIDs []uint32) (map[uint32]*comments.CommentTimestamp, error) {\n\t// Setup the timestamp keys\n\tkeys := make([]string, 0, len(commentIDs))\n\tfor _, cid := range commentIDs {\n\t\tk, err := getTimestampKey(token, cid)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tkeys = append(keys, k)\n\t}\n\n\t// Get the timestamp blob entries\n\tblobs, err := p.tstore.CacheGet(keys)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Decode the timestamps\n\tts := make(map[uint32]*comments.CommentTimestamp, len(blobs))\n\tcacheIDs := make([]uint32, 0, len(blobs))\n\tfor k, v := range blobs {\n\t\tvar t comments.CommentTimestamp\n\t\terr := json.Unmarshal(v, &t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcid, err := parseTimestampKey(k)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tts[cid] = &t\n\t\tcacheIDs = append(cacheIDs, cid)\n\t}\n\n\tlog.Debugf(\"Retrieved cached final comment timestamps of %v/%v\",\n\t\tlen(cacheIDs), len(commentIDs))\n\treturn ts, nil\n}", "func FindModifiedFiles() []string {\n\tresult := GitDiff(\"--name-only\", \"-z\")\n\tresult = append(result, GitDiff(\"--name-only\", \"--cached\", \"-z\")...)\n\n\treturn result\n}", "func (pb PageBlobURL) GetManagedDiskPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot *string, prevSnapshotURL *string, ac BlobAccessConditions) (*PageList, error) {\n\tifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()\n\n\treturn pb.pbClient.GetPageRangesDiff(ctx, nil, nil, prevSnapshot,\n\t\tprevSnapshotURL, // Get managed disk diff\n\t\thttpRange{offset: offset, count: count}.pointers(),\n\t\tac.LeaseAccessConditions.pointers(),\n\t\tifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,\n\t\tnil, // Blob ifTags\n\t\tnil)\n}", "func MessagesInRange(start, end, path string) ([]Message, error) {\n\tif isEmpty(start) {\n\t\tstart = \"HEAD\"\n\t}\n\n\tif isEmpty(path) {\n\t\tpath = \".\"\n\t}\n\n\tvar logrange string\n\tif isEmpty(end) {\n\t\tlogrange = start\n\t} else {\n\t\tlogrange = fmt.Sprintf(\"%s...%s\", start, end)\n\t}\n\n\tcmd := exec.Command(\"git\", \"log\", logrange, path)\n\tout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn []Message{}, nil\n\t}\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn []Message{}, nil\n\t}\n\tdefer cmd.Wait()\n\treturn parseMessages(out)\n}", "func (g *Git) GetCommitsBetween(from *plumbing.Reference, to *plumbing.Reference) ([]*ChangelogItem, error) {\n\tvar history []*ChangelogItem\n\tvar exists bool\n\n\tcommits, err := g.repo.Log(&git.LogOptions{From: from.Hash()})\n\tif err != nil {\n\t\treturn history, err\n\t}\n\n\t// Iterate over all commits\n\t// Break when `to` has been found\n\terr = commits.ForEach(func(commit *object.Commit) error {\n\t\tif commit.Hash == to.Hash() {\n\t\t\texists = true\n\t\t\treturn errors.New(\"ErrStop\")\n\t\t}\n\n\t\t// Check if commit message contains issue in form `(#0..9)`\n\t\t// and add commit as a changelog item\n\t\tif hasIssue(commit.Message) {\n\t\t\thistory = append(history, &ChangelogItem{\n\t\t\t\tHash: commit.Hash.String(),\n\t\t\t\tText: commit.Message,\n\t\t\t\tIssueID: getIssueFrom(commit.Message),\n\t\t\t\tAuthor: commit.Author.Name,\n\t\t\t})\n\t\t}\n\t\treturn nil\n\t})\n\n\tif exists {\n\t\treturn history, nil\n\t}\n\n\treturn history, errors.Errorf(\"Unable to compare 
references, %v not found in history of %v\", to.Name().Short(), from.Name().Short())\n}", "func getRelevantDescChanges(\n\tctx context.Context,\n\tdb *client.DB,\n\tstartTime, endTime hlc.Timestamp,\n\tdescs []sqlbase.Descriptor,\n\texpanded []sqlbase.ID,\n\tpriorIDs map[sqlbase.ID]sqlbase.ID,\n) ([]BackupDescriptor_DescriptorRevision, error) {\n\n\tallChanges, err := getAllDescChanges(ctx, db, startTime, endTime, priorIDs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// If no descriptors changed, we can just stop now and have RESTORE use the\n\t// normal list of descs (i.e. as of endTime).\n\tif len(allChanges) == 0 {\n\t\treturn nil, nil\n\t}\n\n\t// interestingChanges will be every descriptor change relevant to the backup.\n\tvar interestingChanges []BackupDescriptor_DescriptorRevision\n\n\t// interestingIDs are the descriptor for which we're interested in capturing\n\t// changes. This is initially the descriptors matched (as of endTime) by our\n\t// target spec, plus those that belonged to a DB that our spec expanded at any\n\t// point in the interval.\n\tinterestingIDs := make(map[sqlbase.ID]struct{}, len(descs))\n\n\t// The descriptors that currently (endTime) match the target spec (desc) are\n\t// obviously interesting to our backup.\n\tfor _, i := range descs {\n\t\tinterestingIDs[i.GetID()] = struct{}{}\n\t\tif t := i.GetTable(); t != nil {\n\t\t\tfor j := t.ReplacementOf.ID; j != sqlbase.InvalidID; j = priorIDs[j] {\n\t\t\t\tinterestingIDs[j] = struct{}{}\n\t\t\t}\n\t\t}\n\t}\n\n\t// We're also interested in any desc that belonged to a DB we're backing up.\n\t// We'll start by looking at all descriptors as of the beginning of the\n\t// interval and add to the set of IDs that we are interested any descriptor that\n\t// belongs to one of the parents we care about.\n\tinterestingParents := make(map[sqlbase.ID]struct{}, len(expanded))\n\tfor _, i := range expanded {\n\t\tinterestingParents[i] = struct{}{}\n\t}\n\n\tif !startTime.IsEmpty() {\n\t\tstarting, err := loadAllDescs(ctx, db, startTime)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, i := range starting {\n\t\t\tif table := i.GetTable(); table != nil {\n\t\t\t\t// We need to add to interestingIDs so that if we later see a delete for\n\t\t\t\t// this ID we still know it is interesting to us, even though we will not\n\t\t\t\t// have a parentID at that point (since the delete is a nil desc).\n\t\t\t\tif _, ok := interestingParents[table.ParentID]; ok {\n\t\t\t\t\tinterestingIDs[table.ID] = struct{}{}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif _, ok := interestingIDs[i.GetID()]; ok {\n\t\t\t\tdesc := i\n\t\t\t\t// We inject a fake \"revision\" that captures the starting state for\n\t\t\t\t// matched descriptor, to allow restoring to times before its first rev\n\t\t\t\t// actually inside the window. 
This likely ends up duplicating the last\n\t\t\t\t// version in the previous BACKUP descriptor, but avoids adding more\n\t\t\t\t// complicated special-cases in RESTORE, so it only needs to look in a\n\t\t\t\t// single BACKUP to restore to a particular time.\n\t\t\t\tinitial := BackupDescriptor_DescriptorRevision{Time: startTime, ID: i.GetID(), Desc: &desc}\n\t\t\t\tinterestingChanges = append(interestingChanges, initial)\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, change := range allChanges {\n\t\t// A change to an ID that we are interested in is obviously interesting --\n\t\t// a change is also interesting if it is to a table that has a parent that\n\t\t// we are interested and thereafter it also becomes an ID in which we are\n\t\t// interested in changes (since, as mentioned above, to decide if deletes\n\t\t// are interesting).\n\t\tif _, ok := interestingIDs[change.ID]; ok {\n\t\t\tinterestingChanges = append(interestingChanges, change)\n\t\t} else if change.Desc != nil {\n\t\t\tif table := change.Desc.GetTable(); table != nil {\n\t\t\t\tif _, ok := interestingParents[table.ParentID]; ok {\n\t\t\t\t\tinterestingIDs[table.ID] = struct{}{}\n\t\t\t\t\tinterestingChanges = append(interestingChanges, change)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tsort.Slice(interestingChanges, func(i, j int) bool {\n\t\treturn interestingChanges[i].Time.Less(interestingChanges[j].Time)\n\t})\n\n\treturn interestingChanges, nil\n}", "func (core *coreService) LogsInRange(filter *logfilter.LogFilter, start, end, paginationSize uint64) ([]*action.Log, []hash.Hash256, error) {\n\tstart, end, err := core.correctQueryRange(start, end)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tif paginationSize == 0 {\n\t\tpaginationSize = 1000\n\t}\n\tif paginationSize > 5000 {\n\t\tpaginationSize = 5000\n\t}\n\t// getLogs via range Blooom filter [start, end]\n\tblockNumbers, err := core.bfIndexer.FilterBlocksInRange(filter, start, end, paginationSize)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tvar (\n\t\tlogs = []*action.Log{}\n\t\thashes = []hash.Hash256{}\n\t\tlogsInBlk = make([][]*action.Log, len(blockNumbers))\n\t\tHashInBlk = make([]hash.Hash256, len(blockNumbers))\n\t\tjobs = make(chan jobDesc, len(blockNumbers))\n\t\teg, ctx = errgroup.WithContext(context.Background())\n\t)\n\tif len(blockNumbers) == 0 {\n\t\treturn logs, hashes, nil\n\t}\n\n\tfor i, v := range blockNumbers {\n\t\tjobs <- jobDesc{i, v}\n\t}\n\tclose(jobs)\n\tfor w := 0; w < _workerNumbers; w++ {\n\t\teg.Go(func() error {\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\treturn ctx.Err()\n\t\t\t\tdefault:\n\t\t\t\t\tjob, ok := <-jobs\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t\tlogsInBlock, err := core.logsInBlock(filter, job.blkNum)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tblkHash, err := core.dao.GetBlockHash(job.blkNum)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tlogsInBlk[job.idx] = logsInBlock\n\t\t\t\t\tHashInBlk[job.idx] = blkHash\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n\tif err := eg.Wait(); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tfor i := 0; i < len(blockNumbers); i++ {\n\t\tfor j := range logsInBlk[i] {\n\t\t\tlogs = append(logs, logsInBlk[i][j])\n\t\t\thashes = append(hashes, HashInBlk[i])\n\t\t\tif len(logs) >= int(paginationSize) {\n\t\t\t\treturn logs, hashes, nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn logs, hashes, nil\n}", "func ReadBlocksHashByTmSlice(db ReadIteration, slice uint64) ([]common.Hash, error) {\n\t//mTime := 
utils.GetMainTime(slice)\n\tvar hashes []common.Hash\n\tkey := blockLookUpKey(slice)\n\titerator := db.NewIteratorWithPrefix(key)\n\tif iterator == nil {\n\t\treturn nil, fmt.Errorf(\"Failed to get iterator\")\n\t}\n\n\tfor iterator.Next() {\n\t\thash := common.BytesToHash(iterator.Value())\n\t\thashes = append(hashes, hash)\n\t}\n\n\treturn hashes, nil\n}", "func (l Logfiles) FilterOld(oldTime time.Time) Logfiles {\n\tf := make(Logfiles, 0)\n\tfor _, logfile := range l {\n\t\tfinfo, err := os.Stat(logfile.FileName)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif finfo.ModTime().After(oldTime) {\n\t\t\tf = append(f, logfile)\n\t\t}\n\t}\n\treturn f\n}", "func (c *Client) ListHistory(ctx context.Context, userID, appID string, start, end int64, opts ...grpc.CallOption) (map[*trillian.SignedMapRoot][]byte, error) {\n\tif start < 0 {\n\t\treturn nil, fmt.Errorf(\"start=%v, want >= 0\", start)\n\t}\n\tvar currentProfile []byte\n\tprofiles := make(map[*trillian.SignedMapRoot][]byte)\n\tepochsReceived := int64(0)\n\tepochsWant := end - start + 1\n\tfor epochsReceived < epochsWant {\n\t\ttrustedSnapshot := c.trusted\n\t\tresp, err := c.cli.ListEntryHistory(ctx, &pb.ListEntryHistoryRequest{\n\t\t\tDomainId: c.domainID,\n\t\t\tUserId: userID,\n\t\t\tAppId: appID,\n\t\t\tFirstTreeSize: trustedSnapshot.TreeSize,\n\t\t\tStart: start,\n\t\t\tPageSize: min(int32((end-start)+1), pageSize),\n\t\t}, opts...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tepochsReceived += int64(len(resp.GetValues()))\n\n\t\tfor i, v := range resp.GetValues() {\n\t\t\tVlog.Printf(\"Processing entry for %v, epoch %v\", userID, start+int64(i))\n\t\t\terr = c.VerifyGetEntryResponse(ctx, c.domainID, appID, userID, trustedSnapshot, v)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tc.updateTrusted(v.GetLogRoot())\n\n\t\t\t// Compress profiles that are equal through time. 
All\n\t\t\t// nil profiles before the first profile are ignored.\n\t\t\tprofile := v.GetCommitted().GetData()\n\t\t\tif bytes.Equal(currentProfile, profile) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Append the slice and update currentProfile.\n\t\t\tprofiles[v.GetSmr()] = profile\n\t\t\tcurrentProfile = profile\n\t\t}\n\t\tif resp.NextStart == 0 {\n\t\t\tbreak // No more data.\n\t\t}\n\t\tstart = resp.NextStart // Fetch the next block of results.\n\t}\n\n\tif epochsReceived < epochsWant {\n\t\treturn nil, ErrIncomplete\n\t}\n\n\treturn profiles, nil\n}", "func printBlobList(blobClient *storage.BlobStorageClient, containerName string) error {\n\tfmt.Printf(\"Get blob list from container '%v'...\\n\", containerName)\n\tlist, err := blobClient.ListBlobs(containerName, storage.ListBlobsParameters{})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Printf(\"Blobs inside '%v' container:\\n\", containerName)\n\tfor _, b := range list.Blobs {\n\t\tfmt.Printf(\"\\t%v\\n\", b.Name)\n\t}\n\treturn nil\n}", "func (db *database) QueryByModified(lastTime time.Time, lastID int64, limit int) ([]snippet, error) {\n\tif lastTime.IsZero() && lastID == 0 {\n\t\tlastTime, lastID = maxTime, maxID // Find everything\n\t}\n\tvar ss []snippet\n\terr := db.db.View(func(tx *bolt.Tx) error {\n\t\t// Seek to the latest value that is immediately before the search key.\n\t\tbktByDate := tx.Bucket([]byte(bucketByDate))\n\t\tc := bktByDate.Cursor()\n\t\tsk := dualKey(lastID, lastTime)\n\t\tk, _ := c.Seek(sk)\n\t\tif k == nil {\n\t\t\tk, _ = c.Last()\n\t\t}\n\n\t\t// Iterate through all results.\n\t\tss = nil\n\t\tbktByID := tx.Bucket([]byte(bucketByID))\n\t\tfor ; k != nil; k, _ = c.Prev() {\n\t\t\tif len(ss) >= limit && limit >= 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif bytes.Compare(k, sk) >= 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvar s snippet\n\t\t\tv := bktByID.Get(k[12:20]) // Extract ID from dual key\n\t\t\tif err := s.UnmarshalBinary(v); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tss = append(ss, s)\n\t\t}\n\t\treturn nil\n\t})\n\treturn ss, err\n}", "func ListSnapshots(sg *snapshotgroup.SnapshotGroup) ([]GeminiSnapshot, error) {\n\tclient := kube.GetClient()\n\tsnapshots, err := client.SnapshotClient.Namespace(sg.ObjectMeta.Namespace).List(metav1.ListOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tGeminiSnapshots := []GeminiSnapshot{}\n\tfor _, snapshot := range snapshots.Items {\n\t\tsnapshotMeta, err := meta.Accessor(&snapshot)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tannotations := snapshotMeta.GetAnnotations()\n\t\tif managedBy, ok := annotations[managedByAnnotation]; !ok || managedBy != managerName {\n\t\t\tcontinue\n\t\t}\n\t\tif annotations[GroupNameAnnotation] != sg.ObjectMeta.Name {\n\t\t\tcontinue\n\t\t}\n\t\ttimestampStr := annotations[TimestampAnnotation]\n\t\ttimestamp, err := strconv.Atoi(timestampStr)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"%s/%s: failed to parse unix timestamp %s for %s\", sg.ObjectMeta.Namespace, sg.ObjectMeta.Name, timestampStr, snapshotMeta.GetName())\n\t\t\tcontinue\n\t\t}\n\t\tintervals := []string{}\n\t\tintervalsStr := annotations[IntervalsAnnotation]\n\t\tif intervalsStr != \"\" {\n\t\t\tintervals = strings.Split(intervalsStr, intervalsSeparator)\n\t\t}\n\t\tGeminiSnapshots = append(GeminiSnapshots, GeminiSnapshot{\n\t\t\tNamespace: snapshotMeta.GetNamespace(),\n\t\t\tName: snapshotMeta.GetName(),\n\t\t\tTimestamp: time.Unix(int64(timestamp), 0),\n\t\t\tIntervals: intervals,\n\t\t\tRestore: 
annotations[RestoreAnnotation],\n\t\t})\n\t}\n\tsort.Slice(GeminiSnapshots, func(i, j int) bool {\n\t\treturn GeminiSnapshots[j].Timestamp.Before(GeminiSnapshots[i].Timestamp)\n\t})\n\treturn GeminiSnapshots, nil\n}", "func (a *ChangeAgent) handleGetChanges(\n\tresp http.ResponseWriter, req *http.Request, params httprouter.Params) {\n\tqps := req.URL.Query()\n\n\tlimitStr := qps.Get(\"limit\")\n\tif limitStr == \"\" {\n\t\tlimitStr = defaultLimit\n\t}\n\tlmt, err := strconv.ParseUint(limitStr, 10, 32)\n\tif err != nil {\n\t\twriteError(resp, http.StatusBadRequest, errors.New(\"Invalid limit\"))\n\t\treturn\n\t}\n\tlimit := uint(lmt)\n\n\tsinceStr := qps.Get(\"since\")\n\tif sinceStr == \"\" {\n\t\tsinceStr = defaultSince\n\t}\n\tsince, err := strconv.ParseUint(sinceStr, 10, 64)\n\tif err != nil {\n\t\twriteError(resp, http.StatusBadRequest, errors.New(\"Invalid since\"))\n\t\treturn\n\t}\n\n\tblockStr := qps.Get(\"block\")\n\tif blockStr == \"\" {\n\t\tblockStr = defaultBlock\n\t}\n\tbk, err := strconv.ParseUint(blockStr, 10, 32)\n\tif err != nil {\n\t\twriteError(resp, http.StatusBadRequest, errors.New(\"Invalid block\"))\n\t\treturn\n\t}\n\tblock := time.Duration(bk)\n\n\ttags := qps[\"tag\"]\n\n\t// Fetch more than we need so we can see if we're at the beginning or end\n\tfetchLimit := limit + 1\n\tfetchSince := since\n\tif fetchSince > 0 {\n\t\tfetchSince--\n\t\tfetchLimit++\n\t}\n\n\tentries, lastFullChange, err := a.fetchEntries(fetchSince, fetchLimit, tags, resp)\n\tif err != nil {\n\t\treturn\n\t}\n\tentries, atStart, atEnd := pruneChanges(entries, since, limit)\n\n\tif (len(entries) == 0) && (block > 0) {\n\t\tnow := time.Now()\n\t\twaitEnd := now.Add(block * time.Second)\n\t\twaitFor := lastFullChange\n\t\tfor len(entries) == 0 && waitEnd.After(now) {\n\t\t\t// Because of tags, do this in a loop, so we check for tags every time and re-wait\n\t\t\twaitFor++\n\t\t\twaitRemaining := waitEnd.Sub(now)\n\t\t\tglog.V(2).Infof(\"Waiting %d milliseconds for the next change after %d\", waitRemaining, waitFor)\n\t\t\ta.raft.GetAppliedTracker().TimedWait(waitFor, waitRemaining)\n\t\t\tentries, _, err = a.fetchEntries(waitFor-1, fetchLimit, tags, resp)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tentries, atStart, atEnd = pruneChanges(entries, since, limit)\n\t\t\tglog.V(2).Infof(\"Got %d changes after blocking\", len(entries))\n\t\t\tnow = time.Now()\n\t\t}\n\t}\n\n\tresp.Header().Set(\"Content-Type\", jsonContent)\n\tmarshalChanges(entries, atStart, atEnd, resp)\n}", "func ListSnapshots(storagDriverConfig interface{}, pred func(id string) bool) (ids []string, err error) {\n\t// create (backup) storage driver (so we can list snapshot headers from it)\n\tdriver, err := newStorageDriver(storagDriverConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tids, err = driver.GetHeaders()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif pred == nil {\n\t\treturn ids, nil\n\t}\n\n\tfilterPos := 0\n\tvar ok bool\n\tfor _, id := range ids {\n\t\tok = pred(id)\n\t\tif ok {\n\t\t\tids[filterPos] = id\n\t\t\tfilterPos++\n\t\t}\n\t}\n\n\treturn ids[:filterPos], nil\n}", "func (g *GitRepo) ModifiedFiles(c *git.Commit, tree *git.Tree) ([]string, error) {\n\topts := &git.DiffOptions{}\n\tmodified := []string{}\n\tparentCount := c.ParentCount()\n\tfor i := uint(0); i <= parentCount; i++ {\n\t\tparentID := c.ParentId(i)\n\t\tif parentID == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Debugf(\"Looking up parent commit-id '%s'\", parentID.String())\n\t\tparent, err := g.r.LookupCommit(parentID)\n\t\tif 
err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer parent.Free()\n\n\t\tparentTree, err := parent.Tree()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer parentTree.Free()\n\n\t\tdiff, err := g.r.DiffTreeToTree(parentTree, tree, opts)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer diff.Free()\n\n\t\t_ = diff.ForEach(func(f git.DiffDelta, p float64) (git.DiffForEachHunkCallback, error) {\n\t\t\tmodified = append(modified, f.OldFile.Path)\n\t\t\treturn nil, nil\n\t\t}, git.DiffDetailFiles)\n\t}\n\treturn modified, nil\n}", "func GetBlogVersionsesViaLastUpdated(offset int, limit int, LastUpdated_ time.Time, field string) (*[]*BlogVersions, error) {\n\tvar _BlogVersions = new([]*BlogVersions)\n\terr := Engine.Table(\"blog_versions\").Where(\"last_updated = ?\", LastUpdated_).Limit(limit, offset).Desc(field).Find(_BlogVersions)\n\treturn _BlogVersions, err\n}", "func (s Secret) Blobs() [][]byte {\n\tout := make([][]byte, 0, 1+len(s.Previous))\n\tout = append(out, s.Current)\n\tout = append(out, s.Previous...)\n\treturn out\n}", "func (r Repository) LogBetween(since, until time.Time, opt *LogBetweenOptions) ([]CommitInfo, error) {\n\tif opt == nil {\n\t\topt = &LogBetweenOptions{}\n\t}\n\targs := []string{\"log\",\n\t\t\"--since\", since.Format(time.RFC3339),\n\t\t\"--until\", until.Format(time.RFC3339),\n\t\tlogPrettyFormatArg,\n\t}\n\tout, err := r.run(nil, opt.Timeout, args...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn parseLog(out)\n}", "func TestGetStatusByIPAddressAtTimestamp4(t *testing.T) {\n\n\tbefore(t, dbStorage, dbSchema)\n\n\tprivateIPs := []string{\"44.33.22.11\"}\n\tpublicIPs := []string{\"88.77.66.55\"}\n\thostnames := []string{\"yahoo.com\"} // nolint\n\ttimestamp, _ := time.Parse(time.RFC3339, \"2019-08-09T08:29:35+00:00\")\n\n\tfakeCloudAssetChange := newFakeCloudAssetChange(privateIPs, publicIPs, hostnames, timestamp, `arn`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\ttimestamp2, _ := time.Parse(time.RFC3339, \"2019-08-12T08:29:35+00:00\") // August 12\n\thostnames2 := []string{\"blarg.com\"}\n\tfakeCloudAssetChange2 := newFakeCloudAssetChange(privateIPs, publicIPs, hostnames2, timestamp2, `arn2`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange2); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tipAddress := \"88.77.66.55\"\n\tat, _ := time.Parse(time.RFC3339, \"2019-08-11T08:29:35+00:00\") // query is for status on August 11\n\tnetworkChangeEvents, err := dbStorage.FetchByIP(ctx, at, ipAddress)\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tassert.Equal(t, 1, len(networkChangeEvents))\n\n\texpected := []domain.CloudAssetDetails{\n\t\tdomain.CloudAssetDetails{nil, []string{\"88.77.66.55\"}, []string{\"yahoo.com\"}, \"rtype\", \"aid\", \"region\", \"arn\", nil, domain.AccountOwner{}}, // nolint\n\t}\n\n\tassertArrayEqualIgnoreOrder(t, expected, networkChangeEvents)\n\n}", "func getHistory(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\ttype AuditHistory struct {\n\t\tTxId string `json:\"txId\"`\n\t\tValue Marble `json:\"value\"`\n\t}\n\tvar history []AuditHistory;\n\tvar marble Marble\n\n\tif len(args) != 1 {\n\t\treturn shim.Error(\"Incorrect number of arguments. 
Expecting 1\")\n\t}\n\n\tmarbleId := args[0]\n\tfmt.Printf(\"- start getHistoryForMarble: %s\\n\", marbleId)\n\n\t// Get History\n\tresultsIterator, err := stub.GetHistoryForKey(marbleId)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\tdefer resultsIterator.Close()\n\n\tfor resultsIterator.HasNext() {\n\t\thistoryData, err := resultsIterator.Next()\n\t\tif err != nil {\n\t\t\treturn shim.Error(err.Error())\n\t\t}\n\n\t\tvar tx AuditHistory\n\t\ttx.TxId = historyData.TxId //copy transaction id over\n\t\tjson.Unmarshal(historyData.Value, &marble) //un stringify it aka JSON.parse()\n\t\tif historyData.Value == nil { //marble has been deleted\n\t\t\tvar emptyMarble Marble\n\t\t\ttx.Value = emptyMarble //copy nil marble\n\t\t} else {\n\t\t\tjson.Unmarshal(historyData.Value, &marble) //un stringify it aka JSON.parse()\n\t\t\ttx.Value = marble //copy marble over\n\t\t}\n\t\thistory = append(history, tx) //add this tx to the list\n\t}\n\tfmt.Printf(\"- getHistoryForMarble returning:\\n%s\", history)\n\n\t//change to array of bytes\n\thistoryAsBytes, _ := json.Marshal(history) //convert to array of bytes\n\treturn shim.Success(historyAsBytes)\n}", "func (ct *ctrlerCtx) diffBucket(apicl apiclient.Services) {\n\topts := api.ListWatchOptions{}\n\n\t// get a list of all objects from API server\n\tobjlist, err := apicl.ObjstoreV1().Bucket().List(context.Background(), &opts)\n\tif err != nil {\n\t\tct.logger.Errorf(\"Error getting a list of objects. Err: %v\", err)\n\t\treturn\n\t}\n\n\tct.logger.Infof(\"diffBucket(): BucketList returned %d objects\", len(objlist))\n\n\t// build an object map\n\tobjmap := make(map[string]*objstore.Bucket)\n\tfor _, obj := range objlist {\n\t\tobjmap[obj.GetKey()] = obj\n\t}\n\n\tlist, err := ct.Bucket().List(context.Background(), &opts)\n\tif err != nil && !strings.Contains(err.Error(), \"not found in local cache\") {\n\t\tct.logger.Infof(\"Failed to get a list of objects. 
Err: %s\", err)\n\t\treturn\n\t}\n\n\t// if an object is in our local cache and not in API server, trigger delete for it\n\tfor _, obj := range list {\n\t\t_, ok := objmap[obj.GetKey()]\n\t\tif !ok {\n\t\t\tct.logger.Infof(\"diffBucket(): Deleting existing object %#v since its not in apiserver\", obj.GetKey())\n\t\t\tevt := kvstore.WatchEvent{\n\t\t\t\tType: kvstore.Deleted,\n\t\t\t\tKey: obj.GetKey(),\n\t\t\t\tObject: &obj.Bucket,\n\t\t\t}\n\t\t\tct.handleBucketEvent(&evt)\n\t\t}\n\t}\n\n\t// trigger create event for all others\n\tfor _, obj := range objlist {\n\t\tct.logger.Infof(\"diffBucket(): Adding object %#v\", obj.GetKey())\n\t\tevt := kvstore.WatchEvent{\n\t\t\tType: kvstore.Created,\n\t\t\tKey: obj.GetKey(),\n\t\t\tObject: obj,\n\t\t}\n\t\tct.handleBucketEvent(&evt)\n\t}\n}", "func TestGetStatusByHostnameAtTimestamp2(t *testing.T) {\n\n\tbefore(t, dbStorage, dbSchema)\n\n\tprivateIPs := []string{\"44.33.22.11\"}\n\tpublicIPs := []string{\"88.77.66.55\"} // nolint\n\thostnames := []string{\"yahoo.com\"} // nolint\n\ttimestamp, _ := time.Parse(time.RFC3339, \"2019-08-09T08:29:35+00:00\")\n\n\tfakeCloudAssetChange := newFakeCloudAssetChange(privateIPs, publicIPs, hostnames, timestamp, `arn`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\t// just reuse the existing struct\n\tfakeCloudAssetChange.ARN = \"arn2\"\n\ttimestamp2, _ := time.Parse(time.RFC3339, \"2019-08-11T08:29:35+00:00\") // August 11\n\tfakeCloudAssetChange.ChangeTime = timestamp2\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\thostname := \"yahoo.com\" // nolint\n\tat, _ := time.Parse(time.RFC3339, \"2019-08-10T08:29:35+00:00\") // query is for status on August 10\n\tnetworkChangeEvents, err := dbStorage.FetchByHostname(ctx, at, hostname)\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tassert.Equal(t, 1, len(networkChangeEvents))\n\n\texpected := []domain.CloudAssetDetails{{nil, []string{\"88.77.66.55\"}, []string{\"yahoo.com\"}, \"rtype\", \"aid\", \"region\", \"arn\", nil, domain.AccountOwner{}}} // nolint\n\n\tassertArrayEqualIgnoreOrder(t, expected, networkChangeEvents)\n\n}", "func (o *DeeplinkApp) SetModifiedTs(v int32) {\n\to.ModifiedTs = &v\n}", "func cephRBDSnapshotListClones(clusterName string, poolName string,\n\tvolumeName string, volumeType string,\n\tsnapshotName string, userName string) ([]string, error) {\n\tmsg, err := shared.RunCommand(\n\t\t\"rbd\",\n\t\t\"--id\", userName,\n\t\t\"--cluster\", clusterName,\n\t\t\"--pool\", poolName,\n\t\t\"children\",\n\t\t\"--image\", fmt.Sprintf(\"%s_%s\", volumeType, volumeName),\n\t\t\"--snap\", snapshotName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmsg = strings.TrimSpace(msg)\n\tclones := strings.Fields(msg)\n\tif len(clones) == 0 {\n\t\treturn nil, db.ErrNoSuchObject\n\t}\n\n\treturn clones, nil\n}", "func Diff(a, b string) []string {\n\topts := jsondiff.Options{\n\t\tAdded: jsondiff.Tag{Begin: \"{\\\"changed\\\":[\", End: \"]}\"},\n\t\tRemoved: jsondiff.Tag{Begin: \"{\\\"changed\\\":[\", End: \"]}\"},\n\t\tChanged: jsondiff.Tag{Begin: \"{\\\"changed\\\":[\", End: \"]}\"},\n\t\tChangedSeparator: \", \",\n\t\tIndent: \" \",\n\t}\n\n\tresult, comparedStr := jsondiff.Compare([]byte(a), []byte(b), &opts)\n\n\tif !(result == jsondiff.NoMatch || result == jsondiff.SupersetMatch) {\n\t\treturn nil\n\t}\n\n\treader := bufio.NewReader(bytes.NewReader([]byte(comparedStr)))\n\tdiffMap := 
make(map[string]bool)\n\tvar currentProperty string\n\tfor {\n\t\tstringRead, err := reader.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlog.Printf(\"Error finding difference in json strings %v\", err)\n\t\t}\n\t\tif yes, str := isNewTopLevelProp(stringRead); yes {\n\t\t\tcurrentProperty = str\n\t\t}\n\t\tif strings.Contains(stringRead, changePattern) {\n\t\t\tdiffMap[currentProperty] = true\n\t\t}\n\t}\n\treturn mapKeysToSlice(reflect.ValueOf(diffMap).MapKeys())\n}", "func Difference(a, b []byte) []*Delta {\n\tn := len(a)\n\tm := len(b)\n\tedits := edits(a, b, n, m)\n\tif edits < 0 {\n\t\t// Error determining length of edit script\n\t\treturn nil\n\t}\n\tds := deltas(a, b, n, m, edits)\n\t// Compact deltas\n\tds = Compact(ds)\n\t// Rebase deltas into sequence\n\tvar change uint64\n\tfor _, d := range ds {\n\t\td.Offset += change\n\t\tchange -= d.Delete\n\t\tchange += uint64(len(d.Insert))\n\t}\n\treturn ds\n}", "func findModifiedContainers(podTemplate *v1.PodTemplateSpec, pod *v1.Pod) ([]hookv1alpha1.Argument, error) {\n\toldImages := make(map[string]string)\n\tfor _, container := range pod.Spec.Containers {\n\t\toldImages[container.Name] = container.Image\n\t}\n\n\targuments := make([]hookv1alpha1.Argument, 0)\n\tfor _, container := range podTemplate.Spec.Containers {\n\t\tif image, ok := oldImages[container.Name]; !ok || container.Image != image {\n\t\t\ttmp := new(string)\n\t\t\t*tmp = container.Name\n\t\t\timageArgs := hookv1alpha1.Argument{\n\t\t\t\tName: ModifiedArgKey + \"[\" + strconv.Itoa(len(arguments)) + \"]\",\n\t\t\t\tValue: tmp,\n\t\t\t}\n\t\t\targuments = append(arguments, imageArgs)\n\t\t}\n\n\t}\n\treturn arguments, nil\n}", "func getBlobsWithPrefix(tx *sql.Tx, prefix string) ([]*Blob, error) {\n\tvar blobs []*Blob\n\trows, err := tx.Query(\"SELECT * from blobinfo WHERE hasPrefix(digest, $1)\", prefix)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor rows.Next() {\n\t\tblob := &Blob{}\n\t\tif err := blobRowScan(rows, blob); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tblobs = append(blobs, blob)\n\t}\n\tif err := rows.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn blobs, err\n}", "func (m *MockEnginePolicyManager) ListPKBetweenUpdatedAt(beginUpdatedAt, endUpdatedAt int64) ([]int64, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListPKBetweenUpdatedAt\", beginUpdatedAt, endUpdatedAt)\n\tret0, _ := ret[0].([]int64)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (s BlobSet) List() BlobHandles {\n\tlist := make(BlobHandles, 0, len(s))\n\tfor h := range s {\n\t\tlist = append(list, h)\n\t}\n\n\tsort.Sort(list)\n\n\treturn list\n}", "func GetBlogVersionsesByDbVersionAndLastUpdated(offset int, limit int, DbVersion_ string, LastUpdated_ time.Time) (*[]*BlogVersions, error) {\n\tvar _BlogVersions = new([]*BlogVersions)\n\terr := Engine.Table(\"blog_versions\").Where(\"db_version = ? 
and last_updated = ?\", DbVersion_, LastUpdated_).Limit(limit, offset).Find(_BlogVersions)\n\treturn _BlogVersions, err\n}", "func TestGetStatusByIPAddressAtTimestamp5(t *testing.T) {\n\n\tbefore(t, dbStorage, dbSchema)\n\n\tprivateIPs := []string{\"44.33.22.11\"}\n\tpublicIPs := []string{\"88.77.66.55\"}\n\thostnames := []string{\"yahoo.com\"} // nolint\n\ttimestamp, _ := time.Parse(time.RFC3339, \"2019-08-09T08:29:35+00:00\")\n\n\tfakeCloudAssetChange := newFakeCloudAssetChange(privateIPs, publicIPs, hostnames, timestamp, `arn`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\ttimestamp2, _ := time.Parse(time.RFC3339, \"2019-08-12T08:29:35+00:00\") // August 12\n\thostnames2 := []string{\"blarg.com\"}\n\tfakeCloudAssetChange2 := newFakeCloudAssetChange(privateIPs, publicIPs, hostnames2, timestamp2, `arn2`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange2); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\ttimestamp3, _ := time.Parse(time.RFC3339, \"2019-08-10T08:29:35+00:00\") // August 10, arn3\n\thostnames3 := []string{\"reddit.com\"}\n\tfakeCloudAssetChange3 := newFakeCloudAssetChange(privateIPs, publicIPs, hostnames3, timestamp3, `arn3`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange3); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\ttimestamp4, _ := time.Parse(time.RFC3339, \"2019-08-10T08:39:35+00:00\") // August 10, 10 minutes later, arn3 drops the same IP address\n\tfakeCloudAssetChange4 := newFakeCloudAssetChange(privateIPs, publicIPs, hostnames3, timestamp4, `arn3`, `rtype`, `aid`, `region`, nil, false)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange4); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tipAddress := \"88.77.66.55\"\n\tat, _ := time.Parse(time.RFC3339, \"2019-08-13T08:29:35+00:00\") // query is for status on August 13\n\tnetworkChangeEvents, err := dbStorage.FetchByIP(ctx, at, ipAddress)\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tassert.Equal(t, 2, len(networkChangeEvents))\n\n\texpected := []domain.CloudAssetDetails{\n\t\tdomain.CloudAssetDetails{nil, []string{\"88.77.66.55\"}, []string{\"yahoo.com\"}, \"rtype\", \"aid\", \"region\", \"arn\", nil, domain.AccountOwner{}}, //nolint\n\t\tdomain.CloudAssetDetails{nil, []string{\"88.77.66.55\"}, []string{\"blarg.com\"}, \"rtype\", \"aid\", \"region\", \"arn2\", nil, domain.AccountOwner{}}, //nolint\n\t}\n\n\tassertArrayEqualIgnoreOrder(t, expected, networkChangeEvents)\n\n}", "func GetBlogVersionsesByBlogIdAndLastUpdated(offset int, limit int, BlogId_ int64, LastUpdated_ time.Time) (*[]*BlogVersions, error) {\n\tvar _BlogVersions = new([]*BlogVersions)\n\terr := Engine.Table(\"blog_versions\").Where(\"blog_id = ? 
and last_updated = ?\", BlogId_, LastUpdated_).Limit(limit, offset).Find(_BlogVersions)\n\treturn _BlogVersions, err\n}", "func (m *MockUploadService) GetAuditLogsForUpload(v0 context.Context, v1 int) ([]types.UploadLog, error) {\n\tr0, r1 := m.GetAuditLogsForUploadFunc.nextHook()(v0, v1)\n\tm.GetAuditLogsForUploadFunc.appendCall(UploadServiceGetAuditLogsForUploadFuncCall{v0, v1, r0, r1})\n\treturn r0, r1\n}", "func TestGetStatusByIPAddressAtTimestamp3(t *testing.T) {\n\n\tbefore(t, dbStorage, dbSchema)\n\n\tprivateIPs := []string{\"44.33.22.11\"}\n\tpublicIPs := []string{\"88.77.66.55\"}\n\thostnames := []string{\"yahoo.com\"} // nolint\n\ttimestamp, _ := time.Parse(time.RFC3339, \"2019-08-09T08:29:35+00:00\")\n\n\tfakeCloudAssetChange := newFakeCloudAssetChange(privateIPs, publicIPs, hostnames, timestamp, `arn`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\ttimestamp2, _ := time.Parse(time.RFC3339, \"2019-08-11T08:29:35+00:00\") // August 11\n\thostnames2 := []string{\"blarg.com\"}\n\tfakeCloudAssetChange2 := newFakeCloudAssetChange(privateIPs, publicIPs, hostnames2, timestamp2, `arn2`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange2); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tipAddress := \"88.77.66.55\"\n\tat, _ := time.Parse(time.RFC3339, \"2019-08-12T08:29:35+00:00\") // query is for status on August 12\n\tnetworkChangeEvents, err := dbStorage.FetchByIP(ctx, at, ipAddress)\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tassert.Equal(t, 2, len(networkChangeEvents))\n\n\texpected := []domain.CloudAssetDetails{\n\t\tdomain.CloudAssetDetails{nil, []string{\"88.77.66.55\"}, []string{\"yahoo.com\"}, \"rtype\", \"aid\", \"region\", \"arn\", nil, domain.AccountOwner{}}, // nolint\n\t\tdomain.CloudAssetDetails{nil, []string{\"88.77.66.55\"}, []string{\"blarg.com\"}, \"rtype\", \"aid\", \"region\", \"arn2\", nil, domain.AccountOwner{}}, // nolint\n\t}\n\n\tassertArrayEqualIgnoreOrder(t, expected, networkChangeEvents)\n\n}", "func ListTrackedTimesByRepository(ctx *context.APIContext) {\n\t// swagger:operation GET /repos/{owner}/{repo}/times repository repoTrackedTimes\n\t// ---\n\t// summary: List a repo's tracked times\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: user\n\t// in: query\n\t// description: optional filter by user (available for issue managers)\n\t// type: string\n\t// - name: since\n\t// in: query\n\t// description: Only show times updated after the given time. This is a timestamp in RFC 3339 format\n\t// type: string\n\t// format: date-time\n\t// - name: before\n\t// in: query\n\t// description: Only show times updated before the given time. 
This is a timestamp in RFC 3339 format\n\t// type: string\n\t// format: date-time\n\t// - name: page\n\t// in: query\n\t// description: page number of results to return (1-based)\n\t// type: integer\n\t// - name: limit\n\t// in: query\n\t// description: page size of results\n\t// type: integer\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/TrackedTimeList\"\n\t// \"400\":\n\t// \"$ref\": \"#/responses/error\"\n\t// \"403\":\n\t// \"$ref\": \"#/responses/forbidden\"\n\n\tif !ctx.Repo.Repository.IsTimetrackerEnabled(ctx) {\n\t\tctx.Error(http.StatusBadRequest, \"\", \"time tracking disabled\")\n\t\treturn\n\t}\n\n\topts := &issues_model.FindTrackedTimesOptions{\n\t\tListOptions: utils.GetListOptions(ctx),\n\t\tRepositoryID: ctx.Repo.Repository.ID,\n\t}\n\n\t// Filters\n\tqUser := ctx.FormTrim(\"user\")\n\tif qUser != \"\" {\n\t\tuser, err := user_model.GetUserByName(ctx, qUser)\n\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\tctx.Error(http.StatusNotFound, \"User does not exist\", err)\n\t\t} else if err != nil {\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetUserByName\", err)\n\t\t\treturn\n\t\t}\n\t\topts.UserID = user.ID\n\t}\n\n\tvar err error\n\tif opts.CreatedBeforeUnix, opts.CreatedAfterUnix, err = context.GetQueryBeforeSince(ctx.Base); err != nil {\n\t\tctx.Error(http.StatusUnprocessableEntity, \"GetQueryBeforeSince\", err)\n\t\treturn\n\t}\n\n\tcantSetUser := !ctx.Doer.IsAdmin &&\n\t\topts.UserID != ctx.Doer.ID &&\n\t\t!ctx.IsUserRepoWriter([]unit.Type{unit.TypeIssues})\n\n\tif cantSetUser {\n\t\tif opts.UserID == 0 {\n\t\t\topts.UserID = ctx.Doer.ID\n\t\t} else {\n\t\t\tctx.Error(http.StatusForbidden, \"\", fmt.Errorf(\"query by user not allowed; not enough rights\"))\n\t\t\treturn\n\t\t}\n\t}\n\n\tcount, err := issues_model.CountTrackedTimes(ctx, opts)\n\tif err != nil {\n\t\tctx.InternalServerError(err)\n\t\treturn\n\t}\n\n\ttrackedTimes, err := issues_model.GetTrackedTimes(ctx, opts)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetTrackedTimes\", err)\n\t\treturn\n\t}\n\tif err = trackedTimes.LoadAttributes(ctx); err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"LoadAttributes\", err)\n\t\treturn\n\t}\n\n\tctx.SetTotalCountHeader(count)\n\tctx.JSON(http.StatusOK, convert.ToTrackedTimeList(ctx, trackedTimes))\n}", "func (vdb *VspDatabase) GetVoteChanges(ticketHash string) (map[uint32]VoteChangeRecord, error) {\n\n\trecords := make(map[uint32]VoteChangeRecord)\n\n\terr := vdb.db.View(func(tx *bolt.Tx) error {\n\t\tbkt := tx.Bucket(vspBktK).Bucket(voteChangeBktK).\n\t\t\tBucket([]byte(ticketHash))\n\n\t\tif bkt == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\terr := bkt.ForEach(func(k, v []byte) error {\n\t\t\tvar record VoteChangeRecord\n\t\t\terr := json.Unmarshal(v, &record)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"could not unmarshal vote change record: %w\", err)\n\t\t\t}\n\n\t\t\trecords[bytesToUint32(k)] = record\n\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error iterating over vote change bucket: %w\", err)\n\t\t}\n\n\t\treturn nil\n\t})\n\n\treturn records, err\n}", "func ListBlobs(ctx context.Context, accountName, accountGroupName, containerName string) (*azblob.ListBlobsFlatSegmentResponse, error) {\n\tu := getContainerURL(ctx, accountName, accountGroupName, containerName)\n\treturn u.ListBlobsFlatSegment(\n\t\tctx,\n\t\tazblob.Marker{},\n\t\tazblob.ListBlobsSegmentOptions{\n\t\t\tDetails: azblob.BlobListingDetails{\n\t\t\t\tSnapshots: true,\n\t\t\t},\n\t\t})\n}", "func (i 
*interactor) Diff(head, sha string) ([]string, error) {\n\ti.logger.Infof(\"Finding the differences between %q and %q\", head, sha)\n\tout, err := i.executor.Run(\"diff\", head, sha, \"--name-only\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar changes []string\n\tscan := bufio.NewScanner(bytes.NewReader(out))\n\tscan.Split(bufio.ScanLines)\n\tfor scan.Scan() {\n\t\tchanges = append(changes, scan.Text())\n\t}\n\treturn changes, nil\n}", "func (t *TrillianLogRPCServer) GetLeavesByRange(ctx context.Context, req *trillian.GetLeavesByRangeRequest) (*trillian.GetLeavesByRangeResponse, error) {\n\tctx, spanEnd := spanFor(ctx, \"GetLeavesByRange\")\n\tdefer spanEnd()\n\tif err := validateGetLeavesByRangeRequest(req); err != nil {\n\t\treturn nil, err\n\t}\n\n\ttree, ctx, err := t.getTreeAndContext(ctx, req.LogId, optsLogRead)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttx, err := t.snapshotForTree(ctx, tree, \"GetLeavesByRange\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer t.closeAndLog(ctx, tree.TreeId, tx, \"GetLeavesByRange\")\n\n\tslr, err := tx.LatestSignedLogRoot(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar root types.LogRootV1\n\tif err := root.UnmarshalBinary(slr.LogRoot); err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"Could not read current log root: %v\", err)\n\t}\n\n\tr := &trillian.GetLeavesByRangeResponse{SignedLogRoot: slr}\n\n\tif req.StartIndex < int64(root.TreeSize) {\n\t\tleaves, err := tx.GetLeavesByRange(ctx, req.StartIndex, req.Count)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tt.fetchedLeaves.Add(float64(len(leaves)))\n\t\tr.Leaves = leaves\n\t}\n\n\tif err := t.commitAndLog(ctx, req.LogId, tx, \"GetLeavesByRange\"); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn r, nil\n}", "func blobGets(fn string) string {\n\tf, err := os.Open(fn)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn \"\"\n\t\t}\n\t\tpanic(err)\n\t}\n\tdefer f.Close()\n\tpos := blobSeek(f)\n\tfi, err := f.Stat()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tb := make([]byte, int(fi.Size()-pos))\n\tif _, err = f.Read(b); err != nil {\n\t\tpanic(err)\n\t}\n\treturn string(b)\n}", "func FindFailedCommitQueuePatchesInTimeRange(projectID string, startTime, endTime time.Time) ([]Patch, error) {\n\tquery := bson.M{\n\t\tProjectKey: projectID,\n\t\tStatusKey: evergreen.PatchFailed,\n\t\tAliasKey: evergreen.CommitQueueAlias,\n\t\t\"$or\": []bson.M{\n\t\t\t{\"$and\": []bson.M{\n\t\t\t\t{StartTimeKey: bson.M{\"$lte\": endTime}},\n\t\t\t\t{StartTimeKey: bson.M{\"$gte\": startTime}},\n\t\t\t}},\n\t\t\t{\"$and\": []bson.M{\n\t\t\t\t{FinishTimeKey: bson.M{\"$lte\": endTime}},\n\t\t\t\t{FinishTimeKey: bson.M{\"$gte\": startTime}},\n\t\t\t}},\n\t\t},\n\t}\n\treturn Find(db.Query(query).Sort([]string{CreateTimeKey}))\n}", "func (sc *snapshotInfoContainer) RemoveRecentThanTS(tsVbuuid *common.TsVbuuid) error {\n\tnewList := list.New()\n\tts := getSeqTsFromTsVbuuid(tsVbuuid)\n\tfor e := sc.snapshotList.Front(); e != nil; e = e.Next() {\n\t\tsnapshot := e.Value.(SnapshotInfo)\n\t\tsnapTsVbuuid := snapshot.Timestamp()\n\t\tsnapTs := getSeqTsFromTsVbuuid(snapTsVbuuid)\n\n\t\tif !snapTs.GreaterThan(ts) {\n\t\t\tnewList.PushBack(snapshot)\n\t\t}\n\t}\n\n\tsc.snapshotList = newList\n\treturn nil\n}", "func BlobLT(v []byte) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.LT(s.C(FieldBlob), v))\n\t})\n}", "func TestGetStatusByIPAddressAtTimestamp1(t *testing.T) {\n\n\tbefore(t, dbStorage, dbSchema)\n\n\tprivateIPs := 
[]string{\"44.33.22.11\"}\n\tpublicIPs := []string{\"88.77.66.55\"} // nolint\n\thostnames := []string{\"yahoo.com\"} // nolint\n\ttimestamp, _ := time.Parse(time.RFC3339, \"2019-08-09T08:29:35+00:00\")\n\n\tfakeCloudAssetChange := newFakeCloudAssetChange(privateIPs, publicIPs, hostnames, timestamp, `arn`, `rtype`, `aid`, `region`, nil, true)\n\tif err := dbStorage.Store(ctx, fakeCloudAssetChange); err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tipAddress := \"88.77.66.55\" // nolint\n\tat, _ := time.Parse(time.RFC3339, \"2019-08-10T08:29:35+00:00\")\n\tnetworkChangeEvents, err := dbStorage.FetchByIP(ctx, at, ipAddress)\n\tif err != nil {\n\t\tt.Fatal(err.Error())\n\t}\n\n\tassert.Equal(t, 1, len(networkChangeEvents))\n\n\texpected := []domain.CloudAssetDetails{\n\t\tdomain.CloudAssetDetails{nil, []string{\"88.77.66.55\"}, []string{\"yahoo.com\"}, \"rtype\", \"aid\", \"region\", \"arn\", nil, domain.AccountOwner{}}, // nolint\n\t}\n\n\tassertArrayEqualIgnoreOrder(t, expected, networkChangeEvents)\n\n}", "func (zh *zipHandler) blobList(dirPath string, dirBlob blob.Ref) ([]*blobFile, error) {\n\t//\tdr := zh.search.NewDescribeRequest()\n\t//\tdr.Describe(dirBlob, 3)\n\t//\tres, err := dr.Result()\n\t//\tif err != nil {\n\t//\t\treturn nil, fmt.Errorf(\"Could not describe %v: %v\", dirBlob, err)\n\t//\t}\n\tres, err := zh.describeMembers(dirBlob)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdescribed := res.Meta[dirBlob.String()]\n\tmembers := described.Members()\n\tdirBlobPath, _, isDir := described.PermanodeDir()\n\tif len(members) == 0 && !isDir {\n\t\treturn nil, nil\n\t}\n\tvar list []*blobFile\n\tif isDir {\n\t\tdirRoot := dirBlobPath[1]\n\t\tchildren, err := zh.blobsFromDir(\"/\", dirRoot)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Could not get list of blobs from %v: %v\", dirRoot, err)\n\t\t}\n\t\tlist = append(list, children...)\n\t\treturn list, nil\n\t}\n\tfor _, member := range members {\n\t\tif fileBlobPath, fileInfo, ok := getFileInfo(member.BlobRef, res.Meta); ok {\n\t\t\t// file\n\t\t\tlist = append(list,\n\t\t\t\t&blobFile{fileBlobPath[1], path.Join(dirPath, fileInfo.FileName)})\n\t\t\tcontinue\n\t\t}\n\t\tif dirBlobPath, dirInfo, ok := getDirInfo(member.BlobRef, res.Meta); ok {\n\t\t\t// directory\n\t\t\tnewZipRoot := dirBlobPath[1]\n\t\t\tchildren, err := zh.blobsFromDir(\n\t\t\t\tpath.Join(dirPath, dirInfo.FileName), newZipRoot)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Could not get list of blobs from %v: %v\", newZipRoot, err)\n\t\t\t}\n\t\t\tlist = append(list, children...)\n\t\t\t// TODO(mpl): we assume a directory permanode does not also have members.\n\t\t\t// I know there is nothing preventing it, but does it make any sense?\n\t\t\tcontinue\n\t\t}\n\t\t// it might have members, so recurse\n\t\t// If it does have members, we must consider it as a pseudo dir,\n\t\t// so we can build a fullpath for each of its members.\n\t\t// As a dir name, we're using its title if it has one, its (shortened)\n\t\t// blobref otherwise.\n\t\tpseudoDirName := member.Title()\n\t\tif pseudoDirName == \"\" {\n\t\t\tpseudoDirName = member.BlobRef.DigestPrefix(10)\n\t\t}\n\t\tfullpath := path.Join(dirPath, pseudoDirName)\n\t\tmoreMembers, err := zh.blobList(fullpath, member.BlobRef)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Could not get list of blobs from %v: %v\", member.BlobRef, err)\n\t\t}\n\t\tlist = append(list, moreMembers...)\n\t}\n\treturn list, nil\n}", "func (c *Client) ContainerChanges(id string) ([]Change, error) {\n\tpath := 
\"/containers/\" + id + \"/changes\"\n\tresp, err := c.do(http.MethodGet, path, doOptions{})\n\tif err != nil {\n\t\tvar e *Error\n\t\tif errors.As(err, &e) && e.Status == http.StatusNotFound {\n\t\t\treturn nil, &NoSuchContainer{ID: id}\n\t\t}\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tvar changes []Change\n\tif err := json.NewDecoder(resp.Body).Decode(&changes); err != nil {\n\t\treturn nil, err\n\t}\n\treturn changes, nil\n}", "func updatedTokens() []Token {\n\tdata := database.ReadFile(path)\n\tfmt.Println(\"Successfully opened tokens file\")\n\n\tvar tokens, validTokens []Token\n\tjson.Unmarshal(data, &tokens)\n\n\tfor i := 0; i < len(tokens); i++ {\n\t\tif time.Now().Unix()-tokens[i].CreatedAt < timeLimit {\n\t\t\tvalidTokens = append(validTokens, tokens[i])\n\t\t}\n\t}\n\n\tdata, err := json.Marshal(validTokens)\n\n\tif err != nil {\n\t\tlog.Fatal(\"[ERROR] Error in converting to JSON\\n\" + err.Error())\n\t}\n\n\tif err = database.WriteFile(path, data); err != nil {\n\t\tlog.Fatal(\"[ERROR] Unable to write in tokens file\\n\" + err.Error())\n\t}\n\n\treturn validTokens\n}", "func TestClient_GetModifiedFilesPagination(t *testing.T) {\n\trespTemplate := `\n{\n \"values\": [\n {\n \"path\": {\n \"toString\": \"%s\"\n }\n },\n {\n \"path\": {\n \"toString\": \"%s\"\n }\n }\n ],\n \"size\": 2,\n \"isLastPage\": true,\n \"start\": 0,\n \"limit\": 2,\n \"nextPageStart\": null\n}\n`\n\tfirstResp := fmt.Sprintf(respTemplate, \"file1.txt\", \"file2.txt\")\n\tsecondResp := fmt.Sprintf(respTemplate, \"file2.txt\", \"file3.txt\")\n\tvar serverURL string\n\n\ttestServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tswitch r.RequestURI {\n\t\t// The first request should hit this URL.\n\t\tcase \"/rest/api/1.0/projects/ow/repos/repo/pull-requests/1/changes?start=0\":\n\t\t\tresp := strings.Replace(firstResp, `\"isLastPage\": true`, `\"isLastPage\": false`, -1)\n\t\t\tresp = strings.Replace(resp, `\"nextPageStart\": null`, `\"nextPageStart\": 3`, -1)\n\t\t\tw.Write([]byte(resp)) // nolint: errcheck\n\t\t\treturn\n\t\t\t// The second should hit this URL.\n\t\tcase \"/rest/api/1.0/projects/ow/repos/repo/pull-requests/1/changes?start=3\":\n\t\t\tw.Write([]byte(secondResp)) // nolint: errcheck\n\t\tdefault:\n\t\t\tt.Errorf(\"got unexpected request at %q\", r.RequestURI)\n\t\t\thttp.Error(w, \"not found\", http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\t}))\n\tdefer testServer.Close()\n\n\tserverURL = testServer.URL\n\tclient, err := bitbucketserver.NewClient(http.DefaultClient, \"user\", \"pass\", serverURL, \"runatlantis.io\")\n\tOk(t, err)\n\n\tfiles, err := client.GetModifiedFiles(models.Repo{\n\t\tFullName: \"owner/repo\",\n\t\tOwner: \"owner\",\n\t\tName: \"repo\",\n\t\tSanitizedCloneURL: fmt.Sprintf(\"%s/scm/ow/repo.git\", serverURL),\n\t\tVCSHost: models.VCSHost{\n\t\t\tType: models.BitbucketCloud,\n\t\t\tHostname: \"bitbucket.org\",\n\t\t},\n\t}, models.PullRequest{\n\t\tNum: 1,\n\t})\n\tOk(t, err)\n\tEquals(t, []string{\"file1.txt\", \"file2.txt\", \"file3.txt\"}, files)\n}", "func (f *UploadServiceGetListTagsFunc) History() []UploadServiceGetListTagsFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]UploadServiceGetListTagsFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}", "func GetHistory(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\ttype KeyModificationWrapper struct {\n\t\tRealValue interface{} `json:\"InterfaceValue\"`\n\t\tTx queryresult.KeyModification\n\t}\n\tvar 
sliceReal []KeyModificationWrapper\n\n\tvar history []queryresult.KeyModification\n\tvar value interface{}\n\n\tif len(args) != 1 {\n\t\treturn shim.Error(\"Incorrect number of arguments. Expecting 1\")\n\t}\n\n\tkey := args[0]\n\tfmt.Printf(\"- start GetHistory: %s\\n\", key)\n\n\t// Get History\n\tresultsIterator, err := stub.GetHistoryForKey(key)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\tdefer resultsIterator.Close()\n\n\tfor resultsIterator.HasNext() {\n\t\thistoryData, err := resultsIterator.Next()\n\t\tif err != nil {\n\t\t\treturn shim.Error(err.Error())\n\t\t}\n\n\t\tvar singleReal KeyModificationWrapper\n\t\tvar tx queryresult.KeyModification\n\t\tsingleReal.Tx.TxId = historyData.TxId //copy transaction id over\n\t\tjson.Unmarshal(historyData.Value, &value) //un stringify it aka JSON.parse()\n\t\tif historyData.Value == nil { //value has been deleted\n\t\t\tvar emptyBytes []byte\n\t\t\tsingleReal.Tx.Value = emptyBytes //copy nil value\n\t\t} else {\n\t\t\tjson.Unmarshal(historyData.Value, &value) //un stringify it aka JSON.parse()\n\t\t\tsingleReal.Tx.Value = historyData.Value //copy value over\n\t\t\tsingleReal.Tx.Timestamp = historyData.Timestamp\n\t\t\tsingleReal.Tx.IsDelete = historyData.IsDelete\n\t\t\tsingleReal.RealValue = value\n\t\t}\n\t\thistory = append(history, tx) //add this Tx to the list\n\t\tsliceReal = append(sliceReal, singleReal)\n\t}\n\t// fmt.Printf(\"- getHistoryForService returning:\\n%s\", history)\n\tPrettyPrintHistory(history)\n\n\t//change to array of bytes\n\t// historyAsBytes, _ := json.Marshal(history) //convert to array of bytes\n\n\trealAsBytes, _ := json.Marshal(sliceReal)\n\treturn shim.Success(realAsBytes)\n}", "func (b *logEventBuffer) peekRange(start, end int64) []fetchedLog {\n\tb.lock.RLock()\n\tdefer b.lock.RUnlock()\n\n\tblocksInRange := b.getBlocksInRange(int(start), int(end))\n\n\tvar results []fetchedLog\n\tfor _, block := range blocksInRange {\n\t\t// double checking that we don't have any gaps in the range\n\t\tif block.blockNumber < start || block.blockNumber > end {\n\t\t\tcontinue\n\t\t}\n\t\tresults = append(results, block.logs...)\n\t}\n\n\tsort.SliceStable(results, func(i, j int) bool {\n\t\treturn results[i].log.BlockNumber < results[j].log.BlockNumber\n\t})\n\n\tb.lggr.Debugw(\"Peeked logs\", \"results\", len(results), \"start\", start, \"end\", end)\n\n\treturn results\n}", "func (_TellorMesosphere *TellorMesosphereCaller) Timestamps(opts *bind.CallOpts, arg0 *big.Int, arg1 *big.Int) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _TellorMesosphere.contract.Call(opts, &out, \"timestamps\", arg0, arg1)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}" ]
[ "0.5353702", "0.50202537", "0.4984611", "0.49060214", "0.4898732", "0.4884065", "0.48787692", "0.48241475", "0.48237765", "0.47981906", "0.47307202", "0.47307202", "0.47245994", "0.4690016", "0.4667888", "0.46565905", "0.46333495", "0.45517814", "0.45401368", "0.4535804", "0.45274872", "0.44983017", "0.44820955", "0.44628102", "0.44481578", "0.44253832", "0.44161403", "0.44023532", "0.43945006", "0.4386002", "0.43805748", "0.4376409", "0.43416312", "0.4338946", "0.43111756", "0.43102202", "0.4310045", "0.43051895", "0.43050998", "0.43029574", "0.43022552", "0.4297824", "0.429432", "0.4293291", "0.42920136", "0.42887828", "0.4287077", "0.4279522", "0.42738897", "0.4266608", "0.42665768", "0.42652637", "0.42519894", "0.42077488", "0.41997457", "0.41974163", "0.41957626", "0.41826916", "0.41823816", "0.4182345", "0.4178998", "0.41766602", "0.41760632", "0.4173336", "0.41719788", "0.41718662", "0.41497588", "0.41473207", "0.41401494", "0.41351628", "0.4131712", "0.41244298", "0.41228244", "0.41227216", "0.41150948", "0.41127792", "0.41104534", "0.4106742", "0.41015577", "0.40908805", "0.40903997", "0.4083502", "0.4079127", "0.40738612", "0.40664086", "0.40662745", "0.40654987", "0.4061564", "0.40605563", "0.4052441", "0.40480652", "0.40476853", "0.40475821", "0.40470845", "0.40412357", "0.4038616", "0.40349832", "0.40332216", "0.40303028", "0.40270504" ]
0.8394095
0
ReadBlobData Reads blob from specified starting location
func (sr *StorageReader) ReadBlobData(path string, startIndex, length int64) []byte { ctx := context.Background() blobURL := sr.container.NewBlockBlobURL(path) downloadResponse, err := blobURL.Download(ctx, startIndex, length, azblob.BlobAccessConditions{}, false) logp.Info("Attempting to download blob %s at %v", path, startIndex) bodyStream := downloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: 10}) downloadedData := bytes.Buffer{} _, err = downloadedData.ReadFrom(bodyStream) if err != nil { panic(err) } return downloadedData.Bytes() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func ReadBlob(length int32, data []byte) ([]byte, int64) {\n\tl := length\n\tif length > int32(len(data)) {\n\t\tl = int32(len(data))\n\t}\n\n\tvar idx int32\n\tfor idx = l; (idx % 4) != 0; idx++ {\n\t\tif idx >= int32(len(data)) {\n\t\t\tdata = append(data, 0)\n\t\t}\n\t}\n\treturn data[:idx], int64(idx)\n}", "func (fh *FilesystemHandler) ReadBlob(container models.SimpleContainer, blobName string) models.SimpleBlob {\n\tvar blob models.SimpleBlob\n\n\tdirPath := fh.generateFullPath(&container)\n\tfullPath := filepath.Join(dirPath, blobName)\n\n\tblob.DataCachedAtPath = fullPath\n\tblob.BlobInMemory = false\n\tblob.Name = blobName\n\tblob.ParentContainer = &container\n\tblob.Origin = container.Origin\n\tblob.URL = fullPath\n\treturn blob\n}", "func readBlob(nd *Node) *Blob {\n\treturn nd.ReadMemo(blobNodeKey{}, func() interface{} {\n\t\tfn := nd.Path()\n\t\tsrc, err := ioutil.ReadFile(fn)\n\t\tif err != nil {\n\t\t\treturn &Blob{err: err}\n\t\t}\n\t\treturn &Blob{src: src}\n\t}).(*Blob)\n}", "func blobSeek(r io.ReadSeeker) int64 {\n\tvar b [1]byte\n\t_, err := r.Seek(BlobNameLenOff, os.SEEK_SET)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t_, err = r.Read(b[:])\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tn, err := r.Seek(int64(b[0]), os.SEEK_CUR)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn n\n}", "func blobSeek(r io.ReadSeeker) int64 {\n\tvar b [1]byte\n\t_, err := r.Seek(BlobNameLenOff, os.SEEK_SET)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t_, err = r.Read(b[:])\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tn, err := r.Seek(int64(b[0]), os.SEEK_CUR)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn n\n}", "func (d *swiftDriver) ReadBlob(account keppel.Account, storageID string) (io.ReadCloser, uint64, error) {\n\tc, _, err := d.getBackendConnection(account)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\to := blobObject(c, storageID)\n\thdr, err := o.Headers()\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\treader, err := o.Download(nil).AsReadCloser()\n\treturn reader, hdr.SizeBytes().Get(), err\n}", "func (b Base) GetBlob(sum string) (ReadSeekCloser, error) {\n\treturn os.Open(b.blobPath(sum))\n}", "func (c *containerdCAS) ReadBlob(blobHash string) (io.Reader, error) {\n\tshaDigest := digest.Digest(blobHash)\n\t_, err := contentStore.Info(ctrdCtx, shaDigest)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"ReadBlob: Exception getting info of blob: %s. %s\", blobHash, err.Error())\n\t}\n\treaderAt, err := contentStore.ReaderAt(ctrdCtx, spec.Descriptor{Digest: shaDigest})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"ReadBlob: Exception while reading blob: %s. 
%s\", blobHash, err.Error())\n\t}\n\treturn content.NewReader(readerAt), nil\n}", "func (is *ObjectStorage) GetBlobPartial(repo string, digest godigest.Digest, mediaType string, from, to int64,\n) (io.ReadCloser, int64, int64, error) {\n\tvar lockLatency time.Time\n\n\tif err := digest.Validate(); err != nil {\n\t\treturn nil, -1, -1, err\n\t}\n\n\tblobPath := is.BlobPath(repo, digest)\n\n\tis.RLock(&lockLatency)\n\tdefer is.RUnlock(&lockLatency)\n\n\tbinfo, err := is.store.Stat(context.Background(), blobPath)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Str(\"blob\", blobPath).Msg(\"failed to stat blob\")\n\n\t\treturn nil, -1, -1, zerr.ErrBlobNotFound\n\t}\n\n\tend := to\n\n\tif to < 0 || to >= binfo.Size() {\n\t\tend = binfo.Size() - 1\n\t}\n\n\tblobHandle, err := is.store.Reader(context.Background(), blobPath, from)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Str(\"blob\", blobPath).Msg(\"failed to open blob\")\n\n\t\treturn nil, -1, -1, err\n\t}\n\n\tblobReadCloser, err := NewBlobStream(blobHandle, from, end)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Str(\"blob\", blobPath).Msg(\"failed to open blob stream\")\n\n\t\treturn nil, -1, -1, err\n\t}\n\n\t// is a 'deduped' blob?\n\tif binfo.Size() == 0 {\n\t\tdefer blobReadCloser.Close()\n\n\t\t// Check blobs in cache\n\t\tdstRecord, err := is.checkCacheBlob(digest)\n\t\tif err != nil {\n\t\t\tis.log.Error().Err(err).Str(\"digest\", digest.String()).Msg(\"cache: not found\")\n\n\t\t\treturn nil, -1, -1, zerr.ErrBlobNotFound\n\t\t}\n\n\t\tbinfo, err := is.store.Stat(context.Background(), dstRecord)\n\t\tif err != nil {\n\t\t\tis.log.Error().Err(err).Str(\"blob\", dstRecord).Msg(\"failed to stat blob\")\n\n\t\t\treturn nil, -1, -1, zerr.ErrBlobNotFound\n\t\t}\n\n\t\tend := to\n\n\t\tif to < 0 || to >= binfo.Size() {\n\t\t\tend = binfo.Size() - 1\n\t\t}\n\n\t\tblobHandle, err := is.store.Reader(context.Background(), dstRecord, from)\n\t\tif err != nil {\n\t\t\tis.log.Error().Err(err).Str(\"blob\", dstRecord).Msg(\"failed to open blob\")\n\n\t\t\treturn nil, -1, -1, err\n\t\t}\n\n\t\tblobReadCloser, err := NewBlobStream(blobHandle, from, end)\n\t\tif err != nil {\n\t\t\tis.log.Error().Err(err).Str(\"blob\", blobPath).Msg(\"failed to open blob stream\")\n\n\t\t\treturn nil, -1, -1, err\n\t\t}\n\n\t\treturn blobReadCloser, end - from + 1, binfo.Size(), nil\n\t}\n\n\t// The caller function is responsible for calling Close()\n\treturn blobReadCloser, end - from + 1, binfo.Size(), nil\n}", "func (s *server) ReadBlob(ctx context.Context, req *pb.ReadBlobRequest) (*pb.ReadBlobResponse, error) {\n\n\t// Run a Get request to Azure with the incoming blob key\n\tresp, err := http.Get(strings.Join([]string{base_uri,req.Key},\"\"))\n\tif err != nil {\n\t\tlog.Fatalf(\"failed get request: %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\t// Read the body of the http response containing the blob data\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed response read: %v\", err)\n\t}\n\n\t// Return the body of the response to the client\n\treturn &pb.ReadBlobResponse{Data: body}, nil\n}", "func (r *azblobObjectReader) Read(p []byte) (n int, err error) {\n\tmaxCnt := r.totalSize - r.pos\n\tif maxCnt > int64(len(p)) {\n\t\tmaxCnt = int64(len(p))\n\t}\n\tif maxCnt == 0 {\n\t\treturn 0, io.EOF\n\t}\n\tresp, err := r.blobClient.DownloadStream(r.ctx, &blob.DownloadStreamOptions{\n\t\tRange: blob.HTTPRange{\n\t\t\tOffset: r.pos,\n\t\t\tCount: maxCnt,\n\t\t},\n\n\t\tCPKInfo: r.cpkInfo,\n\t})\n\tif err != nil {\n\t\treturn 0, 
errors.Annotatef(err, \"Failed to read data from azure blob, data info: pos='%d', count='%d'\", r.pos, maxCnt)\n\t}\n\tbody := resp.NewRetryReader(r.ctx, &blob.RetryReaderOptions{\n\t\tMaxRetries: azblobRetryTimes,\n\t})\n\tn, err = body.Read(p)\n\tif err != nil && err != io.EOF {\n\t\treturn 0, errors.Annotatef(err, \"Failed to read data from azure blob response, data info: pos='%d', count='%d'\", r.pos, maxCnt)\n\t}\n\tr.pos += int64(n)\n\treturn n, body.Close()\n}", "func (fr *FileReader) readerForOffset(off int64) (io.ReadCloser, error) {\n\tif off < 0 {\n\t\tpanic(\"negative offset\")\n\t}\n\tif off >= fr.size {\n\t\treturn eofReader, nil\n\t}\n\toffRemain := off\n\tparts := fr.ss.Parts\n\tfor len(parts) > 0 && parts[0].Size <= uint64(offRemain) {\n\t\toffRemain -= int64(parts[0].Size)\n\t\tparts = parts[1:]\n\t}\n\tif len(parts) == 0 {\n\t\treturn eofReader, nil\n\t}\n\tp0 := parts[0]\n\tvar rsc blobref.ReadSeekCloser\n\tvar err error\n\tswitch {\n\tcase p0.BlobRef != nil && p0.BytesRef != nil:\n\t\treturn nil, fmt.Errorf(\"part illegally contained both a blobRef and bytesRef\")\n\tcase p0.BlobRef == nil && p0.BytesRef == nil:\n\t\treturn &nZeros{int(p0.Size - uint64(offRemain))}, nil\n\tcase p0.BlobRef != nil:\n\t\trsc, _, err = fr.fetcher.Fetch(p0.BlobRef)\n\tcase p0.BytesRef != nil:\n\t\trsc, err = NewFileReader(fr.fetcher, p0.BytesRef)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\toffRemain += int64(p0.Offset)\n\tif offRemain > 0 {\n\t\tnewPos, err := rsc.Seek(offRemain, os.SEEK_SET)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif newPos != offRemain {\n\t\t\tpanic(\"Seek didn't work\")\n\t\t}\n\t}\n\treturn struct {\n\t\tio.Reader\n\t\tio.Closer\n\t}{\n\t\tio.LimitReader(rsc, int64(p0.Size)),\n\t\trsc,\n\t}, nil\n}", "func blobGets(fn string) string {\n\tf, err := os.Open(fn)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn \"\"\n\t\t}\n\t\tpanic(err)\n\t}\n\tdefer f.Close()\n\tpos := blobSeek(f)\n\tfi, err := f.Stat()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tb := make([]byte, int(fi.Size()-pos))\n\tif _, err = f.Read(b); err != nil {\n\t\tpanic(err)\n\t}\n\treturn string(b)\n}", "func BlobSeek(r io.ReadSeeker) (n int64, err error) {\n\tdefer func() {\n\t\tif perr := recover(); perr != nil {\n\t\t\terr = perr.(error)\n\t\t}\n\t}()\n\tn = blobSeek(r)\n\treturn\n}", "func BlobSeek(r io.ReadSeeker) (n int64, err error) {\n\tdefer func() {\n\t\tif perr := recover(); perr != nil {\n\t\t\terr = perr.(error)\n\t\t}\n\t}()\n\tn = blobSeek(r)\n\treturn\n}", "func (c *INDIClient) GetBlob(deviceName, propName, blobName string) (rdr io.ReadCloser, fileName string, length int64, err error) {\n\tdevice, err := c.findDevice(deviceName)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tprop, ok := device.BlobProperties[propName]\n\tif !ok {\n\t\terr = ErrPropertyNotFound\n\t\treturn\n\t}\n\n\tval, ok := prop.Values[blobName]\n\tif !ok {\n\t\terr = ErrPropertyValueNotFound\n\t\treturn\n\t}\n\n\trdr, err = c.fs.Open(val.Value)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfileName = filepath.Base(val.Value)\n\n\tlength = val.Size\n\treturn\n}", "func ReadPointer(reader io.Reader) (Pointer, error) {\n\tbuf := make([]byte, blobSizeCutoff)\n\tn, err := io.ReadFull(reader, buf)\n\tif err != nil && err != io.ErrUnexpectedEOF {\n\t\treturn Pointer{}, err\n\t}\n\tbuf = buf[:n]\n\n\treturn ReadPointerFromBuffer(buf)\n}", "func (d *DriveDB) ReadFiledata(f *File, offset, size, filesize int64) ([]byte, error) {\n\tvar ret []byte\n\t// Read all the necessary chunks\n\tchunk0, chunkN := 
d.chunkNumbers(offset, size)\n\tfor chunk := chunk0; chunk <= chunkN; chunk++ {\n\t\tdata, err := d.readChunk(f, chunk, filesize)\n\t\tif err != nil {\n\t\t\tlog.Printf(\" chunk %v read error: %v\", chunk, err)\n\t\t\treturn nil, err\n\t\t}\n\t\tret = append(ret, data...)\n\t}\n\n\t// We may have too much data here -- before offset and after end. Return an appropriate slice.\n\tdsize := int64(len(ret))\n\tlow := offset - chunk0*(*driveCacheChunk)\n\tif low < 0 {\n\t\tlow = 0\n\t}\n\tif low > dsize {\n\t\treturn nil, fmt.Errorf(\"tried to read past end of chunk (low:%d, dsize:%d): fileId: %s, offset:%d, size:%d, filesize:%d\", low, dsize, f.Id, offset, size, filesize)\n\t}\n\thigh := low + size\n\tif high > dsize {\n\t\thigh = dsize\n\t}\n\tbuf := ret[low:high]\n\treturn buf, nil\n}", "func FileReadAt(f *os.File, b []byte, off int64) (int, error)", "func (is *ObjectStorage) GetBlob(repo string, digest godigest.Digest, mediaType string) (io.ReadCloser, int64, error) {\n\tvar lockLatency time.Time\n\n\tif err := digest.Validate(); err != nil {\n\t\treturn nil, -1, err\n\t}\n\n\tblobPath := is.BlobPath(repo, digest)\n\n\tis.RLock(&lockLatency)\n\tdefer is.RUnlock(&lockLatency)\n\n\tbinfo, err := is.store.Stat(context.Background(), blobPath)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Str(\"blob\", blobPath).Msg(\"failed to stat blob\")\n\n\t\treturn nil, -1, zerr.ErrBlobNotFound\n\t}\n\n\tblobReadCloser, err := is.store.Reader(context.Background(), blobPath, 0)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Str(\"blob\", blobPath).Msg(\"failed to open blob\")\n\n\t\treturn nil, -1, err\n\t}\n\n\t// is a 'deduped' blob?\n\tif binfo.Size() == 0 {\n\t\t// Check blobs in cache\n\t\tdstRecord, err := is.checkCacheBlob(digest)\n\t\tif err != nil {\n\t\t\tis.log.Error().Err(err).Str(\"digest\", digest.String()).Msg(\"cache: not found\")\n\n\t\t\treturn nil, -1, zerr.ErrBlobNotFound\n\t\t}\n\n\t\tbinfo, err := is.store.Stat(context.Background(), dstRecord)\n\t\tif err != nil {\n\t\t\tis.log.Error().Err(err).Str(\"blob\", dstRecord).Msg(\"failed to stat blob\")\n\n\t\t\treturn nil, -1, zerr.ErrBlobNotFound\n\t\t}\n\n\t\tblobReadCloser, err := is.store.Reader(context.Background(), dstRecord, 0)\n\t\tif err != nil {\n\t\t\tis.log.Error().Err(err).Str(\"blob\", dstRecord).Msg(\"failed to open blob\")\n\n\t\t\treturn nil, -1, err\n\t\t}\n\n\t\treturn blobReadCloser, binfo.Size(), nil\n\t}\n\n\t// The caller function is responsible for calling Close()\n\treturn blobReadCloser, binfo.Size(), nil\n}", "func readFileAtOffset(file *os.File, offset int, size int) []byte {\n\tbytes := make([]byte, size)\n\tfile.Seek(int64(offset), 0)\n\n\tdata, err := file.Read(bytes)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn bytes[:data]\n}", "func SMBExtractValueFromOffset(blob []byte, idx int) ([]byte, int, error) {\n\tres := []byte{}\n\n\tif len(blob) < (idx + 6) {\n\t\treturn res, idx, fmt.Errorf(\"data truncated\")\n\t}\n\n\tlen1 := binary.LittleEndian.Uint16(blob[idx:])\n\tidx += 2\n\n\t// len2 := binary.LittleEndian.Uint16(blob[idx:])\n\tidx += 2\n\n\toff := binary.LittleEndian.Uint32(blob[idx:])\n\tidx += 4\n\n\t// Allow zero length values\n\tif len1 == 0 {\n\t\treturn res, idx, nil\n\t}\n\n\tif len(blob) < int(off+uint32(len1)) {\n\t\treturn res, idx, fmt.Errorf(\"data value truncated\")\n\t}\n\n\tres = append(res, blob[off:off+uint32(len1)]...)\n\treturn res, idx, nil\n}", "func (is *ImageStoreLocal) GetBlobPartial(repo string, digest godigest.Digest, mediaType string, from, to int64,\n) (io.ReadCloser, 
int64, int64, error) {\n\tvar lockLatency time.Time\n\n\tif err := digest.Validate(); err != nil {\n\t\treturn nil, -1, -1, err\n\t}\n\n\tblobPath := is.BlobPath(repo, digest)\n\n\tis.RLock(&lockLatency)\n\tdefer is.RUnlock(&lockLatency)\n\n\tbinfo, err := os.Stat(blobPath)\n\tif err != nil {\n\t\tis.log.Debug().Err(err).Str(\"blob\", blobPath).Msg(\"failed to stat blob\")\n\n\t\treturn nil, -1, -1, zerr.ErrBlobNotFound\n\t}\n\n\tif to < 0 || to >= binfo.Size() {\n\t\tto = binfo.Size() - 1\n\t}\n\n\tblobReadCloser, err := newBlobStream(blobPath, from, to)\n\tif err != nil {\n\t\tis.log.Debug().Err(err).Str(\"blob\", blobPath).Msg(\"failed to open blob\")\n\n\t\treturn nil, -1, -1, err\n\t}\n\n\t// The caller function is responsible for calling Close()\n\treturn blobReadCloser, to - from + 1, binfo.Size(), nil\n}", "func FromDataBlob(blob *serialization.DataBlob) ([]byte, string) {\n\tif blob == nil || len(blob.Data) == 0 {\n\t\treturn nil, \"\"\n\t}\n\treturn blob.Data, string(blob.Encoding)\n}", "func (r *Repository) PullBlob(digest string) (size int64, data io.ReadCloser, err error) {\n\treq, err := http.NewRequest(\"GET\", buildBlobURL(r.Endpoint.String(), r.Name, digest), nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tresp, err := r.client.Do(req)\n\tif err != nil {\n\t\terr = parseError(err)\n\t\treturn\n\t}\n\n\tif resp.StatusCode == http.StatusOK {\n\t\tcontengLength := resp.Header.Get(http.CanonicalHeaderKey(\"Content-Length\"))\n\t\tsize, err = strconv.ParseInt(contengLength, 10, 64)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tdata = resp.Body\n\t\treturn\n\t}\n\t// can not close the connect if the status code is 200\n\tdefer resp.Body.Close()\n\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = &commonhttp.Error{\n\t\tCode: resp.StatusCode,\n\t\tMessage: string(b),\n\t}\n\n\treturn\n}", "func (h *Handler) readOneBlob(ctx context.Context, getTime *timeReadWrite,\n\tserviceController controller.ServiceController,\n\tblob blobGetArgs, sortedVuids []sortedVuid, shards [][]byte) error {\n\tspan := trace.SpanFromContextSafe(ctx)\n\n\ttactic := blob.CodeMode.Tactic()\n\tsizes, err := ec.GetBufferSizes(int(blob.BlobSize), tactic)\n\tif err != nil {\n\t\treturn err\n\t}\n\tempties := emptyDataShardIndexes(sizes)\n\n\tdataN, dataParityN := tactic.N, tactic.N+tactic.M\n\tminShardsRead := dataN + h.MinReadShardsX\n\tif minShardsRead > len(sortedVuids) {\n\t\tminShardsRead = len(sortedVuids)\n\t}\n\tshardSize, shardOffset, shardReadSize := blob.ShardSize, blob.ShardOffset, blob.ShardReadSize\n\n\tstopChan := make(chan struct{})\n\tnextChan := make(chan struct{}, len(sortedVuids))\n\tshardPipe := func() <-chan shardData {\n\t\tch := make(chan shardData)\n\t\tgo func() {\n\t\t\twg := new(sync.WaitGroup)\n\t\t\tdefer func() {\n\t\t\t\twg.Wait()\n\t\t\t\tclose(ch)\n\t\t\t}()\n\n\t\t\tfor _, vuid := range sortedVuids[:minShardsRead] {\n\t\t\t\tif _, ok := empties[vuid.index]; !ok {\n\t\t\t\t\twg.Add(1)\n\t\t\t\t\tgo func(vuid sortedVuid) {\n\t\t\t\t\t\tch <- h.readOneShard(ctx, serviceController, blob, vuid, stopChan)\n\t\t\t\t\t\twg.Done()\n\t\t\t\t\t}(vuid)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor _, vuid := range sortedVuids[minShardsRead:] {\n\t\t\t\tif _, ok := empties[vuid.index]; ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tselect {\n\t\t\t\tcase <-stopChan:\n\t\t\t\t\treturn\n\t\t\t\tcase <-nextChan:\n\t\t\t\t}\n\n\t\t\t\twg.Add(1)\n\t\t\t\tgo func(vuid sortedVuid) {\n\t\t\t\t\tch <- h.readOneShard(ctx, serviceController, blob, vuid, 
stopChan)\n\t\t\t\t\twg.Done()\n\t\t\t\t}(vuid)\n\t\t\t}\n\t\t}()\n\n\t\treturn ch\n\t}()\n\n\treceived := make(map[int]bool, minShardsRead)\n\tfor idx := range empties {\n\t\treceived[idx] = true\n\t\th.memPool.Zero(shards[idx])\n\t}\n\n\tstartRead := time.Now()\n\treconstructed := false\n\tfor shard := range shardPipe {\n\t\t// swap shard buffer\n\t\tif shard.status {\n\t\t\tbuf := shards[shard.index]\n\t\t\tshards[shard.index] = shard.buffer\n\t\t\th.memPool.Put(buf)\n\t\t}\n\n\t\treceived[shard.index] = shard.status\n\t\tif len(received) < dataN {\n\t\t\tcontinue\n\t\t}\n\n\t\t// bad data index\n\t\tbadIdx := make([]int, 0, 8)\n\t\tfor i := 0; i < dataN; i++ {\n\t\t\tif succ, ok := received[i]; !ok || !succ {\n\t\t\t\tbadIdx = append(badIdx, i)\n\t\t\t}\n\t\t}\n\t\tif len(badIdx) == 0 {\n\t\t\treconstructed = true\n\t\t\tclose(stopChan)\n\t\t\tbreak\n\t\t}\n\n\t\t// update bad parity index\n\t\tfor i := dataN; i < dataParityN; i++ {\n\t\t\tif succ, ok := received[i]; !ok || !succ {\n\t\t\t\tbadIdx = append(badIdx, i)\n\t\t\t}\n\t\t}\n\n\t\tbadShards := 0\n\t\tfor _, succ := range received {\n\t\t\tif !succ {\n\t\t\t\tbadShards++\n\t\t\t}\n\t\t}\n\t\t// it will not wait all the shards, cos has no enough shards to reconstruct\n\t\tif badShards > dataParityN-dataN {\n\t\t\tspan.Infof(\"%s bad(%d) has no enough to reconstruct\", blob.ID(), badShards)\n\t\t\tclose(stopChan)\n\t\t\tbreak\n\t\t}\n\n\t\t// has bad shards, but have enough shards to reconstruct\n\t\tif len(received) >= dataN+badShards {\n\t\t\tvar err error\n\t\t\tif shardReadSize < shardSize {\n\t\t\t\tspan.Debugf(\"bid(%d) ready to segment ec reconstruct data\", blob.Bid)\n\t\t\t\treportDownload(blob.Cid, \"EC\", \"segment\")\n\t\t\t\tsegments := make([][]byte, len(shards))\n\t\t\t\tfor idx := range shards {\n\t\t\t\t\tsegments[idx] = shards[idx][shardOffset : shardOffset+shardReadSize]\n\t\t\t\t}\n\t\t\t\terr = h.encoder[blob.CodeMode].ReconstructData(segments, badIdx)\n\t\t\t} else {\n\t\t\t\tspan.Debugf(\"bid(%d) ready to ec reconstruct data\", blob.Bid)\n\t\t\t\terr = h.encoder[blob.CodeMode].ReconstructData(shards, badIdx)\n\t\t\t}\n\t\t\tif err == nil {\n\t\t\t\treconstructed = true\n\t\t\t\tclose(stopChan)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tspan.Errorf(\"%s ec reconstruct data error:%s\", blob.ID(), err.Error())\n\t\t}\n\n\t\tif len(received) >= len(sortedVuids) {\n\t\t\tclose(stopChan)\n\t\t\tbreak\n\t\t}\n\t\tnextChan <- struct{}{}\n\t}\n\tgetTime.IncR(time.Since(startRead))\n\n\t// release buffer of delayed shards\n\tgo func() {\n\t\tfor shard := range shardPipe {\n\t\t\tif shard.status {\n\t\t\t\th.memPool.Put(shard.buffer)\n\t\t\t}\n\t\t}\n\t}()\n\n\tif reconstructed {\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"broken %s\", blob.ID())\n}", "func (s *AzureBlobStorage) ReadFile(ctx context.Context, name string) ([]byte, error) {\n\tclient := s.containerClient.NewBlockBlobClient(s.withPrefix(name))\n\tresp, err := client.DownloadStream(ctx, &blob.DownloadStreamOptions{\n\t\tCPKInfo: s.cpkInfo,\n\t})\n\tif err != nil {\n\t\treturn nil, errors.Annotatef(err, \"Failed to download azure blob file, file info: bucket(container)='%s', key='%s'\", s.options.Bucket, s.withPrefix(name))\n\t}\n\tbody := resp.NewRetryReader(ctx, &blob.RetryReaderOptions{\n\t\tMaxRetries: azblobRetryTimes,\n\t})\n\tdata, err := io.ReadAll(body)\n\tif err != nil {\n\t\treturn nil, errors.Annotatef(err, \"Failed to read azure blob file, file info: bucket(container)='%s', key='%s'\", s.options.Bucket, s.withPrefix(name))\n\t}\n\treturn data, 
body.Close()\n}", "func (w *wrapped) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {\n\treturn nil, nil, fmt.Errorf(\"internal error: GetBlobAt is not supported by the %q transport\", w.Reference().Transport().Name())\n}", "func ReadBytes(ctx context.Context, filename string) ([]byte, error) {\n\tif strings.HasPrefix(filename, \"gs://\") {\n\t\treturn readGCSObject(ctx, filename)\n\t}\n\treturn ioutil.ReadFile(filename)\n}", "func (b *ByteArray) ReadSeek(offset int, whence int) (absolute int, err error) {\n\tswitch whence {\n\tcase SEEK_SET:\n\t\tabsolute = offset\n\tcase SEEK_CUR:\n\t\tabsolute = b.readPos.current + offset\n\tcase SEEK_END:\n\t\tabsolute = b.usedBytes - offset\n\t}\n\tif absolute < 0 {\n\t\tabsolute = 0\n\t\terr = io.EOF\n\t}\n\tif absolute > b.usedBytes {\n\t\tabsolute = b.usedBytes\n\t\terr = io.EOF\n\t}\n\tb.readPos = b.seek(b.readPos, absolute, SEEK_SET)\n\treturn b.readPos.current, err\n}", "func (mcm *MinioChunkManager) ReadAt(ctx context.Context, filePath string, off int64, length int64) ([]byte, error) {\n\tif off < 0 || length < 0 {\n\t\treturn nil, io.EOF\n\t}\n\n\topts := minio.GetObjectOptions{}\n\terr := opts.SetRange(off, off+length-1)\n\tif err != nil {\n\t\tlog.Warn(\"failed to set range\", zap.String(\"bucket\", mcm.bucketName), zap.String(\"path\", filePath), zap.Error(err))\n\t\treturn nil, err\n\t}\n\n\tobject, err := mcm.getMinioObject(ctx, mcm.bucketName, filePath, opts)\n\tif err != nil {\n\t\tlog.Warn(\"failed to get object\", zap.String(\"bucket\", mcm.bucketName), zap.String(\"path\", filePath), zap.Error(err))\n\t\treturn nil, err\n\t}\n\tdefer object.Close()\n\n\tdata, err := Read(object, length)\n\tif err != nil {\n\t\terrResponse := minio.ToErrorResponse(err)\n\t\tif errResponse.Code == \"NoSuchKey\" {\n\t\t\treturn nil, WrapErrNoSuchKey(filePath)\n\t\t}\n\t\tlog.Warn(\"failed to read object\", zap.String(\"bucket\", mcm.bucketName), zap.String(\"path\", filePath), zap.Error(err))\n\t\treturn nil, err\n\t}\n\tmetrics.PersistentDataKvSize.WithLabelValues(metrics.DataGetLabel).Observe(float64(length))\n\treturn data, nil\n}", "func (q *QQwry) readData(num int, offset ...int64) (rs []byte) {\n\tif len(offset) > 0 {\n\t\tq.setOffset(offset[0])\n\t}\n\tnums := int64(num)\n\tend := q.Offset + nums\n\tdataNum := int64(len(q.Data.Data))\n\tif q.Offset > dataNum {\n\t\treturn nil\n\t}\n\n\tif end > dataNum {\n\t\tend = dataNum\n\t}\n\trs = q.Data.Data[q.Offset:end]\n\tq.Offset = end\n\treturn\n}", "func (sto *unionStorage) ReceiveBlob(ctx context.Context, br blob.Ref, src io.Reader) (sb blob.SizedRef, err error) {\n\treturn blob.SizedRef{}, blobserver.ErrReadonly\n}", "func peekBlob(nd *Node) *Blob {\n\tb, _ := nd.PeekMemo(blobNodeKey{}).(*Blob)\n\treturn b\n}", "func DecodeBlob(b []byte, preAlloc ...int) (byte, [][]byte, error) {\n\tif len(b) == 0 {\n\t\treturn 0, nil, fmt.Errorf(\"zero length blob not allowed\")\n\t}\n\tver := b[0]\n\tb = b[1:]\n\tpushes, err := ExtractPushes(b, preAlloc...)\n\treturn ver, pushes, err\n}", "func (r *chunkReader) Read(data []byte) (int, error) {\n\tbytesToRead := len(data)\n\tr.l.Debug(\"Start cafs reader Read\", zap.Int(\"length\", bytesToRead))\n\n\tif r.lastChunk && r.rdr == nil {\n\t\treturn 0, io.EOF\n\t}\n\tfor {\n\t\tkey := r.keys[r.idx]\n\t\tif r.rdr == nil {\n\t\t\trdr, err := r.fs.Get(context.Background(), r.pather(key))\n\t\t\tif err != nil {\n\t\t\t\treturn r.readSoFar, err\n\t\t\t}\n\t\t\tr.rdr = rdr\n\t\t}\n\n\t\tn, errRead 
:= r.rdr.Read(data[r.readSoFar:])\n\n\t\tdefer func() {\n\t\t\tif r.MetricsEnabled() && errRead == nil {\n\t\t\t\tr.m.Volume.Blobs.IncBlob(\"read\")\n\t\t\t\tr.m.Volume.Blobs.Size(int64(n), \"read\")\n\t\t\t}\n\t\t\tr.l.Debug(\"End cafs reader Read\", zap.Int(\"length\", bytesToRead))\n\t\t}()\n\n\t\tr.currLeaf = append(r.currLeaf, data[r.readSoFar:r.readSoFar+n]...)\n\t\tif errRead != nil {\n\t\t\tr.rdr.Close() // TODO(fred): nice - why are we ignoring errors here?\n\t\t\tr.readSoFar += n\n\t\t\tif errRead == io.EOF { // we reached the end of the stream for this key\n\t\t\t\tr.idx++\n\t\t\t\tr.rdr = nil\n\t\t\t\tr.lastChunk = r.idx == len(r.keys)\n\t\t\t\tif r.withVerifyHash {\n\t\t\t\t\tnodeOffset := r.idx\n\t\t\t\t\tisLastNode := false\n\n\t\t\t\t\t// NOTE: we follow the checksumming scheme adopted by the writer.\n\t\t\t\t\t// The writer behaves in a way a bit unexpected here: not only offets don't start at zero\n\t\t\t\t\t// as one might expect, but the last node is not flagged as the last one\n\t\t\t\t\t// when the content size is aligned with the leaf size.\n\t\t\t\t\tif r.lastChunk && uint32(len(r.currLeaf)) != r.leafSize {\n\t\t\t\t\t\tnodeOffset--\n\t\t\t\t\t\tisLastNode = true\n\t\t\t\t\t}\n\t\t\t\t\tr.l.Debug(\"cafs reader Read: hash verification\", zap.Stringer(\"key\", key))\n\t\t\t\t\tif err := r.verifyHash(key, r.currLeaf, nodeOffset, isLastNode); err != nil {\n\t\t\t\t\t\treturn 0, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif r.lastChunk { // this was the last chunk, so also EOF for this hash\n\t\t\t\t\tif n == bytesToRead {\n\t\t\t\t\t\treturn n, nil\n\t\t\t\t\t}\n\t\t\t\t\treturn r.readSoFar, io.EOF\n\t\t\t\t}\n\t\t\t\t// move on to the next key\n\t\t\t\tr.currLeaf = make([]byte, 0)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn n, errRead\n\t\t}\n\t\t// we filled up the entire byte slice but still have data remaining in the reader,\n\t\t// we should move on to receive the next buffer\n\t\tr.readSoFar += n\n\t\tif r.readSoFar >= bytesToRead {\n\t\t\tr.readSoFar = 0\n\t\t\t// return without error\n\t\t\treturn bytesToRead, nil\n\t\t}\n\t}\n}", "func parseBinaryBlob(blob []byte) (*Digest, error) {\n\t// Read a single item from the binary stream. 
Timeout after\n\t// five seconds because it shouldn't take that long to parse\n\t// a blob that is already loaded into memory.\n\tch := parseBinaryStream(bytes.NewReader(blob))\n\tselect {\n\tcase out := <-ch:\n\t\treturn out.Digest, out.Error\n\tcase <-time.After(5 * time.Second):\n\t\treturn nil, errors.New(\"timed out waiting for parser to finish\")\n\t}\n}", "func (fs *FileStream) ReadAt(b []byte, offset int64) (int, error) {\n\treturn fs.f.ReadAt(b, offset)\n}", "func (sto *overlayStorage) ReceiveBlob(ctx context.Context, br blob.Ref, src io.Reader) (sb blob.SizedRef, err error) {\n\tsb, err = sto.upper.ReceiveBlob(ctx, br, src)\n\tif err == nil && sto.deleted != nil {\n\t\terr = sto.deleted.Delete(br.String())\n\t}\n\treturn sb, err\n}", "func GetBlob(blobSum string, digest string) *Blob {\n\n\tb := new(Blob)\n\tb.ID = digest\n\n\tif !b.IsExist() {\n\t\tlogger.Errorf(\"blob of %s not exist\\n\", digest)\n\t\treturn nil\n\t}\n\n\tfd, err := os.Open(b.FilePath())\n\tif err != nil {\n\t\tlogger.Errorf(\"open file of %s error\\n\", b.FilePath())\n\t\treturn nil\n\t}\n\n\tdefer fd.Close()\n\n\tdata, err := ioutil.ReadAll(fd)\n\tif err != nil {\n\t\tlogger.Errorf(\"read file from %s error\\n\", b.FilePath())\n\t\treturn nil\n\t}\n\n\tb.Data = data\n\tb.Size = utils.GetFileSize(b.FilePath())\n\tb.RefCount = b.GetRefCount()\n\n\treturn b\n}", "func (w *blobWriter) ReadFrom(r io.Reader) (n int64, err error) {\n\treturn w.BlobWriter.ReadFrom(r)\n}", "func (g *deltaGenerator) readSourceData(source *sourceInfo, offset int64, size int64) ([]byte, error) {\n\t_, err := g.analysis.sourceData.Seek(int64(source.offset+offset), 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbuf := make([]byte, size)\n\t_, err = io.ReadFull(g.analysis.sourceData, buf)\n\treturn buf, err\n}", "func ReadFromManagedReader(oid uint64, data []byte) (int, int) {\n\tri, ok := GetManagedObject(oid)\n\tif !ok {\n\t\tpanic(\"failed to get reader\")\n\t}\n\tr := ri.(io.Reader)\n\tn, err := r.Read(data)\n\tif err != nil {\n\t\tif err == io.EOF {\n\t\t\treturn 0, 0\n\t\t}\n\t\tplog.Errorf(\"got err %+v when reading from the snapshot reader\", err)\n\t\treturn -1, getErrnoFromError(err)\n\t}\n\treturn n, 0\n}", "func (i *DataIndex) getBlob(hash string, fpath string) error {\n\n\t// disallow empty paths\n\tif len(fpath) == 0 {\n\t\treturn fmt.Errorf(\"get blob %.7s - error: no path supplied\", hash)\n\t}\n\n\tfpath = path.Clean(fpath)\n\n\tpErr(\"get blob %.7s %s\\n\", hash, fpath)\n\tw, err := createFile(fpath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer w.Close()\n\n\treturn i.copyBlob(hash, w)\n}", "func (c *Client) Read(path gfs.Path, offset gfs.Offset, data []byte) (n int, err error) {\n\tvar f gfs.GetFileInfoReply\n\terr = util.Call(c.master, \"Master.RPCGetFileInfo\", gfs.GetFileInfoArg{path}, &f)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\tif int64(offset/gfs.MaxChunkSize) > f.Chunks {\n\t\treturn -1, fmt.Errorf(\"read offset exceeds file size\")\n\t}\n\n\tpos := 0\n\tfor pos < len(data) {\n\t\tindex := gfs.ChunkIndex(offset / gfs.MaxChunkSize)\n\t\tchunkOffset := offset % gfs.MaxChunkSize\n\n\t\tif int64(index) >= f.Chunks {\n\t\t\terr = gfs.Error{gfs.ReadEOF, \"EOF over chunks\"}\n\t\t\tbreak\n\t\t}\n\n\t\tvar handle gfs.ChunkHandle\n\t\thandle, err = c.GetChunkHandle(path, index)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tvar n int\n\t\t//wait := time.NewTimer(gfs.ClientTryTimeout)\n\t\t//loop:\n\t\tfor {\n\t\t\t//select {\n\t\t\t//case <-wait.C:\n\t\t\t// err = gfs.Error{gfs.Timeout, \"Read 
Timeout\"}\n\t\t\t// break loop\n\t\t\t//default:\n\t\t\t//}\n\t\t\tn, err = c.ReadChunk(handle, chunkOffset, data[pos:])\n\t\t\tif err == nil || err.(gfs.Error).Code == gfs.ReadEOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlog.Warning(\"Read \", handle, \" connection error, try again: \", err)\n\t\t}\n\n\t\toffset += gfs.Offset(n)\n\t\tpos += n\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif err != nil && err.(gfs.Error).Code == gfs.ReadEOF {\n\t\treturn pos, io.EOF\n\t} else {\n\t\treturn pos, err\n\t}\n}", "func TestReadByByte(t *testing.T) {\n\trun.skipIfNoFUSE(t)\n\n\tvar data = []byte(\"hellohello\")\n\trun.createFile(t, \"testfile\", string(data))\n\trun.checkDir(t, \"testfile 10\")\n\n\tfor i := 0; i < len(data); i++ {\n\t\tfd, err := os.Open(run.path(\"testfile\"))\n\t\tassert.NoError(t, err)\n\t\tfor j := 0; j < i; j++ {\n\t\t\tbuf := make([]byte, 1)\n\t\t\tn, err := io.ReadFull(fd, buf)\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, 1, n)\n\t\t\tassert.Equal(t, buf[0], data[j])\n\t\t}\n\t\terr = fd.Close()\n\t\tassert.NoError(t, err)\n\t}\n\n\trun.rm(t, \"testfile\")\n}", "func (f *FileBlob) ReadCloser() (io.ReadCloser, error) {\n\tif f.blob == nil {\n\t\treturn nil, fmt.Errorf(\"underlying blob ([]byte) is nil\")\n\t}\n\treturn blob.NewBufferedReadCloser(f.blob), nil\n}", "func (r *bytesReader) ReadAt(b []byte, offset int64) (n int, err error) {\n\tif offset < 0 {\n\t\treturn 0, errors.New(\"buffer.bytesReader.ReadAt: negative offset\")\n\t}\n\tif offset >= int64(len(r.bs)) {\n\t\treturn 0, io.EOF\n\t}\n\tn = copy(b, r.bs[offset:])\n\tif n < len(b) {\n\t\terr = io.EOF\n\t}\n\treturn\n}", "func (c *Client) GetObjectData(ctx context.Context, obj string) ([]byte, error) {\n\tr, err := c.gcsClient.Bucket(c.bucket).Object(obj).NewReader(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"GetObjectData: failed to create reader for object %q in bucket %q: %q\", obj, c.bucket, err)\n\t}\n\tdefer r.Close()\n\n\treturn io.ReadAll(r)\n}", "func (bsf *bucketstorefile) ReadAt(p []byte, off int64) (n int, err error) {\n\tbsf.apply(func() {\n\t\tatomic.AddInt64(&bsf.stats.Reads, 1)\n\t\tn, err = bsf.file.ReadAt(p, off)\n\t\tif err != nil {\n\t\t\tatomic.AddInt64(&bsf.stats.ReadErrors, 1)\n\t\t}\n\t\tatomic.AddInt64(&bsf.stats.ReadBytes, int64(n))\n\t})\n\treturn n, err\n}", "func (s *Snappy) DecompressBlob(ctx context.Context, data []byte) ([]byte, error) {\n\treturn gs.Decode(nil, data)\n}", "func newDataFileReaderBytes(buf []byte, datumReader DatumReader) (reader *DataFileReader, err error) {\n\tif len(buf) < len(magic) || !bytes.Equal(magic, buf[0:4]) {\n\t\treturn nil, NotAvroFile\n\t}\n\n\tdec := NewBinaryDecoder(buf)\n\tblockDecoder := NewBinaryDecoder(nil)\n\treader = &DataFileReader{\n\t\tdata: buf,\n\t\tdec: dec,\n\t\tblockDecoder: blockDecoder,\n\t\tdatum: datumReader,\n\t}\n\n\tif reader.header, err = readObjFileHeader(dec); err != nil {\n\t\treturn nil, err\n\t}\n\n\tschema, err := ParseSchema(string(reader.header.Meta[schemaKey]))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treader.datum.SetSchema(schema)\n\treader.block = &DataBlock{}\n\n\tif reader.hasNextBlock() {\n\t\tif err := reader.NextBlock(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn reader, nil\n}", "func (self *Blob) FileReader() (reader io.ReadCloser, filename, contentType string, err error) {\n\treturn Config.Backend.FileReader(self.FileID.Get())\n}", "func (b *Block) ReadAt(off int) ([]byte, error) {\n\tif off > blockSize || off < 0 {\n\t\treturn []byte{}, ErrOffsetRange\n\t}\n\n\treturn 
b.data[off:], nil\n}", "func ReadBlockData(chain uint64, key []byte) []byte {\n\tstream, _ := runtime.DbGet(dbBlockData{}, chain, key)\n\treturn stream\n}", "func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (rc io.ReadCloser, n int64, err error) {\n\tif info.Digest == image.GzippedEmptyLayerDigest {\n\t\treturn ioutil.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil\n\t}\n\trc, n, _, err = s.getBlobAndLayerID(info)\n\treturn rc, n, err\n}", "func (d *DriveDB) readChunk(f *File, chunk, filesize int64) ([]byte, error) {\n\tif chunk*(*driveCacheChunk) > filesize {\n\t\treturn nil, fmt.Errorf(\"read past eof\")\n\t}\n\tkey := cacheMapKey(f.Id, chunk)\n\tv, err := d.sf.Do(string(key), func() (interface{}, error) {\n\t\treturn d.readChunkImpl(f, chunk, filesize)\n\t})\n\treturn v.([]byte), err\n}", "func (d *demo) readFile(fileName string, data *[]byte) {\r\n\t// ファイルを開く\r\n\trc, err := d.bucket.Object(fileName).NewReader(d.ctx)\r\n\tif err != nil {\r\n\t\td.errorf(\"readFile: unable to open file from bucket %q, file %q: %v\", d.bucketName, fileName, err)\r\n\t\treturn\r\n\t}\r\n\tdefer rc.Close()\r\n\r\n\t// データを読み込む\r\n\tslurp, err := ioutil.ReadAll(rc)\r\n\tif err != nil {\r\n\t\td.errorf(\"readFile: unable to read data from bucket %q, file %q: %v\", d.bucketName, fileName, err)\r\n\t\treturn\r\n\t}\r\n\r\n\t*data = slurp\r\n}", "func readMetadata(r io.ReaderAt, firstBlock int64, initialBlockOffset uint32, byteOffset uint16, size int) ([]byte, error) {\n\tvar (\n\t\tb []byte\n\t\tblockOffset = int(initialBlockOffset)\n\t)\n\t// we know how many blocks, so read them all in\n\tread, m, err := readMetadataBlock(r, firstBlock+int64(blockOffset))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb = append(b, m[byteOffset:]...)\n\t// do we have any more to read?\n\tfor len(b) < size {\n\t\tblockOffset += read\n\t\tread, m, err = readMetadataBlock(r, firstBlock+int64(blockOffset))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tb = append(b, m...)\n\t}\n\tif len(b) >= size {\n\t\tb = b[:size]\n\t}\n\treturn b, nil\n}", "func (s *SeekerWrapper) ReadAt(p []byte, offset int64) (int, error) { return s.s.ReadAt(p, offset) }", "func (is *ImageStoreLocal) GetBlob(repo string, digest godigest.Digest, mediaType string,\n) (io.ReadCloser, int64, error) {\n\tvar lockLatency time.Time\n\n\tif err := digest.Validate(); err != nil {\n\t\treturn nil, -1, err\n\t}\n\n\tblobPath := is.BlobPath(repo, digest)\n\n\tis.RLock(&lockLatency)\n\tdefer is.RUnlock(&lockLatency)\n\n\tbinfo, err := os.Stat(blobPath)\n\tif err != nil {\n\t\tis.log.Debug().Err(err).Str(\"blob\", blobPath).Msg(\"failed to stat blob\")\n\n\t\treturn nil, -1, zerr.ErrBlobNotFound\n\t}\n\n\tblobReadCloser, err := os.Open(blobPath)\n\tif err != nil {\n\t\tis.log.Debug().Err(err).Str(\"blob\", blobPath).Msg(\"failed to open blob\")\n\n\t\treturn nil, -1, err\n\t}\n\n\t// The caller function is responsible for calling Close()\n\treturn blobReadCloser, binfo.Size(), nil\n}", "func ReadDataAt(r io.ReaderAt, n uint64, off int64) ([]byte, error) {\n\tif int64(n) < 0 || n != uint64(int(n)) {\n\t\t// n is too large to fit in int, so we can't allocate\n\t\t// a buffer large enough. 
Treat this as a read failure.\n\t\treturn nil, io.ErrUnexpectedEOF\n\t}\n\n\tif n < chunk {\n\t\tbuf := make([]byte, n)\n\t\t_, err := r.ReadAt(buf, off)\n\t\tif err != nil {\n\t\t\t// io.SectionReader can return EOF for n == 0,\n\t\t\t// but for our purposes that is a success.\n\t\t\tif err != io.EOF || n > 0 {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\treturn buf, nil\n\t}\n\n\tvar buf []byte\n\tbuf1 := make([]byte, chunk)\n\tfor n > 0 {\n\t\tnext := n\n\t\tif next > chunk {\n\t\t\tnext = chunk\n\t\t}\n\t\t_, err := r.ReadAt(buf1[:next], off)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tbuf = append(buf, buf1[:next]...)\n\t\tn -= next\n\t\toff += int64(next)\n\t}\n\treturn buf, nil\n}", "func (b *Blob) Bytes() ([]byte, error) {\n\tdone := make(chan struct{})\n\tfr := js.Global.Get(\"FileReader\").New()\n\tfr.Set(\"onloadend\", func() { close(done) })\n\tfr.Call(\"readAsArrayBuffer\", b)\n\t<-done // Wait for the read to finish\n\tif err := fr.Get(\"error\"); err != nil {\n\t\treturn nil, &js.Error{err}\n\t}\n\treturn js.Global.Get(\"Uint8Array\").New(fr.Get(\"result\")).Interface().([]uint8), nil\n}", "func (a *file_asset) ReadAt(b []byte, off int64) (n int, err error) {\n\treturn a.f.ReadAt(b, off)\n}", "func ReadBytes(buffer []byte, offset int, size int) []byte {\n return buffer[offset:offset + size]\n}", "func (rc *RegClient) BlobHead(ctx context.Context, r ref.Ref, d types.Descriptor) (blob.Reader, error) {\n\tschemeAPI, err := rc.schemeGet(r.Scheme)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn schemeAPI.BlobHead(ctx, r, d)\n}", "func (c *Client) ReadCheckpoint(ctx context.Context) ([]byte, error) {\n\tbkt := c.gcsClient.Bucket(c.bucket)\n\tobj := bkt.Object(layout.CheckpointPath)\n\n\tr, err := obj.NewReader(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer r.Close()\n\n\treturn io.ReadAll(r)\n}", "func (file *Remote) ReadAt(buf []byte, off int64) (int, error) {\n\tsize := len(buf)\n\tif size > file.maxBufSize() {\n\t\tsize = file.maxBufSize()\n\t}\n\n\tvar total int\n\tfor start := 0; start < len(buf); start += size {\n\t\tend := start + size\n\t\tif end > len(buf) {\n\t\t\tend = len(buf)\n\t\t}\n\n\t\tn, err := file.readPart(buf[start:end], off+int64(start))\n\t\ttotal += n\n\t\tif err != nil {\n\t\t\treturn total, err\n\t\t}\n\t}\n\treturn total, nil\n}", "func readFileToByteArray(p string) []byte {\n\tb, err := ioutil.ReadFile(p)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error reading file: #%v \", err)\n\t}\n\treturn b\n}", "func readFileToByteArray(p string) []byte {\n\tb, err := ioutil.ReadFile(p)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error reading file: #%v \", err)\n\t}\n\treturn b\n}", "func (rc *RegClient) BlobGet(ctx context.Context, r ref.Ref, d types.Descriptor) (blob.Reader, error) {\n\tdata, err := d.GetData()\n\tif err == nil {\n\t\treturn blob.NewReader(blob.WithDesc(d), blob.WithRef(r), blob.WithReader(bytes.NewReader(data))), nil\n\t}\n\tschemeAPI, err := rc.schemeGet(r.Scheme)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn schemeAPI.BlobGet(ctx, r, d)\n}", "func downloadBlob(blobClient *storage.BlobStorageClient, containerName, blobName, fileName string) error {\n\tfmt.Printf(\"Download blob data into '%v'...\\n\", fileName)\n\tif _, err := os.Stat(fileName); err == nil {\n\t\treturn fmt.Errorf(\"File '%v' already exists\", fileName)\n\t}\n\treadCloser, err := blobClient.GetBlob(containerName, blobName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbytesRead, err := ioutil.ReadAll(readCloser)\n\tdefer readCloser.Close()\n\tif err 
!= nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(fileName, bytesRead, 0666)\n}", "func (r *objReader) readData() Data {\n\tn := r.readInt()\n\td := Data{Offset: r.offset, Size: int64(n)}\n\tr.skip(int64(n))\n\treturn d\n}", "func ReadFromContainer(ip, containerName, volPath, fileName string) (string, error) {\n\tlog.Printf(\"Reading from file [%s] in container [%s] on VM [%s]\\n\", fileName, containerName, ip)\n\n\treadCmd := \" /bin/sh -c 'cat \" + volPath + \"/\" + fileName + \"'\"\n\tfullCmd := dockercli.RunCmdInContainer + containerName + readCmd\n\n\tlog.Println(fullCmd)\n\treturn ssh.InvokeCommand(ip, fullCmd)\n}", "func (s *AzureBlobStorage) Open(ctx context.Context, name string) (ExternalFileReader, error) {\n\tclient := s.containerClient.NewBlockBlobClient(s.withPrefix(name))\n\tresp, err := client.GetProperties(ctx, nil)\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"Failed to get properties from the azure blob\")\n\t}\n\n\treturn &azblobObjectReader{\n\t\tblobClient: client,\n\n\t\tpos: 0,\n\t\ttotalSize: *resp.ContentLength,\n\n\t\tctx: ctx,\n\n\t\tcpkInfo: s.cpkInfo,\n\t}, nil\n}", "func (reader *embedFileReader) ReadAt(p []byte, off int64) (int, error) {\n\treturn 0, ErrNotImplemented\n}", "func (c *Conn) ReadDataUnit() (data []byte, err error) {\n\tvar s uint32\n\terr = binary.Read(c.Conn, binary.BigEndian, &s)\n\tif err != nil {\n\t\treturn\n\t}\n\tdata = make([]byte, s)\n\tn, err := c.Conn.Read(data)\n\tif err != nil {\n\t\treturn\n\t}\n\tif 4+n != int(s) {\n\t\treturn data, io.ErrNoProgress\n\t}\n\treturn data, nil\n}", "func BlobTime(v interface{}) (t time.Time) {\n\tvar (\n\t\tf *os.File\n\t\tfn string\n\t\terr error\n\t\tok bool\n\t)\n\tif fn, ok = v.(string); ok {\n\t\tif f, err = os.Open(fn); err != nil {\n\t\t\treturn\n\t\t}\n\t\tdefer f.Close()\n\t} else if f, ok = v.(*os.File); !ok {\n\t\treturn\n\t}\n\tf.Seek(BlobTimeOff, os.SEEK_SET)\n\t(NBOReader{f}).ReadNBO(&t)\n\treturn\n}", "func BlobTime(v interface{}) (t time.Time) {\n\tvar (\n\t\tf *os.File\n\t\tfn string\n\t\terr error\n\t\tok bool\n\t)\n\tif fn, ok = v.(string); ok {\n\t\tif f, err = os.Open(fn); err != nil {\n\t\t\treturn\n\t\t}\n\t\tdefer f.Close()\n\t} else if f, ok = v.(*os.File); !ok {\n\t\treturn\n\t}\n\tf.Seek(BlobTimeOff, os.SEEK_SET)\n\t(NBOReader{f}).ReadNBO(&t)\n\treturn\n}", "func ReadByte(buffer []byte, offset int) byte {\n return buffer[offset]\n}", "func (b *Blob) Reader() (core.ObjectReader, error) {\n\treturn b.obj.Reader()\n}", "func (c *poolConn) ReadPart() ([]byte, error) {\n\tfor {\n\t\tfor i := c.buffer.index; i < c.buffer.size; i++ {\n\t\t\tif c.buffer.realBuffer[i-1] == '\\r' && c.buffer.realBuffer[i] == '\\n' {\n\t\t\t\tindex := c.buffer.index\n\t\t\t\tc.buffer.index = i + 1\n\t\t\t\tif c.buffer.index >= c.buffer.size {\n\t\t\t\t\tc.mustRead = true\n\t\t\t\t}\n\t\t\t\treturn c.buffer.realBuffer[index: i-1], nil\n\t\t\t}\n\t\t}\n\t\terr := c.ReadUnsafeBuffer()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n}", "func getBlob(tx *sql.Tx, digest string) (*Blob, error) {\n\tvar b *Blob\n\trows, err := tx.Query(\"SELECT * from blobinfo WHERE digest == $1\", digest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor rows.Next() {\n\t\tb = &Blob{}\n\t\tif err := blobRowScan(rows, b); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t// No more than one row for digest must exist.\n\t\tbreak\n\t}\n\tif err := rows.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn b, err\n}", "func (bio *BinaryIO) ReadData(off int64, data interface{}) {\n\tsr := 
NewSequentialReader(bio, off)\n\tsr.ReadData(data)\n}", "func GetBlobContent(ctx context.Context, bucketURL, key string) ([]byte, error) {\n\tbucket, err := blob.OpenBucket(ctx, bucketURL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error from blob.OpenBucket: %w\", err)\n\t}\n\tdefer bucket.Close()\n\n\tkeyData, err := bucket.ReadAll(ctx, key)\n\tif err != nil {\n\t\treturn keyData, fmt.Errorf(\"error during bucket.ReadAll: %w\", err)\n\t}\n\treturn keyData, nil\n}", "func (fs *FileSystem) loadData(ino uint64) ([]byte, error) {\n\tif ino == 0 {\n\t\treturn nil, errNilIno\n\t}\n\n\tvar data []byte\n\terr := fs.db.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(\"data\"))\n\t\td := b.Get(i2b(ino))\n\t\tif d == nil {\n\t\t\td = []byte{}\n\t\t}\n\t\tdata = make([]byte, len(d))\n\t\tcopy(data, d)\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn data, err\n\t}\n\n\treturn data, err\n}", "func readLocalFileExtent(filename string, ofs int64, len int) ([]byte, error) {\n\tfReader, err := os.Open(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer fReader.Close()\n\n\tbuf := make([]byte, len)\n\trecvLen, err := fReader.ReadAt(buf, ofs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif recvLen != len {\n\t\tpanic(fmt.Sprintf(\"short read, got %d bytes instead of %d\",\n\t\t\trecvLen, len))\n\t}\n\treturn buf, nil\n}", "func (h *proxyHandler) GetBlob(args []any) (replyBuf, error) {\n\th.lock.Lock()\n\tdefer h.lock.Unlock()\n\n\tvar ret replyBuf\n\n\tif h.sysctx == nil {\n\t\treturn ret, fmt.Errorf(\"client error: must invoke Initialize\")\n\t}\n\tif len(args) != 3 {\n\t\treturn ret, fmt.Errorf(\"found %d args, expecting (imgid, digest, size)\", len(args))\n\t}\n\timgref, err := h.parseImageFromID(args[0])\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\tdigestStr, ok := args[1].(string)\n\tif !ok {\n\t\treturn ret, fmt.Errorf(\"expecting string blobid\")\n\t}\n\tsize, err := parseUint64(args[2])\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tctx := context.TODO()\n\td, err := digest.Parse(digestStr)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\tblobr, blobSize, err := imgref.src.GetBlob(ctx, types.BlobInfo{Digest: d, Size: int64(size)}, h.cache)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tpiper, f, err := h.allocPipe()\n\tif err != nil {\n\t\tblobr.Close()\n\t\treturn ret, err\n\t}\n\tgo func() {\n\t\t// Signal completion when we return\n\t\tdefer blobr.Close()\n\t\tdefer f.wg.Done()\n\t\tverifier := d.Verifier()\n\t\ttr := io.TeeReader(blobr, verifier)\n\t\tn, err := io.Copy(f.w, tr)\n\t\tif err != nil {\n\t\t\tf.err = err\n\t\t\treturn\n\t\t}\n\t\tif n != int64(size) {\n\t\t\tf.err = fmt.Errorf(\"expected %d bytes in blob, got %d\", size, n)\n\t\t}\n\t\tif !verifier.Verified() {\n\t\t\tf.err = fmt.Errorf(\"corrupted blob, expecting %s\", d.String())\n\t\t}\n\t}()\n\n\tret.value = blobSize\n\tret.fd = piper\n\tret.pipeid = uint32(f.w.Fd())\n\treturn ret, nil\n}", "func (b *BitsImageManager) GetBlob(name string, digest string) io.ReadCloser {\n\tif digest == b.rootfsDigest {\n\t\tr, e := b.rootFSBlobstore.Get(\"assets/eirinifs.tar\")\n\t\tutil.PanicOnError(errors.WithStack(e))\n\t\treturn r\n\t}\n\n\tr, e := b.digestLookupStore.Get(digest)\n\tif _, notFound := e.(*bitsgo.NotFoundError); notFound {\n\t\treturn nil\n\t}\n\n\tutil.PanicOnError(errors.WithStack(e))\n\treturn r\n}", "func (mcm *MinioChunkManager) Read(ctx context.Context, filePath string) ([]byte, error) {\n\tobject, err := mcm.getMinioObject(ctx, mcm.bucketName, filePath, 
minio.GetObjectOptions{})\n\tif err != nil {\n\t\tlog.Warn(\"failed to get object\", zap.String(\"bucket\", mcm.bucketName), zap.String(\"path\", filePath), zap.Error(err))\n\t\treturn nil, err\n\t}\n\tdefer object.Close()\n\n\t// Prefetch object data\n\tvar empty []byte\n\t_, err = object.Read(empty)\n\tif err != nil {\n\t\terrResponse := minio.ToErrorResponse(err)\n\t\tif errResponse.Code == \"NoSuchKey\" {\n\t\t\treturn nil, WrapErrNoSuchKey(filePath)\n\t\t}\n\t\tlog.Warn(\"failed to read object\", zap.String(\"path\", filePath), zap.Error(err))\n\t\treturn nil, err\n\t}\n\n\tobjectInfo, err := object.Stat()\n\tif err != nil {\n\t\tlog.Warn(\"failed to stat object\", zap.String(\"bucket\", mcm.bucketName), zap.String(\"path\", filePath), zap.Error(err))\n\t\terrResponse := minio.ToErrorResponse(err)\n\t\tif errResponse.Code == \"NoSuchKey\" {\n\t\t\treturn nil, WrapErrNoSuchKey(filePath)\n\t\t}\n\t\treturn nil, err\n\t}\n\n\tdata, err := Read(object, objectInfo.Size)\n\tif err != nil {\n\t\terrResponse := minio.ToErrorResponse(err)\n\t\tif errResponse.Code == \"NoSuchKey\" {\n\t\t\treturn nil, WrapErrNoSuchKey(filePath)\n\t\t}\n\t\tlog.Warn(\"failed to read object\", zap.String(\"bucket\", mcm.bucketName), zap.String(\"path\", filePath), zap.Error(err))\n\t\treturn nil, err\n\t}\n\tmetrics.PersistentDataKvSize.WithLabelValues(metrics.DataGetLabel).Observe(float64(objectInfo.Size))\n\treturn data, nil\n}", "func fetchData(r reader) error {\n\tfmt.Printf(\"type %T\\n\", r)\n\tdata := make([]byte, 50)\n\tlen, err := r.read(data)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Println(string(data[:len]))\n\treturn nil\n\n}", "func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) {\n\tif info.Digest == \"\" {\n\t\treturn nil, errors.Errorf(`no digest supplied when reading blob`)\n\t}\n\tif err := info.Digest.Validate(); err != nil {\n\t\treturn nil, errors.Wrapf(err, `invalid digest supplied when reading blob`)\n\t}\n\t// Assume it's a file, since we're only calling this from a place that expects to read files.\n\tif filename, ok := s.filenames[info.Digest]; ok {\n\t\tcontents, err2 := ioutil.ReadFile(filename)\n\t\tif err2 != nil {\n\t\t\treturn nil, errors.Wrapf(err2, `error reading blob from file %q`, filename)\n\t\t}\n\t\treturn contents, nil\n\t}\n\t// If it's not a file, it's a bug, because we're not expecting to be asked for a layer.\n\treturn nil, errors.New(\"blob not found\")\n}", "func recoveredDataOffset(chunkFetchOffset uint64, rs modules.ErasureCoder) uint64 {\n\t// If partialDecoding is not available we downloaded the whole sector and\n\t// recovered the whole chunk which means the offset and length are actually\n\t// equal to the chunkFetchOffset and chunkFetchLength.\n\tif !rs.SupportsPartialEncoding() {\n\t\treturn chunkFetchOffset\n\t}\n\t// Else we need to adjust the offset a bit.\n\trecoveredSegmentSize := uint64(rs.MinPieces() * crypto.SegmentSize)\n\treturn chunkFetchOffset % recoveredSegmentSize\n}", "func (rh *readHandle) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {\n\trh.g.add(ctx, Event{\n\t\tOp: ReadOp,\n\t\tFileNum: rh.fileNum,\n\t\tHandleID: rh.handleID,\n\t\tOffset: off,\n\t\tSize: int64(len(p)),\n\t})\n\treturn rh.rh.ReadAt(ctx, p, off)\n}", "func alignReads(offsetInFile int64, readLen int64) []alignedBlobRead {\n\tblobIdx, offsetInBlob := stoclient.BlobIdxFromOffset(offsetInFile)\n\n\t// simplest, general case\n\tif offsetInBlob+readLen <= stotypes.BlobSize {\n\t\treturn 
[]alignedBlobRead{\n\t\t\t{blobIdx: blobIdx, offsetInBlob: int(offsetInBlob), lenInBlob: readLen},\n\t\t}\n\t}\n\n\tfirstRead := alignedBlobRead{blobIdx: blobIdx, offsetInBlob: int(offsetInBlob), lenInBlob: stotypes.BlobSize - offsetInBlob}\n\treadLen -= firstRead.lenInBlob\n\n\tadditionalReads := []alignedBlobRead{}\n\n\tfor readLen > 0 {\n\t\tblobIdx++\n\t\treadLenForBlob := min(readLen, stotypes.BlobSize)\n\t\tadditionalReads = append(additionalReads, alignedBlobRead{blobIdx: blobIdx, offsetInBlob: 0, lenInBlob: readLenForBlob})\n\n\t\treadLen -= readLenForBlob\n\t}\n\n\treturn append([]alignedBlobRead{firstRead}, additionalReads...)\n}", "func (s *Service) GetBlob(c context.Context, req *blobpb.GetBlobRequest) (*blobpb.GetBlobResponse, error) {\n\tbr, err := s.getBlobRecord(c, id.BlobID(req.Id))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tr, err := s.Storage.Get(c, br.Path)\n\tif r != nil {\n\t\tdefer r.Close()\n\t}\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Aborted, \"cannnot get storage: %v\", err)\n\t}\n\n\tb, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Aborted, \"cannot read from response: %v\", err)\n\t}\n\n\treturn &blobpb.GetBlobResponse{\n\t\tData: b,\n\t}, nil\n}", "func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {\n\tbaseUrl := d.getBaseUrl(path)\n\n\tinfo, err := d.Bucket.Stat(ctx, path)\n\tif err != nil {\n\t\treturn nil, err;\n\t}\n\n\tif offset > info.Fsize {\n\t\treturn ioutil.NopCloser(bytes.NewReader(nil)), nil\n\t}\n\n\thttpClient := &http.Client{}\n\treq, err := http.NewRequest(\"GET\", baseUrl, nil)\n\treq.Header.Add(\"Range\", \"bytes=\"+strconv.FormatInt(offset, 10)+\"-\")\n\tresp, err := httpClient.Do(req)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc,_ := ioutil.ReadAll(resp.Body)\n\n\tfmt.Print(\"content\"+string(c)+\"\\n\")\n\n\treturn resp.Body,err\n}", "func (tf *Temp) ReadAt(buffer []byte, off int64) (int, error) {\n\ttf.Lock()\n\tdefer tf.Unlock()\n\n\tread, err := tf.file.ReadAt(buffer, off)\n\treturn read, ex.New(err)\n}", "func (is *ObjectStorage) GetBlobContent(repo string, digest godigest.Digest) ([]byte, error) {\n\tif err := digest.Validate(); err != nil {\n\t\treturn []byte{}, err\n\t}\n\n\tblobPath := is.BlobPath(repo, digest)\n\n\tbinfo, err := is.store.Stat(context.Background(), blobPath)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Str(\"blob\", blobPath).Msg(\"failed to stat blob\")\n\n\t\treturn []byte{}, zerr.ErrBlobNotFound\n\t}\n\n\tblobBuf, err := is.store.GetContent(context.Background(), blobPath)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Str(\"blob\", blobPath).Msg(\"failed to open blob\")\n\n\t\treturn nil, err\n\t}\n\n\t// is a 'deduped' blob?\n\tif binfo.Size() == 0 {\n\t\t// Check blobs in cache\n\t\tdstRecord, err := is.checkCacheBlob(digest)\n\t\tif err != nil {\n\t\t\tis.log.Error().Err(err).Str(\"digest\", digest.String()).Msg(\"cache: not found\")\n\n\t\t\treturn nil, zerr.ErrBlobNotFound\n\t\t}\n\n\t\tblobBuf, err := is.store.GetContent(context.Background(), dstRecord)\n\t\tif err != nil {\n\t\t\tis.log.Error().Err(err).Str(\"blob\", dstRecord).Msg(\"failed to open blob\")\n\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn blobBuf, nil\n\t}\n\n\treturn blobBuf, nil\n}", "func (*BlobReadRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_store_store_proto_rawDescGZIP(), []int{19}\n}" ]
[ "0.7054809", "0.6706905", "0.6526644", "0.6507054", "0.6507054", "0.64965016", "0.6126463", "0.6124615", "0.6026566", "0.6013546", "0.5956357", "0.59294546", "0.5917369", "0.58980083", "0.58980083", "0.5863768", "0.58041257", "0.5774745", "0.5771428", "0.565637", "0.5630689", "0.56029814", "0.5590403", "0.5540793", "0.5530199", "0.5484269", "0.54775345", "0.5464464", "0.54614997", "0.5457328", "0.541358", "0.540986", "0.53908813", "0.53624713", "0.53595024", "0.53545994", "0.5336888", "0.5327992", "0.52963454", "0.5293901", "0.5288893", "0.5288691", "0.5264519", "0.52623904", "0.52525765", "0.5250638", "0.52406687", "0.52253485", "0.52228606", "0.52069503", "0.52025014", "0.51978046", "0.51749545", "0.5169728", "0.51634544", "0.51600623", "0.5158146", "0.5148585", "0.5147973", "0.5144297", "0.51418734", "0.51329577", "0.5127217", "0.51240057", "0.5113481", "0.51109874", "0.51039976", "0.50966835", "0.50887054", "0.50887054", "0.5083", "0.508018", "0.50796163", "0.50768614", "0.50761753", "0.5056775", "0.50511926", "0.5039565", "0.5039565", "0.5038713", "0.503745", "0.5034949", "0.50319904", "0.5029698", "0.5027055", "0.5017164", "0.5016795", "0.50117373", "0.50114876", "0.5005926", "0.4999733", "0.49993855", "0.49863642", "0.49828675", "0.49765438", "0.49724004", "0.49703413", "0.495218", "0.49512842", "0.49483335" ]
0.73583215
0
Returns true if the key is supported, false if it wasn't.
func (r *Response) Supported() bool { return !strings.Contains(r.String(), NotSupported) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func isSupported(key ssh.PublicKey) bool {\n\t_, ok := supportedKeyTypesMap[key.Type()]\n\treturn ok\n}", "func Supported() bool {\n\treturn gobool(C.keybinder_supported())\n}", "func KeyIsSuitable(key []byte) bool {\n\treturn subtle.ConstantTimeEq(int32(len(key)), int32(KeySize)) == 1\n}", "func Supported(version string) bool {\n\treturn supportedMap[version]\n}", "func IsSupportedConfigItem(key string) bool {\n\tconfigItem := C.CString(key)\n\tdefer C.free(unsafe.Pointer(configItem))\n\treturn bool(C.go_lxc_config_item_is_supported(configItem))\n}", "func (c *Client) IsCommandSupported(command string) bool {\n\t_, ok := c.funcMap[command]\n\treturn ok\n}", "func (key Key) Valid() bool {\n\tk := uint64(key)\n\treturn 0 <= k && k < config.maxKey // Check for 0 <= not necessary\n}", "func IsSupported() bool {\n\treturn true\n}", "func IsSupported() bool {\n\treturn true\n}", "func supportedLabel(key rune) bool {\n\tif key == 'a' || key == 'd' || key == 'e' ||\n\t\tkey == 'q' || key == 's' || key == 'w' {\n\t\treturn true\n\t}\n\treturn false\n}", "func (OnfTest1_Cont1A_List5_Key) IsYANGGoKeyStruct() {}", "func KeyCanPerformOperation(key jwk.Key, op jwk.KeyOperation) bool {\n\t// keyUsage is the value of \"use\" (\"sig\" or \"enc\"), while keyOps is the value of \"key_ops\" (an array of allowed operations)\n\t// Per RFC 7517: `The \"use\" and \"key_ops\" JWK members SHOULD NOT be used together; however, if both are used, the information they convey MUST be consistent.`\n\tkeyUsage := key.KeyUsage()\n\tkeyOps := key.KeyOps()\n\n\t// If the key has nothin in both fields, then just allow any operation\n\tif len(keyOps) == 0 && keyUsage == \"\" {\n\t\treturn true\n\t}\n\n\t// Check key_ops\n\tif len(keyOps) > 0 {\n\t\tif !slices.Contains(keyOps, op) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\t// Check use\n\tif keyUsage != \"\" {\n\t\tswitch op {\n\t\tcase jwk.KeyOpEncrypt, jwk.KeyOpDecrypt, jwk.KeyOpWrapKey, jwk.KeyOpUnwrapKey:\n\t\t\treturn keyUsage == \"enc\"\n\t\tcase jwk.KeyOpSign, jwk.KeyOpVerify:\n\t\t\treturn keyUsage == \"sig\"\n\t\t}\n\t}\n\n\treturn true\n}", "func Supported() bool { return true }", "func (OnfTest1_Cont1A_List4_List4A_Key) IsYANGGoKeyStruct() {}", "func (e ProviderInfoValidationError) Key() bool { return e.key }", "func Supported() bool {\n\treturn true\n}", "func (e ApplicationPubSub_MQTTProviderValidationError) Key() bool { return e.key }", "func IsSupported(p string) bool {\n\tswitch path.Ext(p) {\n\tcase \"\", \".gz\", \".bz2\", \".gpg\", \".xz\":\n\t\treturn true\n\t}\n\treturn false\n}", "func (e JwtProviderValidationError) Key() bool { return e.key }", "func (s *Service) IsSupported() bool {\n\tfileExists := s.d.isFileExists(\"/data/local/tmp/minicap\")\n\tif !fileExists {\n\t\terr := s.Install()\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t}\n\tout, err := s.d.shell(\"LD_LIBRARY_PATH=/data/local/tmp /data/local/tmp/minicap -i\")\n\tif err != nil {\n\t\treturn false\n\t}\n\tsupported := strings.Contains(out, \"height\") && strings.Contains(out, \"width\")\n\treturn supported\n}", "func (provider *AlertProvider) IsValid() bool {\n\treturn len(provider.IntegrationKey) == 32\n}", "func IsSupported() bool {\n\treturn false\n}", "func (kt sbKeyType) IsValid() bool {\n\tswitch kt {\n\tcase SBKeyTypeGPGKeys, SBKeyTypeSignedByGPGKeys,\n\t\tSBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}", "func (e ApplicationPubSub_NATSProviderValidationError) Key() bool { return e.key }", "func ValidKey(key string) bool 
{\n\treturn len(key) <= maxKey && keyRegex.Match([]byte(key))\n}", "func (OnfSwitch_Switch_Port_Key) IsYANGGoKeyStruct() {}", "func (e ProxyProtocolUpstreamTransportValidationError) Key() bool { return e.key }", "func (e ProviderRegisterRequestValidationError) Key() bool { return e.key }", "func IsValidKey(key string) bool {\n\tif _, err := DecodeKey(key); err != nil {\n\t\treturn false\n\t}\n\treturn true\n}", "func (e RemoteJwksValidationError) Key() bool { return e.key }", "func IsSupportedToken(cTokenAddress common.Address) (ok bool) {\n\t_, ok = tokenAddressMap[cTokenAddress]\n\treturn\n}", "func (e BitStringValidationError) Key() bool { return e.key }", "func KeyCanPerformAlgorithm(key jwk.Key, alg string) bool {\n\t// \"alg\" is the supported algorithm\n\tvar keyAlg string\n\tif key != nil {\n\t\tkeyAlg = key.Algorithm().String()\n\t}\n\t// If there's no \"alg\", then allow the operation\n\tif keyAlg == \"\" {\n\t\treturn true\n\t}\n\n\treturn alg == keyAlg\n}", "func IsSupported(algorithmID AlgorithmID) bool {\n\tvar supportedAlgorithms = map[AlgorithmID]bool{\n\t\tCiota: true,\n\t\tSnow: true,\n\t}\n\treturn supportedAlgorithms[algorithmID]\n}", "func (kv KeyValue) Valid() bool {\n\treturn kv.Key.Defined() && kv.Value.Type() != INVALID\n}", "func (reg *ResourceRegistry) IsSupported(resourceType string) bool {\n\t_, ok := reg.headers[resourceType]\n\treturn ok\n}", "func (e JwtRequirementValidationError) Key() bool { return e.key }", "func (e NotSupported) IsNotSupported() {}", "func (e TagSpecifierValidationError) Key() bool { return e.key }", "func ValidAPIKey(key string) bool {\n\tif len(key) == 32 {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func isReservedMetadataKey(k string) bool {\n\tswitch {\n\tcase strings.HasPrefix(k, \"X-Prpc-\"):\n\t\treturn true\n\n\tcase k == \"Accept\",\n\t\tk == \"Accept-Encoding\",\n\t\tk == \"Content-Encoding\",\n\t\tk == \"Content-Length\",\n\t\tk == \"Content-Type\",\n\t\tk == \"X-Content-Type-Options\":\n\t\treturn true\n\n\tdefault:\n\t\treturn false\n\t}\n}", "func (k Key) CanPerformOperation(op jwk.KeyOperation) bool {\n\treturn KeyCanPerformOperation(k, op)\n}", "func (e JwksAsyncFetchValidationError) Key() bool { return e.key }", "func (r *Restriction) KeyAllowed() bool {\n\treturn r.keyAllowed\n}", "func (c *Capabilities) Supports(capability string) bool {\n\t_, ok := c.m[capability]\n\treturn ok\n}", "func (e HttpConnectionManager_UpgradeConfigValidationError) Key() bool { return e.key }", "func hasKeyFormat(s string) bool {\n\t// Checks for Arrays definitions of the format \"[%d.%d]\" where the first\n\t// parameter describes the current index and the second the total capacity.\n\treturn keyRegExp.MatchString(s)\n}", "func IsSupportedAlgorithm(alg string) bool {\n\t_, ok := supportedAlgos[alg]\n\treturn ok\n}", "func (e RequestIDExtensionValidationError) Key() bool { return e.key }", "func (e HealthCheck_TlsOptionsValidationError) Key() bool { return e.key }", "func (m WatchMode) Supported() bool {\n\tswitch m {\n\tcase WatchMode_WatchModePortable:\n\t\treturn true\n\tcase WatchMode_WatchModeForcePoll:\n\t\treturn true\n\tcase WatchMode_WatchModeNoWatch:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}", "func (e ApplicationPubSubFormatsValidationError) Key() bool { return e.key }", "func IsSupportedMultihash(encodedMultihash string) bool {\n\tcode, err := GetMultihashCode(encodedMultihash)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn multihash.ValidCode(code)\n}", "func HasKey(key string) bool { return c.HasKey(key) 
}", "func (m AlertMapper) IsSupported(version string) bool {\n\tversionRange := semver.MustParseRange(\">=0.5.0 <=0.6.0\")\n\treturn versionRange(semver.MustParse(version))\n}", "func (e TestSpecificationValidationError) Key() bool { return e.key }", "func (k Key) IsValid() bool {\n\treturn k.isValidAtTime(time.Now())\n}", "func (fi *FabricInterface) SupportsProvider(provider string) bool {\n\tif fi == nil {\n\t\treturn false\n\t}\n\n\t// format: [lib+]prov[,prov2,...]\n\tvar prefix string\n\tprovPieces := strings.Split(provider, \"+\")\n\tproviders := provPieces[0]\n\tif len(provPieces) > 1 {\n\t\tprefix = provPieces[0] + \"+\"\n\t\tproviders = provPieces[1]\n\t}\n\n\tfor _, prov := range strings.Split(providers, \",\") {\n\t\tprov = prefix + prov\n\t\tif !fi.Providers.Has(prov) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (e ChannelPayRequestValidationError) Key() bool { return e.key }", "func (e GetApplicationPubSubRequestValidationError) Key() bool { return e.key }", "func (e InternalUpstreamTransportValidationError) Key() bool { return e.key }", "func (e RegisterRequestValidationError) Key() bool { return e.key }", "func (i *Info) SupportsProvisioning() bool {\n\treturn i.ProvisionToken() != \"\"\n}", "func (e JsonToMetadata_KeyValuePairValidationError) Key() bool { return e.key }", "func (e JwtComponentValidationError) Key() bool { return e.key }", "func IsSupported(reqHeader http.Header) bool {\n\th, err := accept.ParseHeader(reqHeader.Get(\"Accept\"))\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn h.Quality(ContentType, map[string]string{\"charset\": \"utf-8\"}) > 0\n}", "func SupportsStorage() bool {\n\treturn CurrentHelper.SupportsCredentialStorage()\n}", "func (e JwtAuthenticationValidationError) Key() bool { return e.key }", "func (e MaxPciValidationError) Key() bool { return e.key }", "func (e ProviderWithAudiencesValidationError) Key() bool { return e.key }", "func (e ProviderDisregisterRequestValidationError) Key() bool { return e.key }", "func (e CalculateComplianceRequestValidationError) Key() bool { return e.key }", "func (e JwtCacheConfigValidationError) Key() bool { return e.key }", "func isSupportedKind(k reflect.Kind) bool {\n\tswitch k {\n\tcase reflect.Chan:\n\t\treturn false\n\tcase reflect.Func:\n\t\treturn false\n\tcase reflect.Interface:\n\t\treturn false\n\tcase reflect.Uintptr:\n\t\treturn false\n\tcase reflect.UnsafePointer:\n\t\treturn false\n\tcase reflect.Invalid:\n\t\treturn false\n\t}\n\treturn true\n}", "func (e MaxPciRangeValidationError) Key() bool { return e.key }", "func (p *Pair) KeyIsValid() bool {\n\tif strings.Contains(p.Key, \"\\n\") {\n\t\treturn false\n\t}\n\tif strings.Contains(p.Key, \":\") {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (e JsonToMetadata_MatchRulesValidationError) Key() bool { return e.key }", "func (o *ApiKey) HasKey() bool {\n\tif o != nil && o.Key != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (e E2SmRcPreRanfunctionDescriptionValidationError) Key() bool { return e.key }", "func (e InternalUpstreamTransport_MetadataValueSourceValidationError) Key() bool { return e.key }", "func (pk Publickey) Enabled() bool {\n\treturn pk.publicKeyFile != \"\"\n}", "func (e HeaderMatchValidationError) Key() bool { return e.key }", "func (e TLSContextValidationError) Key() bool { return e.key }", "func (e RegisterResponseValidationError) Key() bool { return e.key }", "func (e JwtHeaderValidationError) Key() bool { return e.key }", "func (e ApplicationPubSubValidationError) Key() bool { return e.key 
}", "func (pk *PublicKey) Valid() bool {\n\t// TODO not implement\n\treturn true\n}", "func (e GetClusterCredentialReqValidationError) Key() bool { return e.key }", "func (k *KeyPair) isAbleToSign() bool {\n\treturn k.privKey != nil\n}", "func (v *CephCSIVersion) Supported() bool {\n\tif !v.isAtLeast(&minimum) {\n\t\treturn false\n\t}\n\n\t// if AllowUnsupported is set also a csi-image greater than the supported ones are allowed\n\tif AllowUnsupported {\n\t\treturn true\n\t}\n\tfor _, sv := range supportedCSIVersions {\n\t\tif v.Major == sv.Major {\n\t\t\tif v.Minor == sv.Minor {\n\t\t\t\tif v.Bugfix >= sv.Bugfix {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func (e ApplicationPubSubIdentifiersValidationError) Key() bool { return e.key }", "func (e JwtClaimToHeaderValidationError) Key() bool { return e.key }", "func (e ChannelNotifyRequestValidationError) Key() bool { return e.key }", "func (o *Option) IsKey(key string) bool {\n\treturn strings.EqualFold(o.Key, key)\n}", "func (e ConfigValidationError) Key() bool { return e.key }", "func (e ConfigValidationError) Key() bool { return e.key }", "func IsKeyChange(e error) bool {\n\tif ke, ok := e.(*knownhosts.KeyError); ok {\n\t\treturn len(ke.Want) > 0\n\t}\n\treturn false\n}", "func KeyUsageIsPresent(keyUsages x509.KeyUsage, usage x509.KeyUsage) bool {\n\treturn keyUsages&usage != 0\n}", "func (k *Key) Valid(allowSpecial bool, kc KeyContext) bool {\n\tif !kc.Matches(k.kc) {\n\t\treturn false\n\t}\n\tfor _, t := range k.toks {\n\t\tif t.IsIncomplete() {\n\t\t\treturn false\n\t\t}\n\t\tif !allowSpecial && t.Special() {\n\t\t\treturn false\n\t\t}\n\t\tif t.Kind == \"\" {\n\t\t\treturn false\n\t\t}\n\t\tif t.StringID != \"\" && t.IntID != 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (c *Client) SupportsActionPlatformProperties() bool {\n\treturn supportsActionPlatformProperties(c.serverCaps)\n}" ]
[ "0.7739214", "0.7222214", "0.6585144", "0.6423749", "0.639911", "0.6383442", "0.63143355", "0.62209886", "0.62209886", "0.6198825", "0.6188862", "0.6180553", "0.6123782", "0.61136216", "0.6100996", "0.6090964", "0.6056407", "0.6042656", "0.60423505", "0.6014734", "0.59600395", "0.5957922", "0.59327316", "0.5932682", "0.5930459", "0.5905317", "0.58938944", "0.58801836", "0.58778465", "0.5848969", "0.58442533", "0.58168983", "0.5815616", "0.5813494", "0.5803138", "0.57991654", "0.57825947", "0.5778348", "0.5771927", "0.577175", "0.57638437", "0.5751851", "0.5744678", "0.5735302", "0.5717902", "0.5717387", "0.57157534", "0.5714222", "0.57031536", "0.5691768", "0.56863546", "0.5680378", "0.5661998", "0.565891", "0.5655702", "0.5624157", "0.56216097", "0.56201756", "0.56175375", "0.561462", "0.5613273", "0.5603437", "0.55989605", "0.5589919", "0.5589365", "0.55873823", "0.5586182", "0.55807203", "0.5578071", "0.557431", "0.5574097", "0.55689996", "0.5553173", "0.5551514", "0.5549539", "0.554509", "0.5530462", "0.5529329", "0.5525334", "0.55222124", "0.5520728", "0.5518113", "0.55165184", "0.5512409", "0.55084753", "0.5508351", "0.5506069", "0.5496844", "0.5494464", "0.5494035", "0.549376", "0.5488186", "0.5484565", "0.54828423", "0.5481734", "0.5481734", "0.5476597", "0.5470553", "0.54674846", "0.54647636" ]
0.5702351
49
Convenience wrapper to return Response.Data as a string.
func (r *Response) String() string { return string(r.Data) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r Response) String() string {\n\tJSON, err := json.Marshal(r)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn string(JSON)\n}", "func (r *Response) String() string {\n\tif r.Error != nil {\n\t\treturn \"\"\n\t}\n\n\tr.populateResponseByteBuffer()\n\n\treturn r.internalByteBuffer.String()\n}", "func (o SchematizedDataResponseOutput) Data() pulumi.StringOutput {\n\treturn o.ApplyT(func(v SchematizedDataResponse) string { return v.Data }).(pulumi.StringOutput)\n}", "func (this *Response) ToString() (string, error) {\n\tbytes, err := this.ReadAll()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(bytes), nil\n}", "func (self *BatchResponse) ResponseAsString() string {\n\treturn string(self.Debug.RawResponse)\n}", "func (r Response) String() string {\n\t// format:\n\t// VALUE <key> <flags> <bytes> [<cas unique>]\\r\\n\n\t//<data block>\\r\\n\n\n\tvar b bytes.Buffer\n\n\tfor i := range r.Values {\n\t\t//b.WriteString(fmt.Sprintf(\"VALUE %s %s %d\\r\\n\", r.Values[i].Key, r.Values[i].Flags, len(r.Values[i].Data)))\n\t\tb.WriteString(\"VALUE \")\n\t\tb.WriteString(r.Values[i].Key)\n\t\tb.WriteString(\" \")\n\t\tb.WriteString(r.Values[i].Flags)\n\t\tb.WriteString(\" \")\n\t\tb.WriteString(strconv.Itoa(len(r.Values[i].Data)))\n\n\t\tif r.Values[i].Cas != \"\" {\n\t\t\tb.WriteString(\" \")\n\t\t\tb.WriteString(r.Values[i].Cas)\n\t\t}\n\n\t\tb.WriteString(\"\\r\\n\")\n\n\t\tb.Write(r.Values[i].Data)\n\t\tb.WriteString(\"\\r\\n\")\n\t}\n\n\tb.WriteString(r.Response)\n\tb.WriteString(\"\\r\\n\")\n\n\treturn b.String()\n}", "func (r *Response) Data(data interface{}) JResponseWriter {\n\treturn r.Field(fieldData, data)\n}", "func (o GetRecordResultOutput) Data() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetRecordResult) string { return v.Data }).(pulumi.StringOutput)\n}", "func DataResponse(data interface{}) Response {\n\treturn Response{\n\t\tCode: 200,\n\t\tData: data,\n\t\tMessage: \"ok\",\n\t}\n}", "func StringResponse(req *http.Request, status int, headers http.Header, body string) *http.Response {\n\treturn SimpleResponse(req, status, headers, int64(len(body)), strings.NewReader(body))\n}", "func (v *DCHttpResponse) ToString() (body string, err error) {\n\tv.cacheBodyToMemory()\n\tif v.bodyErr != nil {\n\t\treturn \"\", v.bodyErr\n\t}\n\treturn string(v.body), nil\n}", "func (r *Response) ToString() string {\n\treturn r.recorder.Body.String()\n}", "func (r Response) String() string {\n\treturn fmt.Sprintf(\"%s : %s\", r.Regex.String(), r.Extra)\n}", "func (ctx *Context) Data(code int, contentType string, data []byte) {\n\tctx.Response.StatusCode = code\n\tctx.SetContentType(contentType)\n\tctx.Response.Body = ioutil.NopCloser(bytes.NewBuffer(data))\n}", "func (t AuthChallengeResponseResponse) String() string {\n\treturn string(t)\n}", "func (e Data) String() string {\n\tj, _ := e.MarshalJSON()\n\treturn string(j)\n}", "func (r *Response) String() string {\n\tif r.body == nil {\n\t\treturn \"\"\n\t}\n\treturn strings.TrimSpace(string(r.body))\n}", "func Data(status int, content []byte, headers Headers) *Response {\n\treturn &Response{\n\t\tStatus: status,\n\t\tContent: bytes.NewBuffer(content),\n\t\tHeaders: headers,\n\t}\n}", "func AsData(ip string, data interface{}) (resp Response, err error) {\n\traw, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn AsError(ip, err)\n\t}\n\n\tresp = Response{\n\t\tStatusCode: 200,\n\t\tHeaders: jsonHeaders,\n\t\tBody: string(raw),\n\t}\n\treturn\n}", "func (r *Response) String() string {\n\n\tbasicCode := 
r.BasicCode\n\tcomment := r.Comment\n\tif len(comment) == 0 && r.BasicCode == 0 {\n\t\tvar ok bool\n\t\tif comment, ok = defaultTexts.m[EnhancedStatusCode{r.Class, r.EnhancedCode}]; !ok {\n\t\t\tswitch r.Class {\n\t\t\tcase 2:\n\t\t\t\tcomment = \"OK\"\n\t\t\tcase 4:\n\t\t\t\tcomment = \"Temporary failure.\"\n\t\t\tcase 5:\n\t\t\t\tcomment = \"Permanent failure.\"\n\t\t\t}\n\t\t}\n\t}\n\te := EnhancedStatusCode{r.Class, r.EnhancedCode}\n\tif r.BasicCode == 0 {\n\t\tbasicCode = getBasicStatusCode(e)\n\t}\n\n\treturn fmt.Sprintf(\"%d %s %s\", basicCode, e.String(), comment)\n}", "func (d *Decoder) Data() string {\n\treturn string(d.buffer)\n}", "func (c *Context) String(status int, data string) error {\n\tc.SetHeader(\"Content-Type\", \"text/plain; charset=utf-8\")\n\treturn c.Bytes(status, []byte(data))\n}", "func (s IntegrationResponse) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s IntegrationResponse) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s IntegrationResponse) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (statsResponse *StatsResponse) String() string {\n\tstatsResponseBytes, err := json.Marshal(statsResponse)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\n\treturn string(statsResponseBytes)\n}", "func (p *Proxy) String() string {\n\treturn string(p.lastResponseBody)\n}", "func Data(w http.ResponseWriter, r *http.Request, v []byte) {\n\trender.Data(w, r, v)\n}", "func (s *ResponseModifier) String(body string) {\n\ts.Response.Body = ioutil.NopCloser(bytes.NewReader([]byte(body)))\n}", "func ResponseWithData(writer http.ResponseWriter, statusCode int, data interface{}) {\n\twriter.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\twriter.WriteHeader(statusCode)\n\tif data != nil {\n\t\tencodeBody(writer, data)\n\t}\n}", "func (s ResponseMetadata) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (si SignedIdentifiers) Response() *http.Response {\n\treturn si.rawResponse\n}", "func (o CertificateOutput) Data() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Certificate) pulumi.StringPtrOutput { return v.Data }).(pulumi.StringPtrOutput)\n}", "func (c *Context) Write(data interface{}) {\n\t// use DataWriter to write response if possible\n\tif dw, ok := c.Response.(DataWriter); ok {\n\t\tif err := dw.WriteData(data); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn\n\t}\n\n\tswitch data.(type) {\n\tcase []byte:\n\t\tc.Response.Write(data.([]byte))\n\tcase string:\n\t\tc.Response.Write([]byte(data.(string)))\n\tdefault:\n\t\tif data != nil {\n\t\t\tfmt.Fprint(c.Response, data)\n\t\t}\n\t}\n}", "func (bblr BlobsBreakLeaseResponse) Response() *http.Response {\n\treturn bblr.rawResponse\n}", "func (self *GeocodeResult) ResponseAsString() string {\n\treturn string(self.Debug.RawResponse)\n}", "func responseText(h func(r *http.Request) (interface{}, int, error)) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tdata, status, err := h(r)\n\t\tif err != nil {\n\t\t\tdata = err.Error()\n\t\t}\n\n\t\tvar b bytes.Buffer\n\t\tswitch v := data.(type) {\n\t\tcase []*storage.Item:\n\t\t\tfor _, item := range v {\n\t\t\t\t// https://www.ietf.org/rfc/rfc2046.txt says that the newline of\n\t\t\t\t// text is CRLF\n\t\t\t\tfmt.Fprintf(&b, \"%v\\r\\n\", item.Value)\n\t\t\t}\n\t\tdefault:\n\t\t\tfmt.Fprintf(&b, \"%v\\r\\n\", data)\n\t\t}\n\n\t\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\t\tw.WriteHeader(status)\n\t\tw.Write(b.Bytes())\n\t}\n}", "func (o SchematizedDataResponsePtrOutput) 
Data() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *SchematizedDataResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Data\n\t}).(pulumi.StringPtrOutput)\n}", "func (o SnapmirrorResyncResponse) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (c *Controller) Data(data []byte) {\n ctx := c.Context()\n r := render.NewData(data)\n\n ctx.PushLog(\"status\", http.StatusOK)\n ctx.SetHeader(\"Content-Type\", r.ContentType())\n ctx.End(r.HttpCode(), r.Content())\n}", "func (self *WebServer) respData(c *gin.Context, status, code int,\n\tmessage string, data interface{}) {\n\tc.JSON(status, &CR{\n\t\tMessage: message,\n\t\tCode: code,\n\t\tTimestamp: time.Now().Unix(),\n\t})\n}", "func (dr downloadResponse) Response() *http.Response {\n\treturn dr.rawResponse\n}", "func (res Responder) String() string {\n\treturn res.b.String()\n}", "func (resp *Response) Text() (string, error) {\n\tb, err := resp.DecompressedContent()\n\ts := string(b)\n\treturn s, err\n}", "func String(c Doer, r *Request) string {\n\tc <- r\n\treturn (<-r.resp).(string)\n}", "func (r *Response) ResStr() (string, error) {\n\treturn r.ResultString, r.Error\n}", "func responseString(rw http.ResponseWriter, statusCode int, s string) {\n\trw.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\trw.WriteHeader(statusCode)\n\trw.Write([]byte(s))\n}", "func (r *Response) Raw() []byte {\n\treturn r.raw\n}", "func (p *FsData) Data() string {\n\treturn string(p.byteS)\n}", "func (s FunctionResponse) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o SSLHealthCheckResponseOutput) Response() pulumi.StringOutput {\n\treturn o.ApplyT(func(v SSLHealthCheckResponse) string { return v.Response }).(pulumi.StringOutput)\n}", "func (s MethodResponse) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (cdr ContainersDeleteResponse) Response() *http.Response {\n\treturn cdr.rawResponse\n}", "func (s ComponentResponse) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r *request) Data() []byte {\n\treturn r.msg.Data\n}", "func (h *ResponseHeader) String() string {\n\treturn string(h.Header())\n}", "func (cblr ContainersBreakLeaseResponse) Response() *http.Response {\n\treturn cblr.rawResponse\n}", "func (r Response) Bytes() []byte {\n\tvar b bytes.Buffer\n\tenc := gob.NewEncoder(&b)\n\tenc.Encode(&r)\n\n\treturn b.Bytes()\n}", "func (r Response) Bytes() []byte {\n\tvar b bytes.Buffer\n\tenc := gob.NewEncoder(&b)\n\tenc.Encode(&r)\n\n\treturn b.Bytes()\n}", "func (lpr LeasePathResponse) Response() *http.Response {\n\treturn lpr.rawResponse\n}", "func (bdr BlobsDeleteResponse) Response() *http.Response {\n\treturn bdr.rawResponse\n}", "func (resp *Response) Text(e ...encoding.Encoding) (string, error) {\n\tb, err := resp.Content()\n\tif err != nil || len(e) == 0 {\n\t\treturn b2s(b), err\n\t}\n\n\tb, err = e[0].NewDecoder().Bytes(b)\n\treturn b2s(b), err\n}", "func (dpr DeletePathResponse) Response() *http.Response {\n\treturn dpr.rawResponse\n}", "func RespondData(w http.ResponseWriter, data string) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tfmt.Fprintf(w, `{\n\t\t\"data\": %s\n\t}`, string(data))\n}", "func stringifyHTTPResponseBody(r *http.Response) (body string) {\n\tif r == nil {\n\t\treturn \"\"\n\t}\n\n\tbuf := new(bytes.Buffer)\n\tbuf.ReadFrom(r.Body)\n\tbody = buf.String()\n\treturn body\n}", "func (bstr BlobsSetTierResponse) Response() *http.Response {\n\treturn bstr.rawResponse\n}", "func (c *Context) Data(statusCode int, contentType 
string, data []byte) {\n\tc.SetStatusCode(statusCode)\n\tc.SetContentType(contentType)\n\tc.Write(data)\n}", "func (resp *CommonRPCResponse) String() string {\n\treturn fmt.Sprintf(\"<Code: %d, Msg: %s>\", resp.Code, resp.Msg)\n}", "func (r *TestRequest) Data() []byte {\n\treturn r.data\n}", "func (o HTTPHealthCheckResponseOutput) Response() pulumi.StringOutput {\n\treturn o.ApplyT(func(v HTTPHealthCheckResponse) string { return v.Response }).(pulumi.StringOutput)\n}", "func (o HTTPSHealthCheckResponseOutput) Response() pulumi.StringOutput {\n\treturn o.ApplyT(func(v HTTPSHealthCheckResponse) string { return v.Response }).(pulumi.StringOutput)\n}", "func (bur BlobsUndeleteResponse) Response() *http.Response {\n\treturn bur.rawResponse\n}", "func (abcr AppendBlobsCreateResponse) Response() *http.Response {\n\treturn abcr.rawResponse\n}", "func (ks *KerbServer) Response() string {\n\treturn C.GoString(ks.state.response)\n}", "func (t *Transaction) Data() string {\n\treturn utils.EncodeToBase64(t.data)\n}", "func (o ExportPolicyDestroyResponse) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (crlr ContainersReleaseLeaseResponse) Response() *http.Response {\n\treturn crlr.rawResponse\n}", "func (s ResponseDetails) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (ctx *Context) String(code int, s string) (err error) {\n\tctx.response.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\tctx.response.WriteHeader(code)\n\t_, err = ctx.response.Write([]byte(s))\n\treturn\n}", "func (s *StashConsumer) Data() string {\n\tb, _ := json.Marshal(s)\n\treturn string(b)\n}", "func (s GetIntegrationResponseOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (kc *KerbClient) Response() string {\n\treturn C.GoString(kc.state.response)\n}", "func (o SnapmirrorCreateResponse) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func encodeStringResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error {\n\tw.Header().Set(\"Content-Type\", \"application/json;charset=utf-8\")\n\treturn json.NewEncoder(w).Encode(response)\n}", "func (sss StorageServiceStats) Response() *http.Response {\n\treturn sss.rawResponse\n}", "func (o *Posttextresponse) String() string {\n\tj, _ := json.Marshal(o)\n\tstr, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n\treturn str\n}", "func (brlr BlobsReleaseLeaseResponse) Response() *http.Response {\n\treturn brlr.rawResponse\n}", "func (dfr DeleteFilesystemResponse) Response() *http.Response {\n\treturn dfr.rawResponse\n}", "func (s ResponseOutputItem) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s *Server) Response(body string) string {\n\treturn \"HTTP/1.1 200 OK\\r\\n\" +\n\t\t\"Content-Length: \" + strconv.Itoa(len(body)) + lineBreaker +\n\t\t\"Content-Type: text/html\\r\\n\" +\n\t\t\"Connection: close\\r\\n\" +\n\t\tlineBreaker + body\n}", "func (o ExportPolicyCreateResponse) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (s ComputeResponse) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (a *ADMResponse) Bytes() ([]byte, error) {\n\treturn json.Marshal(a)\n}", "func (o LunOnlineResponse) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (o LunGetSerialNumberResponse) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (bclr BlobsChangeLeaseResponse) Response() *http.Response {\n\treturn bclr.rawResponse\n}", "func (o QtreeCreateResponse) String() string {\n\treturn 
ToString(reflect.ValueOf(o))\n}", "func (o *Createshareresponse) String() string {\n\tj, _ := json.Marshal(o)\n\tstr, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n\treturn str\n}", "func GetStringFromResponse(r *http.Response) (string, error) {\n\t// If the content length is not set, limit reading to 8K worth of data.\n\treturn getResponse(r, strMax)\n}", "func (c *Context) Data(status int, v []byte) {\n\tif c.Header().Get(ContentType) == \"\" {\n\t\tc.Header().Set(ContentType, ContentBinary)\n\t}\n\tc.WriteHeader(status)\n\tc.Write(v)\n}" ]
[ "0.65663695", "0.6464477", "0.63433754", "0.62904406", "0.62059665", "0.6119778", "0.6021124", "0.6019827", "0.5965781", "0.5939541", "0.5917699", "0.5897104", "0.58647394", "0.5861402", "0.5832401", "0.5828792", "0.57842326", "0.57667345", "0.5754301", "0.56678456", "0.56670046", "0.5643823", "0.56369996", "0.56369996", "0.56369996", "0.5636495", "0.55978066", "0.55825263", "0.55708325", "0.55707943", "0.5566887", "0.5561343", "0.55561554", "0.5553434", "0.5551459", "0.554803", "0.55442375", "0.55431134", "0.55362165", "0.55351937", "0.55344903", "0.5526572", "0.5523935", "0.5521774", "0.5519714", "0.55182266", "0.5517297", "0.5507337", "0.55011487", "0.54996926", "0.5499683", "0.548567", "0.54739517", "0.54674244", "0.5462056", "0.5459856", "0.54571277", "0.54446375", "0.54446375", "0.54322565", "0.5427457", "0.542705", "0.5410796", "0.5408524", "0.5406744", "0.5406536", "0.54042363", "0.5398121", "0.5390205", "0.5381489", "0.53810036", "0.5380017", "0.53791237", "0.53728557", "0.5361761", "0.536052", "0.535204", "0.53392357", "0.53368485", "0.533627", "0.53334033", "0.5327318", "0.5325814", "0.5324395", "0.5323145", "0.5319973", "0.5318605", "0.5315278", "0.53135794", "0.53032255", "0.53005874", "0.52952033", "0.5292796", "0.5288559", "0.5286346", "0.5258392", "0.5258187", "0.52544814", "0.5247798", "0.52435917" ]
0.75903463
0
Convenience wrapper to return Response.Data as a bool.
func (r *Response) Bool() (bool, error) { return strconv.ParseBool(r.String()) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (res Response) AsBool() (bool, error) {\n\treturn res.Bits.AsBool(), res.Error\n}", "func (client PrimitiveClient) GetBoolResponder(resp *http.Response) (result BooleanWrapper, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK),\n autorest.ByUnmarshallingJSON(&result),\n autorest.ByClosing())\n result.Response = autorest.Response{Response: resp}\n return\n}", "func (data *Data) Bool(s ...string) bool {\n\treturn data.Interface(s...).(bool)\n}", "func (o *Wireless) GetDataOk() (map[string]string, bool) {\n\tif o == nil || o.Data == nil {\n\t\tvar ret map[string]string\n\t\treturn ret, false\n\t}\n\treturn *o.Data, true\n}", "func (o *ThingListResponse) HasData() bool {\n\tif o != nil && o.Data != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func DataResponse(data interface{}) Response {\n\treturn Response{\n\t\tCode: 200,\n\t\tData: data,\n\t\tMessage: \"ok\",\n\t}\n}", "func (m Message) IsResponse() bool {\n\treturn m.Y == \"r\"\n}", "func checkForEmptyResponseData(responseBody []byte) (bool, error) {\n\tvar basicResponse BasicResponse\n\terr := json.Unmarshal(responseBody, &basicResponse)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\temptyData := make([]interface{}, 0)\n\treturn reflect.DeepEqual(emptyData, basicResponse.Data), nil\n}", "func (dr *DeleteResponse) IsOk() bool {\n\treturn dr.ok\n}", "func (v *Value) Bool() bool {\n return Util.ToBool(v.data)\n}", "func (a *Response) Success() bool {\n\treturn a.Error == nil\n}", "func (o *SecretBagPatchable) GetDataOk() (*map[string]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Data, true\n}", "func (r *Decoder) Bool() bool {\n\tr.Sync(SyncBool)\n\tx, err := r.Data.ReadByte()\n\tr.checkErr(err)\n\tassert(x < 2)\n\treturn x != 0\n}", "func checkBool(resp Response, inErr error) (m Bool, outErr error) {\n\tif inErr != nil {\n\t\treturn false, inErr\n\t}\n\tif resp.Error() != nil {\n\t\treturn false, resp.Error()\n\t}\n\tm, ok := resp.(Bool)\n\tif !ok {\n\t\treturn false, fmt.Errorf(\"Expected a JSON bool, got %q instead\", resp)\n\t}\n\treturn\n}", "func (i Identifiable) AsResponse() (*Response, bool) {\n\treturn nil, false\n}", "func (i Intangible) AsResponse() (*Response, bool) {\n\treturn nil, false\n}", "func (j *JSONData) Bool(path ...interface{}) (bool, error) {\n\tjson, err := j.get(path...)\n\treturn json.MustBool(), err\n}", "func (lb LocalBusiness) AsResponse() (*Response, bool) {\n\treturn nil, false\n}", "func (r ResponseAPI) GetSuccess() bool {\n\treturn r.Data.Success\n}", "func (o *SecretBagWritable) GetDataOk() (*map[string]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Data, true\n}", "func (d *Decoder) Bool() bool {\n\tb := d.Byte()\n\treturn b != 0\n}", "func (o *PrivilegedBagData) GetDataOk() (*map[string]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Data, true\n}", "func Bool() Decoder {\n\treturn newDecoder(func(b []byte) (interface{}, error) {\n\t\tvar bl bool\n\t\tif err := json.Unmarshal(b, &bl); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn bl, nil\n\t})\n}", "func (lb LodgingBusiness) AsResponse() (*Response, bool) {\n\treturn nil, false\n}", "func (h Hotel) AsResponse() (*Response, bool) {\n\treturn nil, false\n}", "func (resp *Response) Ok() bool {\n\treturn resp.OK()\n}", "func (r Response) IsSuccess() bool {\n\treturn r.Result != nil\n}", "func (t Thing) AsResponse() (*Response, bool) {\n\treturn nil, false\n}", "func (o 
*LinkTypeSingle) GetDataOk() (*LinkTypeRead, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Data, true\n}", "func (res *Response) isSuccessfulResponse() bool {\n\treturn res.IsSuccessfulResponse\n}", "func (o *PrivilegedTextDataAllOf) GetDataOk() (*string, bool) {\n\tif o == nil || o.Data == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Data, true\n}", "func (r *Response) IsOk() bool {\n\treturn r.Code == ok\n}", "func (r *response) Written() bool {\n\treturn r.wrote\n}", "func (a Answer) AsResponse() (*Response, bool) {\n\treturn nil, false\n}", "func (o *CartaoProduto) GetDataOk() (*time.Time, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Data.Get(), o.Data.IsSet()\n}", "func GetBoolResponse(question string, defaultAnswer bool, reader *os.File) (bool, error) {\n\tvar result bool\n\treadr := bufio.NewReader(reader)\n\tlog.Warn(question)\n\n\ttext, _ := readr.ReadString('\\n')\n\ttext = strings.Replace(text, \"\\n\", \"\", 1)\n\tif text == \"\" {\n\t\treturn defaultAnswer, nil\n\t}\n\n\tif text == \"Yes\" || text == \"YES\" || text == \"Y\" || text == \"y\" {\n\t\tresult = true\n\t} else {\n\t\tresult = false\n\t}\n\n\treturn result, nil\n}", "func (o *PrivilegedTextData) GetDataOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Data, true\n}", "func (r Response) AsResponse() (*Response, bool) {\n\treturn &r, true\n}", "func (o *ThingListResponse) GetDataOk() (*[]ThingResponse, bool) {\n\tif o == nil || o.Data == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Data, true\n}", "func isResponseSuccess(resp *http.Response) bool {\n\tif resp == nil {\n\t\treturn false\n\t}\n\tstatusCode := resp.StatusCode\n\n\treturn statusCode >= http.StatusOK && statusCode <= 299\n}", "func (w *responseWrapper) IsOK() bool {\n\treturn w.status == 200\n}", "func (ta TouristAttraction) AsResponse() (*Response, bool) {\n\treturn nil, false\n}", "func IfReturnHTMLResponse(w http.ResponseWriter, r *http.Request) bool {\n\taccepts := r.Header[\"Accept\"]\n\tfor _, accept := range accepts {\n\t\tfields := strings.Split(accept, \",\")\n\t\tfor _, field := range fields {\n\t\t\tif field == contentTypeHtml {\n\t\t\t\tw.Header().Set(\"Content-Type\", contentTypeHtml)\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}", "func (mt MovieTheater) AsResponse() (*Response, bool) {\n\treturn nil, false\n}", "func (reply Reply) Bool() (bool, error) {\n\tresult, err := redis.Bool(reply.data, reply.err)\n\tif err != nil {\n\t\treturn false, redisError(err)\n\t}\n\n\treturn result, nil\n}", "func (cw CreativeWork) AsResponse() (*Response, bool) {\n\treturn nil, false\n}", "func Bool(r interface{}, err error) (bool, error) {\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tswitch r := r.(type) {\n\tcase bool:\n\t\treturn r, err\n\t// Very common in redis to reply int64 with 0 for bool flag.\n\tcase int:\n\t\treturn r != 0, nil\n\tcase int64:\n\t\treturn r != 0, nil\n\tcase []byte:\n\t\treturn strconv.ParseBool(string(r))\n\tcase string:\n\t\treturn strconv.ParseBool(r)\n\tcase nil:\n\t\treturn false, simplesessions.ErrNil\n\t}\n\treturn false, simplesessions.ErrAssertType\n}", "func (o *V1TabularOutput) GetDataOk() (*string, bool) {\n\tif o == nil || o.Data == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Data, true\n}", "func (o *CheckCanDeleteMonitorResponse) GetDataOk() (*CheckCanDeleteMonitorResponseData, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Data, true\n}", "func (eb EntertainmentBusiness) AsResponse() (*Response, bool) 
{\n\treturn nil, false\n}", "func (o *VulnUpdateNotification) GetDataOk() (*VulnUpdateNotificationData, bool) {\n\tif o == nil || o.Data == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Data, true\n}", "func (fe FoodEstablishment) AsResponse() (*Response, bool) {\n\treturn nil, false\n}", "func (cr *ClientResponse) Ok() bool {\n\treturn cr.ok\n}", "func (o MethodResponseOutput) ResponseStreaming() pulumi.BoolOutput {\n\treturn o.ApplyT(func(v MethodResponse) bool { return v.ResponseStreaming }).(pulumi.BoolOutput)\n}", "func (o *Venda) GetDataOk() (*time.Time, bool) {\n\tif o == nil || o.Data == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Data, true\n}", "func (o *Wireless) HasData() bool {\n\tif o != nil && o.Data != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func Validate(data interface{}) bool {\n\t// Check if the data can be serialized\n\tif _, err := json.Marshal(data); err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"Cannot serialize response as json\")\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\treturn false\n\t}\n\n\treturn true\n}", "func ReadBool(buffer []byte, offset int) bool {\n return buffer[offset] != 0\n}", "func (a Airport) AsResponse() (*Response, bool) {\n\treturn nil, false\n}", "func (r *Reply) Bool() (bool, error) {\n\tif r.Type == ErrorReply {\n\t\treturn false, r.Err\n\t}\n\ti, err := r.Int()\n\tif err == nil {\n\t\tif i == 0 {\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn true, nil\n\t}\n\n\ts, err := r.Str()\n\tif err == nil {\n\t\tif s == \"0\" {\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn true, nil\n\t}\n\n\treturn false, errors.New(\"boolean value is not available for this reply type\")\n}", "func (o *PermissionOptionsPagination) GetDataOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Data, true\n}", "func (l License) AsResponse() (*Response, bool) {\n\treturn nil, false\n}", "func (c *Context) ResponseSent() bool {\n\treturn !c.WatchOnly() && c.PreparationPayloads[c.MyIndex] != nil\n}", "func (er *ExitResponse) IsOk() bool {\n\treturn er.Ok\n}", "func (n *eeNum) bool() *bool { return (*bool)(unsafe.Pointer(&n.data)) }", "func (r *Redis) Bool(reply interface{}, err error) (bool, error) {\n\treturn redigo.Bool(reply, err)\n}", "func (r *GetAttrsResult) Bool() bool {\n\tif !r.ok {\n\t\treturn false\n\t}\n\treturn true\n}", "func (c *QuerySM) CanResponse() bool {\n\treturn true\n}", "func IsResponse(msg *Message) bool {\n\tswitch msg.Head.Type {\n\tcase AckChallenge, AckAuth, AckLogout, AckInfo:\n\t\treturn true\n\t}\n\treturn false\n}", "func (r *Response) Data(data interface{}) JResponseWriter {\n\treturn r.Field(fieldData, data)\n}", "func (o *SecretValue) GetDataOk() (*[]int32, bool) {\n\tif o == nil || o.Data == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Data, true\n}", "func (o MethodOutput) ResponseStreaming() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v Method) *bool { return v.ResponseStreaming }).(pulumi.BoolPtrOutput)\n}", "func (r Restaurant) AsResponse() (*Response, bool) {\n\treturn nil, false\n}", "func (client PrimitiveClient) GetBoolSender(req *http.Request) (*http.Response, error) {\n return autorest.SendWithSender(client, req)\n}", "func (b *Buffer) HasData() bool {\n\treturn b.data.Len() != 0\n}", "func (p Place) AsResponse() (*Response, bool) {\n\treturn nil, false\n}", "func (o *FakeObject) Bool() bool { return o.Value.(bool) }", "func (RPCResponse *RPCResponse) GetBool() (bool, error) {\n\tval, ok := RPCResponse.Result.(bool)\n\tif !ok {\n\t\treturn false, fmt.Errorf(\"could not parse bool from 
%s\", RPCResponse.Result)\n\t}\n\n\treturn val, nil\n}", "func (this *JSONObject) Bool(key string) bool {\n\treturn this.innerMap[key].(bool)\n}", "func (o *PolicyPaginationAllOf) GetDataOk() (*[]PolicyExtended, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Data, true\n}", "func (imr *InvokeMethodResponse) HasMessageData() bool {\n\tm := imr.r.Message\n\treturn m != nil && m.Data != nil && len(m.Data.Value) > 0\n}", "func (e Entities) AsResponse() (*Response, bool) {\n\treturn nil, false\n}", "func (o *V2TcpConfiguration) GetDataOk() (*string, bool) {\n\tif o == nil || o.Data == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Data, true\n}", "func (j *JSONData) Boolean() *bool {\n\tif j != nil && j.value != nil {\n\t\tif bo, ok := (*j.value).(bool); ok {\n\t\t\treturn &bo\n\t\t}\n\t}\n\treturn nil\n}", "func (rb ResponseBase) AsResponse() (*Response, bool) {\n\treturn nil, false\n}", "func (o *FormField) HasResponse() bool {\n\tif o != nil && o.Response != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *Service) GetServiceDataOk() (map[string]string, bool) {\n\tif o == nil || o.ServiceData == nil {\n\t\tvar ret map[string]string\n\t\treturn ret, false\n\t}\n\treturn *o.ServiceData, true\n}", "func (o *UserRolesResponse) GetDataOk() (*[][]int32, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Data, true\n}", "func (o *UserArray) GetDataOk() (*[]UserRead, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Data, true\n}", "func (r *RedisSession) Bool(reply interface{}) (bool, error) {\n\treturn redis.Bool(reply, nil)\n}", "func (s Stored) Ok() bool {\n\treturn s.Type.Ok() && s.Encoding.Ok()\n}", "func (ir *IndexResponse) IsOk() bool {\n\treturn ir.Ok\n}", "func FileGetCbDataAsBoolPointer(data FileCbData) *bool {\n\tpdata, ok := data.(*bool)\n\tif !ok {\n\t\t// This never happens.\n\t\tpanic(\"invalid type\")\n\t}\n\treturn pdata\n}", "func (i Identifiable) AsBasicResponse() (BasicResponse, bool) {\n\treturn nil, false\n}", "func (o *ModelsPaginatedResultModelsBackupCredentials) HasData() bool {\n\tif o != nil && o.Data != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *GetCertificateInfoForbidden) IsSuccess() bool {\n\treturn false\n}", "func (m *WipePostRequestBody) GetKeepEnrollmentData()(*bool) {\n return m.keepEnrollmentData\n}", "func (cs CivicStructure) AsResponse() (*Response, bool) {\n\treturn nil, false\n}", "func (c *GenericNack) CanResponse() bool {\n\treturn false\n}", "func (r *Response) IsFailure() bool {\n\treturn r.Type > 0\n}" ]
[ "0.59523034", "0.5925134", "0.5816356", "0.571438", "0.56353575", "0.55800897", "0.55623347", "0.55264825", "0.5524063", "0.55136734", "0.5495956", "0.5488293", "0.5482601", "0.545401", "0.5432316", "0.53984827", "0.53884727", "0.5387247", "0.5371247", "0.5367065", "0.536553", "0.53460765", "0.5337091", "0.532614", "0.53149223", "0.53092957", "0.5304232", "0.5299621", "0.5293016", "0.5290246", "0.5286718", "0.5273297", "0.5269625", "0.52651286", "0.52647024", "0.5256702", "0.52563006", "0.52503973", "0.52492476", "0.5238092", "0.5231029", "0.52283245", "0.52264595", "0.52173114", "0.51942897", "0.5167938", "0.5154834", "0.5150154", "0.5145815", "0.5144509", "0.5135506", "0.5120678", "0.5115233", "0.5112514", "0.51109993", "0.5099722", "0.509696", "0.5086354", "0.50815654", "0.50814015", "0.50782794", "0.5068242", "0.5062583", "0.50592715", "0.5058435", "0.5055148", "0.50473577", "0.50383556", "0.5034205", "0.5022259", "0.5000023", "0.49995464", "0.49923468", "0.49916434", "0.4981687", "0.49750072", "0.49717158", "0.49673772", "0.49526745", "0.49473128", "0.49364543", "0.49361658", "0.49222913", "0.49121898", "0.49064037", "0.48992494", "0.48988822", "0.48936146", "0.48924953", "0.48904058", "0.48886278", "0.48725554", "0.4871272", "0.48667365", "0.4865452", "0.48546568", "0.4852106", "0.4849131", "0.4844961", "0.4837082" ]
0.58634365
2
Convenience wrapper to return Response.Data as an int.
func (r *Response) Int() (int, error) { return strconv.Atoi(r.String()) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (res Response) AsInt() (int, error) {\n\treturn res.Bits.AsInt(), res.Error\n}", "func (r *Decoder) Int() int { x := r.Int64(); v := int(x); assert(int64(v) == x); return v }", "func GetInt64Data(response *bcsmonitor.QueryResponse) int64 {\n\tif len(response.Data.Result) == 0 {\n\t\treturn 0\n\t}\n\tvalueStr, ok := response.Data.Result[0].Value[1].(string)\n\tif !ok {\n\t\treturn 0\n\t}\n\tvalue, err := strconv.ParseInt(valueStr, 10, 64)\n\tif err != nil {\n\t\treturn 0\n\t}\n\treturn value\n}", "func (s *SliceInt) Data() []int {\n\tif s == nil {\n\t\treturn nil\n\t}\n\treturn s.data\n}", "func (v *Value) Int() int {\n return Util.ToInt(v.data)\n}", "func (ctx *Context) Data(code int, contentType string, data []byte) {\n\tctx.Response.StatusCode = code\n\tctx.SetContentType(contentType)\n\tctx.Response.Body = ioutil.NopCloser(bytes.NewBuffer(data))\n}", "func (self *HttpParam) Int() int { if self.Value != nil { return self.Value.(int) } else { return int(0) } }", "func (c *Context) Data(data interface{}, total ...int64) {\n\tc.responseFormat.SetData(data, total...)\n}", "func (client PrimitiveClient) GetIntResponder(resp *http.Response) (result IntWrapper, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK),\n autorest.ByUnmarshallingJSON(&result),\n autorest.ByClosing())\n result.Response = autorest.Response{Response: resp}\n return\n}", "func (IntCodec) Read(data []byte, ptr unsafe.Pointer, wt WireType) (n int, err error) {\n\ti, n := ReadVarInt(data)\n\tif n < 0 {\n\t\treturn 0, fmt.Errorf(\"corrupt var int\")\n\t}\n\t*(*int)(ptr) = int(i)\n\treturn n, nil\n}", "func (id *RequestID) Int() (int, error) {\n\treturn id.intValue, id.intError\n}", "func Int(c Doer, r *Request) int {\n\tc <- r\n\treturn (<-r.resp).(int)\n}", "func (j *JSONData) Int(path ...interface{}) (int, error) {\n\tjson, err := j.get(path...)\n\treturn json.MustInt(), err\n}", "func (n *eeNum) int() *int { return (*int)(unsafe.Pointer(&n.data)) }", "func responseSize(res *http.Response) int64 {\n\tfield, ok := res.Header[\"Content-Length\"]\n\tif !ok {\n\t\treturn -1\n\t}\n\tif len(field) != 1 {\n\t\treturn -1\n\t}\n\tsize, err := strconv.ParseInt(field[0], 0, 64)\n\tif err != nil {\n\t\treturn -1\n\t}\n\treturn size\n}", "func (code Code) Int() int {\n\treturn int(code)\n}", "func (w *ResponseWriter) Write(data []byte) (int, error) {\n\tif w.code == 0 {\n\t\tw.WriteHeader(http.StatusOK)\n\t}\n\tif w.writeError != nil {\n\t\treturn -1, w.writeError\n\t}\n\tw.buffer = append(w.buffer, data...)\n\treturn len(data), nil\n}", "func (res Response) AsInt32() (int32, error) {\n\treturn res.Bits.AsInt32(), res.Error\n}", "func (Int8Codec) Read(data []byte, ptr unsafe.Pointer, wt WireType) (n int, err error) {\n\ti, n := ReadVarInt(data)\n\tif n < 0 {\n\t\treturn 0, fmt.Errorf(\"corrupt var int\")\n\t}\n\n\t*(*int8)(ptr) = int8(i)\n\treturn n, nil\n}", "func (res Response) AsInt64() (int64, error) {\n\treturn res.Bits.AsInt64(), res.Error\n}", "func DataResponse(data interface{}) Response {\n\treturn Response{\n\t\tCode: 200,\n\t\tData: data,\n\t\tMessage: \"ok\",\n\t}\n}", "func (reply Reply) Int() (int, error) {\n\tresult, err := redis.Int(reply.data, reply.err)\n\tif err != nil {\n\t\treturn 0, redisError(err)\n\t}\n\n\treturn result, nil\n}", "func intDataSize(data interface{}) int {\n\tswitch data := data.(type) {\n\tcase int8, *int8, *uint8:\n\t\treturn 1\n\tcase []int8:\n\t\treturn len(data)\n\tcase []uint8:\n\t\treturn len(data)\n\tcase int16, 
*int16, *uint16:\n\t\treturn 2\n\tcase []int16:\n\t\treturn 2 * len(data)\n\tcase []uint16:\n\t\treturn 2 * len(data)\n\tcase int32, *int32, *uint32:\n\t\treturn 4\n\tcase []int32:\n\t\treturn 4 * len(data)\n\tcase []uint32:\n\t\treturn 4 * len(data)\n\tcase int64, *int64, *uint64:\n\t\treturn 8\n\tcase []int64:\n\t\treturn 8 * len(data)\n\tcase []uint64:\n\t\treturn 8 * len(data)\n\t}\n\treturn 0\n}", "func (c Code) DataLength() int {\n\treturn codeDataLength[c]\n}", "func (dr downloadResponse) ContentLength() int64 {\n\ts := dr.rawResponse.Header.Get(\"Content-Length\")\n\tif s == \"\" {\n\t\treturn -1\n\t}\n\ti, err := strconv.ParseInt(s, 10, 64)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn i\n}", "func (r *Result) Int() int {\n\tif r.Error != nil {\n\t\treturn 0\n\t}\n\n\treturn convert.ToInt(r.Value)\n\n}", "func (r *ResponseReverter) Write(buf []byte) (int, error) {\n\tn, err := r.ResponseWriter.Write(buf)\n\treturn n, err\n}", "func (self *WebServer) respData(c *gin.Context, status, code int,\n\tmessage string, data interface{}) {\n\tc.JSON(status, &CR{\n\t\tMessage: message,\n\t\tCode: code,\n\t\tTimestamp: time.Now().Unix(),\n\t})\n}", "func (c *Context) Data(statusCode int, contentType string, data []byte) {\n\tc.SetStatusCode(statusCode)\n\tc.SetContentType(contentType)\n\tc.Write(data)\n}", "func (w *responseWriter) Write(data []byte) (int, error) {\n\tif w.status == 0 {\n\t\tw.status = http.StatusOK\n\t}\n\tsize, err := w.rw.Write(data)\n\tw.size += size\n\treturn size, err\n}", "func (d Data) Int(key string) (int, error) {\n\tval, _ := d[key]\n\tswitch v := val.(type) {\n\tcase int:\n\t\treturn v, nil\n\tcase int32:\n\t\treturn int(v), nil\n\tcase int64:\n\t\treturn int(v), nil\n\tcase nil:\n\t\treturn 0, ErrNotFound\n\tdefault:\n\t\treturn 0, ErrUnexpectedType\n\t}\n}", "func (void *IntegerResponse) Commit() (*int64, *http.Response, error) {\n\tvar errs []error\n\n\tres := &http.Response{}\n\tmodel := struct {\n\t\tErrorResponse\n\t\tResult *int64 `json:\"result,omitempty\"`\n\t}{}\n\n\toperation := func() error {\n\t\tres, _, errs = void.Request.EndStruct(&model)\n\t\tif len(errs) > 0 {\n\t\t\treturn errs[0]\n\t\t}\n\t\treturn nil\n\t}\n\n\tif err := backoff.Retry(operation, void.Client.expBackOff); err != nil {\n\t\treturn nil, MakeHTTPResponse(void.Request), err\n\t}\n\tif res.StatusCode != http.StatusOK {\n\t\treturn nil, res, fmt.Errorf(\"%v %v\", model.ErrorCode, model.Description)\n\t}\n\n\treturn model.Result, res, nil\n}", "func (c *Controller) Data(data []byte) {\n ctx := c.Context()\n r := render.NewData(data)\n\n ctx.PushLog(\"status\", http.StatusOK)\n ctx.SetHeader(\"Content-Type\", r.ContentType())\n ctx.End(r.HttpCode(), r.Content())\n}", "func (p *Packet) ReadInt() int {\n\treturn int(p.readVarLengthInt(4))\n}", "func (res *response) Unmarshal(data []byte) (uint64, error) {\n\tvar offset uint64\n\tvar n uint64\n\tn = code.DecodeVarint(data[offset:], &res.Seq)\n\toffset += n\n\tif data[offset] > 0 {\n\t\tn = code.DecodeString(data[offset:], &res.Error)\n\t} else {\n\t\tn = 1\n\t}\n\toffset += n\n\tif data[offset] > 127 {\n\t\tn = code.DecodeBytes(data[offset:], &res.Reply)\n\t} else if data[offset] > 0 {\n\t\tvar s = 1 + uint64(data[offset])\n\t\tres.Reply = data[offset+1 : offset+s]\n\t\tn = s\n\t} else {\n\t\tn = 1\n\t}\n\toffset += n\n\treturn offset, nil\n}", "func (data *Data) GetInt(key string, defaultInt int) (int, error) {\n\tbs, err := data.get(nil, key)\n\tif err != nil {\n\t\tlog.Printf(\"data.get Error : %+v\", err)\n\t\treturn defaultInt, 
err\n\t}\n\tif bs == nil {\n\t\treturn defaultInt, nil\n\t}\n\tbi := new(big.Int)\n\tbi.SetBytes(bs)\n\treturn int(bi.Int64()), nil\n}", "func (Int64Codec) Read(data []byte, ptr unsafe.Pointer, wt WireType) (n int, err error) {\n\ti, n := ReadVarInt(data)\n\tif n < 0 {\n\t\treturn 0, fmt.Errorf(\"corrupt var int\")\n\t}\n\t*(*int64)(ptr) = int64(i)\n\treturn n, nil\n}", "func (r *Response) Data(data interface{}) JResponseWriter {\n\treturn r.Field(fieldData, data)\n}", "func (res Response) AsInt8() (int8, error) {\n\treturn res.Bits.AsInt8(), res.Error\n}", "func (resp *response) Write(b []byte) (int, error) {\n\tsize, err := resp.ResponseWriter.Write(b)\n\tresp.size += size\n\treturn size, err\n}", "func returnValue(w http.ResponseWriter, req *http.Request) {\n\treqBody, err := ioutil.ReadAll(req.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Body read error, %v\", err)\n\t\tw.WriteHeader(500) // Return 500 Internal Server Error.\n\t\treturn\n\t}\n\tvar counter Value\n\tjson.Unmarshal(reqBody, &counter)\n\t//Server-sided limit of [-10000, 10000] to prevent intetger-overflow\n\t//and for testing purposes.\n\tif counter.Value < -10000 || counter.Value > 10000 {\n\t\tlog.Printf(\"The number %v exceeded the allowed range of [-10000, 10000]!\", counter.Value)\n\t\tw.WriteHeader(500) // Return 500 Internal Server Error.\n\t\tfmt.Fprintf(w, \"The number %v exceeded the allowed range of [-10000, 10000]!\", counter.Value)\n\t\treturn\n\t}\n\t//Parse response as json.\n\tjson.NewEncoder(w).Encode(counter)\n}", "func (o *UserRolesResponse) GetData() [][]int32 {\n\tif o == nil {\n\t\tvar ret [][]int32\n\t\treturn ret\n\t}\n\n\treturn o.Data\n}", "func (w *responseWriter) Write(data []byte) (int, error) {\n\tn, err := w.ResponseWriter.Write(data)\n\tif w.resp.StatusCode == 0 {\n\t\tw.resp.StatusCode = http.StatusOK\n\t}\n\treturn n, err\n}", "func (r *Reply) Int() (int, error) {\n\ti64, err := r.Int64()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn int(i64), nil\n}", "func (j Job) Int() int {\n\tif v, ok := j.data.(int); ok {\n\t\treturn v\n\t}\n\n\treturn 0\n}", "func (c *fakeRedisConn) WriteInt(num int) { c.rsp = append(c.rsp, num) }", "func (b *Buffer) RetrieveInt32() {\n\tb.Retrieve(4)\n}", "func (m *Manager) ReadInt() int {\n\treturn int(m.readInt(0))\n}", "func (r ResponseAPI) GetCode() int {\n\treturn r.Data.Code\n}", "func RetData(c echo.Context, data interface{}) error {\n\treturn c.JSON(http.StatusOK, DataRes{\n\t\tStatus: 200,\n\t\tData: data,\n\t})\n}", "func (r *Response) Int64() (int64, error) {\n\treturn strconv.ParseInt(r.String(), 10, 64)\n}", "func (o *SecretValue) GetData() []int32 {\n\tif o == nil || o.Data == nil {\n\t\tvar ret []int32\n\t\treturn ret\n\t}\n\treturn *o.Data\n}", "func (this *JSONObject) Int(key string) int {\n\treturn parseInt(this.innerMap[key])\n}", "func (r *Response) Read(p []byte) (n int, err error) {\n\n\tif r.Error != nil {\n\t\treturn -1, r.Error\n\t}\n\n\treturn r.RawResponse.Body.Read(p)\n}", "func (c *JSONElement) AsInt(def int) (value int) {\n\tvalue, err := c.Json.Int()\n\tif err != nil {\n\t\treturn def\n\t}\n\n\treturn value\n}", "func Data(status int, content []byte, headers Headers) *Response {\n\treturn &Response{\n\t\tStatus: status,\n\t\tContent: bytes.NewBuffer(content),\n\t\tHeaders: headers,\n\t}\n}", "func (recycleBinItemResp *RecycleBinItemResp) Data() *RecycleBinItemInfo {\n\tdata := NormalizeODataItem(*recycleBinItemResp)\n\tres := &RecycleBinItemInfo{}\n\tjson.Unmarshal(data, &res)\n\treturn res\n}", "func (p *IntVector) Data() 
[]int {\n\tarr := make([]int, p.Len());\n\tfor i, v := range p.a {\n\t\tarr[i] = v.(int)\n\t}\n\treturn arr;\n}", "func (o *GetTimestampResponseDefault) Code() int {\n\treturn o._statusCode\n}", "func instanceLength(r *http.Response) (l int64, err error) {\n\tswitch r.StatusCode {\n\tcase http.StatusOK:\n\t\tl, err = strconv.ParseInt(r.Header.Get(\"Content-Length\"), 10, 64)\n\t\treturn\n\tcase http.StatusPartialContent:\n\t\tcr, parseOk := httptoo.ParseBytesContentRange(r.Header.Get(\"Content-Range\"))\n\t\tl = cr.Length\n\t\tif !parseOk {\n\t\t\terr = errors.New(\"error parsing Content-Range\")\n\t\t}\n\t\treturn\n\tdefault:\n\t\terr = errors.New(\"unhandled status code\")\n\t\treturn\n\t}\n}", "func (p *Parser) GetInt() (int, error) {\n\tswitch p.data.(type) {\n\tcase float32, float64:\n\t\treturn int(reflect.ValueOf(p.data).Float()), nil\n\tcase int, int8, int16, int32, int64:\n\t\treturn int(reflect.ValueOf(p.data).Int()), nil\n\tcase uint, uint8, uint16, uint32, uint64:\n\t\treturn int(reflect.ValueOf(p.data).Uint()), nil\n\t}\n\treturn 0, errors.New(\"invalid value type\")\n}", "func (Int32Codec) Read(data []byte, ptr unsafe.Pointer, wt WireType) (n int, err error) {\n\ti, n := ReadVarInt(data)\n\tif n < 0 {\n\t\treturn 0, fmt.Errorf(\"corrupt var int\")\n\t}\n\n\t*(*int32)(ptr) = int32(i)\n\treturn n, nil\n}", "func (b *Buffer) GetInt() (ret int32) {\n\tbinary.Read(b.data, binary.LittleEndian, &ret)\n\treturn ret\n}", "func (res Responder) WriteInt(n int) int {\n\treturn res.writeInline(binCOLON, strconv.Itoa(n))\n}", "func (_TellorMesosphere *TellorMesosphereCaller) RetrieveData(opts *bind.CallOpts, _requestId *big.Int, _timestamp *big.Int) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _TellorMesosphere.contract.Call(opts, &out, \"retrieveData\", _requestId, _timestamp)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func asInt(data interface{}) (int64, bool) {\n\tswitch val := data.(type) {\n\tcase int:\n\t\treturn int64(val), true\n\n\tcase int8:\n\t\treturn int64(val), true\n\n\tcase int16:\n\t\treturn int64(val), true\n\n\tcase int32:\n\t\treturn int64(val), true\n\n\tcase int64:\n\t\treturn val, true\n\n\tcase uint:\n\t\treturn int64(val), true\n\n\tcase uint8:\n\t\treturn int64(val), true\n\n\tcase uint16:\n\t\treturn int64(val), true\n\n\tcase uint32:\n\t\treturn int64(val), true\n\n\tcase uint64:\n\t\treturn int64(val), true\n\n\tcase time.Duration:\n\t\treturn int64(val), true\n\n\tcase StyledCell:\n\t\treturn asInt(val.Data)\n\t}\n\n\treturn 0, false\n}", "func countErrResponse(w http.ResponseWriter) {\n\terrResponse := errorResponse{\n\t\t\"invalid count\",\n\t\t\"The number of dice requested is invalid.\",\n\t}\n\tw.WriteHeader(http.StatusNotAcceptable)\n\tenc := json.NewEncoder(w)\n\tjsonEncode(w, enc, errResponse)\n\n\treturn\n}", "func (x *Int) Bytes() []byte {}", "func (d *Data) GetInt(key string, defaultValue int) int {\n\tval, err := d.Get(key)\n\tif err != nil {\n\t\treturn defaultValue\n\t}\n\n\tres, ok := val.(int)\n\tif !ok {\n\t\treturn defaultValue\n\t}\n\n\treturn res\n}", "func (o *SecretValue) GetDataOk() (*[]int32, bool) {\n\tif o == nil || o.Data == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Data, true\n}", "func (res *Response) Write(p []byte) (n int, err error) {\n\treturn res.c.Write(p)\n}", "func (w *customResponseWriter) Write(b []byte) (int, error) {\n\tif w.status == 0 {\n\t\tw.status = http.StatusOK\n\t}\n\tn, err := 
w.ResponseWriter.Write(b)\n\tw.length += n\n\treturn n, err\n}", "func (r *responseInfoRecorder) Write(b []byte) (int, error) {\n\tr.ContentLength += int64(len(b))\n\tif r.statusCode == 0 {\n\t\tr.statusCode = http.StatusOK\n\t}\n\treturn r.ResponseWriter.Write(b)\n}", "func (r *Reader) ReadVarInt() (int, error) {\n\tm := 1\n\tv := 0\n\tfor {\n\t\tb, err := r.ReadByte()\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\terr = io.ErrUnexpectedEOF\n\t\t\t}\n\t\t\treturn 0, err\n\t\t}\n\t\tv += int(b&0x7f) * m\n\t\tif (b & 0x80) == 0 {\n\t\t\treturn v, nil\n\t\t}\n\t\tm *= 0x80\n\t\tif m > 0x200000 {\n\t\t\treturn 0, errors.New(\"malformed compressed int\")\n\t\t}\n\t}\n}", "func (r *Redis) Int(reply interface{}, err error) (int, error) {\n\treturn redigo.Int(reply, err)\n}", "func (r *Response) Code() int {\n\treturn r.recorder.Code\n}", "func fuckJSON(d interface{}) int {\n\tswitch d.(type) {\n\tcase int:\n\t\treturn d.(int)\n\tcase float64:\n\t\treturn int(d.(float64))\n\tdefault:\n\t\tpanic(\"not an int\")\n\t}\n}", "func (y *Yaml) Int() (int, error) {\n\tswitch y.data.(type) {\n\tcase float32, float64:\n\t\treturn int(reflect.ValueOf(y.data).Float()), nil\n\tcase int, int8, int16, int32, int64:\n\t\treturn int(reflect.ValueOf(y.data).Int()), nil\n\t}\n\treturn 0, errors.New(\"invalid value type\")\n}", "func (p Payload) GetInt(key string) (int, error) {\n\tvalue, err := p.Get(key)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif value, ok := value.(int); ok {\n\t\treturn value, nil\n\t}\n\treturn 0, fmt.Errorf(\"Value with key '%s' not an int\", key)\n}", "func (res *Response) getStatusCode() int {\n\treturn res.StatusCode\n}", "func (_obj *DataService) GetRecordCount(wx_id string, activity_id string, count *int32, _opt ...map[string]string) (ret int32, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = _os.Write_string(wx_id, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _os.Write_string(activity_id, 2)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _os.Write_int32((*count), 3)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\ttarsCtx := context.Background()\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"getRecordCount\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = _is.Read_int32(&ret, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _is.Read_int32(&(*count), 3, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func (w *responseWrapper) Write(p []byte) (int, error) {\n\treturn w.buffer.Write(p)\n}", "func (ec ErrCode) Int() int { return int(ec.code) }", "func (table Table) GetInt(key string) (int, error) {\n\tif table.Contain(key) {\n\t\tb, 
err := json.Marshal(table[key])\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tvar num int\n\t\tjson.Unmarshal(b, &num)\n\t\treturn num, nil\n\t}\n\treturn 0, errors.New(\"Key Not Found\")\n}", "func (s Stream) ReadVarInt() (int, error) {\n\tnum, err := s.ReadVarInt64()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn int(num), nil\n}", "func (w *AppResponseWriter) Write(data []byte) (n int, err error) {\n\tif !w.written {\n\t\tw.statusCode = http.StatusOK\n\t\tw.written = true\n\t}\n\treturn w.ResponseWriter.Write(data)\n}", "func IntSlice(c Doer, r *Request) []int {\n\tc <- r\n\treturn (<-r.resp).([]int)\n}", "func (o *ApiResponse) GetCode() int32 {\n\tif o == nil || o.Code == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.Code\n}", "func (c *DefaultApiController) DataDataIdGet(w http.ResponseWriter, r *http.Request) {\n\tparams := mux.Vars(r)\n\tdataId, err := parseInt32Parameter(params[\"dataId\"])\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tresult, err := c.service.DataDataIdGet(r.Context(), dataId)\n\t//If an error occured, encode the error with the status code\n\tif err != nil {\n\t\tEncodeJSONResponse(err.Error(), &result.Code, w)\n\t\treturn\n\t}\n\t//If no error, encode the body and the result code\n\tEncodeJSONResponse(result.Body, &result.Code, w)\n\n}", "func (d *DataItem) ToIntBytes() ([]byte, error) {\n\tswitch v := d.raw.(type) {\n\tcase int64:\n\t\treturn prvEncodeInt(v), nil\n\t}\n\treturn nil, typeError\n}", "func (c *Controller) Data(data []byte, dftContentType ...string) {\n\tctx := c.Context()\n\tr := render.NewData(data)\n\thttpStatus := r.HttpCode()\n\tif ctx.Status() > 0 && ctx.Status() != httpStatus {\n\t\thttpStatus = ctx.Status()\n\t}\n\n\tctx.PushLog(\"status\", httpStatus)\n\tcontentType := \"\"\n\tif len(dftContentType) > 0 {\n\t\tcontentType = dftContentType[0]\n\t}\n\n\tif contentType != \"\" {\n\t\tctx.SetHeader(\"Content-Type\", contentType)\n\t}\n\n\tctx.End(httpStatus, r.Content())\n}", "func (Int16Codec) Read(data []byte, ptr unsafe.Pointer, wt WireType) (n int, err error) {\n\ti, n := ReadVarInt(data)\n\tif n < 0 {\n\t\treturn 0, fmt.Errorf(\"corrupt var int\")\n\t}\n\n\t*(*int16)(ptr) = int16(i)\n\treturn n, nil\n}", "func Int(r interface{}, err error) (int, error) {\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tswitch r := r.(type) {\n\tcase int:\n\t\treturn r, nil\n\tcase int64:\n\t\tx := int(r)\n\t\tif int64(x) != r {\n\t\t\treturn 0, strconv.ErrRange\n\t\t}\n\t\treturn x, nil\n\tcase []byte:\n\t\tn, err := strconv.ParseInt(string(r), 10, 0)\n\t\treturn int(n), err\n\tcase string:\n\t\tn, err := strconv.ParseInt(r, 10, 0)\n\t\treturn int(n), err\n\tcase nil:\n\t\treturn 0, simplesessions.ErrNil\n\t}\n\n\treturn 0, simplesessions.ErrAssertType\n}", "func (o *ApiResponse) GetCode() int32 {\n\tif o == nil || IsNil(o.Code) {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.Code\n}", "func (r *Decoder) Len() int { x := r.Uint64(); v := int(x); assert(uint64(v) == x); return v }", "func (a *BaseApi) DataRes(c *rux.Context, data interface{}) *model.JsonData {\n\treturn a.MakeRes(0, nil, data)\n}", "func (r *RedisSession) Int(reply interface{}) (int, error) {\n\treturn redis.Int(reply, nil)\n}", "func (w *responseWriter) Write(b []byte) (int, error) {\n\tif w.Status == 0 {\n\t\tw.Status = 200\n\t}\n\tn, err := w.ResponseWriter.Write(b)\n\tw.Length += n\n\treturn n, err\n}", "func (m *Metadata) ReadInt(reader io.Reader) int {\n\tval := 0\n\tbyteLength := 8\n\tsign := ReadExactInt(reader, 
1)\n\tbuf := make([]byte, m.IntSize)\n\n\tif _, err := reader.Read(buf); err != nil {\n\t\tlog.Fatalf(\"err reading int: %v\", err)\n\t}\n\n\tfor len(buf) > 0 {\n\t\tv := buf[len(buf)-1]\n\t\tbuf = buf[:len(buf)-1]\n\t\tval = (val << byteLength) + int(v)\n\t}\n\n\tif sign > 0 {\n\t\tval = -val\n\t}\n\n\treturn val\n}", "func AsData(ip string, data interface{}) (resp Response, err error) {\n\traw, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn AsError(ip, err)\n\t}\n\n\tresp = Response{\n\t\tStatusCode: 200,\n\t\tHeaders: jsonHeaders,\n\t\tBody: string(raw),\n\t}\n\treturn\n}" ]
[ "0.6555175", "0.6241124", "0.61665976", "0.5970528", "0.5872309", "0.5627681", "0.5596786", "0.55721736", "0.55685365", "0.5532696", "0.55013376", "0.5430772", "0.5414721", "0.5408866", "0.5398135", "0.53445405", "0.5334568", "0.53330725", "0.5322254", "0.52992433", "0.52889645", "0.52627665", "0.52382135", "0.5219681", "0.5218625", "0.5205876", "0.5193162", "0.51884484", "0.5160087", "0.5153549", "0.5152874", "0.51381177", "0.5135291", "0.512818", "0.51105577", "0.5107155", "0.51068664", "0.51005113", "0.5095828", "0.50948673", "0.50808823", "0.5055048", "0.5046475", "0.5039078", "0.5037134", "0.50351614", "0.50273967", "0.5025966", "0.50227755", "0.502209", "0.5017149", "0.50155663", "0.5008019", "0.49992618", "0.49939683", "0.49892816", "0.49844244", "0.49786654", "0.49704227", "0.49693942", "0.49655777", "0.4962292", "0.49593967", "0.4956749", "0.495084", "0.4945919", "0.49397635", "0.49324387", "0.493139", "0.49187094", "0.49183813", "0.49172136", "0.4917025", "0.49112508", "0.49066585", "0.49051273", "0.48998025", "0.48835075", "0.4877566", "0.48768014", "0.48759001", "0.48755807", "0.4874704", "0.48652667", "0.48636097", "0.4857446", "0.48559946", "0.48551145", "0.4854356", "0.48510674", "0.4840375", "0.4836658", "0.48364815", "0.48331258", "0.48247606", "0.48225552", "0.48203415", "0.48195836", "0.48174277", "0.4811134" ]
0.6545365
1
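Illustrative note on the preceding record: the one-line Int() wrapper only makes sense against a Response type that exposes its Data payload as text via String(). The following minimal Go sketch reconstructs that pattern end to end; the Response struct, its Data field, and the String() method are assumptions made for illustration, since the row does not identify the originating library.

package main

import (
	"fmt"
	"strconv"
)

// Response stands in for the library's response type; Data holds the raw payload.
type Response struct {
	Data []byte
}

// String returns Response.Data as a string, which the convenience wrappers build on.
func (r *Response) String() string { return string(r.Data) }

// Int is the wrapper from the record: parse Response.Data as an int.
func (r *Response) Int() (int, error) { return strconv.Atoi(r.String()) }

func main() {
	r := &Response{Data: []byte("42")}
	n, err := r.Int()
	fmt.Println(n, err) // prints: 42 <nil>
}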
Convenience wrapper to return Response.Data as an int64.
func (r *Response) Int64() (int64, error) { return strconv.ParseInt(r.String(), 10, 64) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GetInt64Data(response *bcsmonitor.QueryResponse) int64 {\n\tif len(response.Data.Result) == 0 {\n\t\treturn 0\n\t}\n\tvalueStr, ok := response.Data.Result[0].Value[1].(string)\n\tif !ok {\n\t\treturn 0\n\t}\n\tvalue, err := strconv.ParseInt(valueStr, 10, 64)\n\tif err != nil {\n\t\treturn 0\n\t}\n\treturn value\n}", "func (res Response) AsInt64() (int64, error) {\n\treturn res.Bits.AsInt64(), res.Error\n}", "func (r *Decoder) Int() int { x := r.Int64(); v := int(x); assert(int64(v) == x); return v }", "func (res Response) AsInt() (int, error) {\n\treturn res.Bits.AsInt(), res.Error\n}", "func (d DataView) Int64(offset uint, littleEndian bool) int64 {\n\tvar decoding binary.ByteOrder\n\tif littleEndian {\n\t\tdecoding = binary.LittleEndian\n\t} else {\n\t\tdecoding = binary.BigEndian\n\t}\n\treturn int64(decoding.Uint64(d[offset:]))\n}", "func (b *Buffer) RetrieveInt64() {\n\tb.Retrieve(8)\n}", "func (b *Bytes) Int64() int64 {\n\treturn int64(*b)\n}", "func (reply Reply) Int64() (int64, error) {\n\tresult, err := redis.Int64(reply.data, reply.err)\n\tif err != nil {\n\t\treturn 0, redisError(err)\n\t}\n\n\treturn result, nil\n}", "func ReadInt64(buffer []byte, offset int) int64 {\n return (int64(buffer[offset + 0]) << 0) |\n (int64(buffer[offset + 1]) << 8) |\n (int64(buffer[offset + 2]) << 16) |\n (int64(buffer[offset + 3]) << 24) |\n (int64(buffer[offset + 4]) << 32) |\n (int64(buffer[offset + 5]) << 40) |\n (int64(buffer[offset + 6]) << 48) |\n (int64(buffer[offset + 7]) << 56)\n}", "func (r *Reader) Int64() int64 {\n\treturn int64(r.Uint64())\n}", "func (r *Result) Int64() int64 {\n\tif r.Error != nil {\n\t\treturn 0\n\t}\n\n\treturn convert.ToInt64(r.Value)\n}", "func (s Stream) ReadVarInt64() (int64, error) {\n\tvar size uint\n\tvar num uint64\n\n\tfor {\n\t\tb, err := s.ReadByte()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\n\t\tnum |= (uint64(b) & uint64(0x7F)) << (size * 7)\n\t\tsize++\n\t\tif size > 10 {\n\t\t\treturn 0, ErrInvalidData\n\t\t}\n\n\t\tif (b & 0x80) == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn int64(num), nil\n}", "func (j *JSONData) Int64(path ...interface{}) (int64, error) {\n\tjson, err := j.get(path...)\n\treturn json.MustInt64(), err\n}", "func (n *eeNum) int64() *int64 { return (*int64)(unsafe.Pointer(&n.data)) }", "func (Int64Codec) Read(data []byte, ptr unsafe.Pointer, wt WireType) (n int, err error) {\n\ti, n := ReadVarInt(data)\n\tif n < 0 {\n\t\treturn 0, fmt.Errorf(\"corrupt var int\")\n\t}\n\t*(*int64)(ptr) = int64(i)\n\treturn n, nil\n}", "func (r *Decoder) Int64() int64 {\n\tr.Sync(SyncInt64)\n\treturn r.rawVarint()\n}", "func (r *Reader) ReadInt64() int64 {\n\tif len(r.buffer) <= r.index+7 {\n\t\tlog.Panic(\"Error reading int64: buffer is too small!\")\n\t}\n\n\tvar data = int64(r.buffer[r.index])\n\tdata += int64(r.buffer[r.index+1]) << 8\n\tdata += int64(r.buffer[r.index+2]) << 16\n\tdata += int64(r.buffer[r.index+3]) << 24\n\tdata += int64(r.buffer[r.index+4]) << 32\n\tdata += int64(r.buffer[r.index+5]) << 40\n\tdata += int64(r.buffer[r.index+6]) << 48\n\tdata += int64(r.buffer[r.index+7]) << 56\n\tr.index += 8\n\n\treturn data\n}", "func (res Response) AsUInt64() (uint64, error) {\n\treturn res.Bits.AsUInt64(), res.Error\n}", "func makeInt64(in interface{}) int64 {\n\tval, _ := in.(json.Number).Int64()\n\treturn val\n}", "func (s VerbatimString) ToInt64() (int64, error) { return _verbatimString(s).ToInt64() }", "func (r *Response) Int() (int, error) {\n\treturn strconv.Atoi(r.String())\n}", "func ReadInt64() int64 {\n\treturn 
readInt64()\n}", "func (p RedisDsl) GET_INT64(key string) (*int64, error) {\n\treturn ReplyToInt64Ptr(p.GET(key))\n}", "func (r *Reply) Int64() (int64, error) {\n\tif r.Type == ErrorReply {\n\t\treturn 0, r.Err\n\t}\n\tif r.Type != IntegerReply {\n\t\ts, err := r.Str()\n\t\tif err == nil {\n\t\t\ti64, err := strconv.ParseInt(s, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, errors.New(\"failed to parse integer value from string value\")\n\t\t\t} else {\n\t\t\t\treturn i64, nil\n\t\t\t}\n\t\t}\n\n\t\treturn 0, errors.New(\"integer value is not available for this reply type\")\n\t}\n\n\treturn r.int, nil\n}", "func (m *Message) getInt64() int64 {\n\tb := m.bufferForGet()\n\tdefer b.Advance(8)\n\n\treturn int64(binary.LittleEndian.Uint64(b.Bytes[b.Offset:]))\n}", "func (r *Redis) Int64(reply interface{}, err error) (int64, error) {\n\treturn redigo.Int64(reply, err)\n}", "func Int64(r interface{}, err error) (int64, error) {\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tswitch r := r.(type) {\n\tcase int:\n\t\treturn int64(r), nil\n\tcase int64:\n\t\treturn r, nil\n\tcase []byte:\n\t\tn, err := strconv.ParseInt(string(r), 10, 64)\n\t\treturn n, err\n\tcase string:\n\t\tn, err := strconv.ParseInt(r, 10, 64)\n\t\treturn n, err\n\tcase nil:\n\t\treturn 0, simplesessions.ErrNil\n\t}\n\n\treturn 0, simplesessions.ErrAssertType\n}", "func (m *Manager) ReadInt64() int64 {\n\treturn m.readInt(64)\n}", "func (c *Context) Data(data interface{}, total ...int64) {\n\tc.responseFormat.SetData(data, total...)\n}", "func (b *Buffer) ReadInt64() (x int64, err error) {\n\tx, err = b.PeekInt64()\n\tif err != nil {\n\t\treturn\n\t}\n\tb.RetrieveInt64()\n\treturn\n}", "func (r *RedisSession) Int64(reply interface{}) (int64, error) {\n\treturn redis.Int64(reply, nil)\n}", "func (p *Parser) GetInt64() (int64, error) {\n\tswitch p.data.(type) {\n\tcase float32, float64:\n\t\treturn int64(reflect.ValueOf(p.data).Float()), nil\n\tcase int, int8, int16, int32, int64:\n\t\treturn reflect.ValueOf(p.data).Int(), nil\n\tcase uint, uint8, uint16, uint32, uint64:\n\t\treturn int64(reflect.ValueOf(p.data).Uint()), nil\n\t}\n\treturn 0, errors.New(\"invalid value type\")\n}", "func (s Stream) ReadInt64() (int64, error) {\n\tb := make([]byte, 8)\n\terr := s.DecodeReadFull(b)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn int64(binary.LittleEndian.Uint64(b)), nil\n}", "func (payload *Payload) GetInt64(key string) (int64, bool) {\n\tval, ok := payload.data[key]\n\treturn convertToInt64(val, false), ok\n}", "func (num Number) Int64() (int64, bool) {\n\ti, err := json.Number(num).Int64()\n\tif err != nil {\n\t\treturn 0, false\n\t}\n\treturn i, true\n}", "func (s *SliceInt) Data() []int {\n\tif s == nil {\n\t\treturn nil\n\t}\n\treturn s.data\n}", "func (v *Value) Int() int {\n return Util.ToInt(v.data)\n}", "func (r *Reader) ReadInt64() int64 {\n\tidx := r.currentReadIndex\n\tr.currentReadIndex += 8\n\treturn int64(r.bufBytes[idx]) | int64(r.bufBytes[idx+1])<<8 | int64(r.bufBytes[idx+2])<<16 | int64(r.bufBytes[idx+3])<<24 |\n\t\tint64(r.bufBytes[idx+4])<<32 | int64(r.bufBytes[idx+5])<<40 | int64(r.bufBytes[idx+6])<<48 | int64(r.bufBytes[idx+7])<<56\n}", "func (x *Int) Int64() int64 {}", "func (p *Stream) ReadInt64() (int64, *base.Error) {\n\tv := p.readFrame[p.readIndex]\n\tif v > 13 && v < 54 {\n\t\tif p.CanRead() {\n\t\t\tp.gotoNextReadByteUnsafe()\n\t\t\treturn int64(v) - 21, nil\n\t\t}\n\t} else if v == 6 {\n\t\tif p.isSafetyReadNBytesInCurrentFrame(3) {\n\t\t\tb := p.readFrame[p.readIndex:]\n\t\t\tp.readIndex += 
3\n\t\t\treturn int64(uint16(b[1])|\n\t\t\t\t(uint16(b[2])<<8),\n\t\t\t) - 32768, nil\n\t\t}\n\t\tif p.hasNBytesToRead(3) {\n\t\t\tb := p.readNBytesCrossFrameUnsafe(3)\n\t\t\treturn int64(uint16(b[1])|\n\t\t\t\t(uint16(b[2])<<8),\n\t\t\t) - 32768, nil\n\t\t}\n\t} else if v == 7 {\n\t\tif p.isSafetyReadNBytesInCurrentFrame(5) {\n\t\t\tb := p.readFrame[p.readIndex:]\n\t\t\tp.readIndex += 5\n\t\t\treturn int64(uint32(b[1])|\n\t\t\t\t(uint32(b[2])<<8)|\n\t\t\t\t(uint32(b[3])<<16)|\n\t\t\t\t(uint32(b[4])<<24),\n\t\t\t) - 2147483648, nil\n\t\t}\n\t\tif p.hasNBytesToRead(5) {\n\t\t\tb := p.readNBytesCrossFrameUnsafe(5)\n\t\t\treturn int64(uint32(b[1])|\n\t\t\t\t(uint32(b[2])<<8)|\n\t\t\t\t(uint32(b[3])<<16)|\n\t\t\t\t(uint32(b[4])<<24),\n\t\t\t) - 2147483648, nil\n\t\t}\n\t} else if v == 8 {\n\t\tif p.isSafetyReadNBytesInCurrentFrame(9) {\n\t\t\tb := p.readFrame[p.readIndex:]\n\t\t\tp.readIndex += 9\n\t\t\treturn int64(\n\t\t\t\tuint64(b[1]) |\n\t\t\t\t\t(uint64(b[2]) << 8) |\n\t\t\t\t\t(uint64(b[3]) << 16) |\n\t\t\t\t\t(uint64(b[4]) << 24) |\n\t\t\t\t\t(uint64(b[5]) << 32) |\n\t\t\t\t\t(uint64(b[6]) << 40) |\n\t\t\t\t\t(uint64(b[7]) << 48) |\n\t\t\t\t\t(uint64(b[8]) << 56) -\n\t\t\t\t\t9223372036854775808), nil\n\t\t}\n\t\tif p.hasNBytesToRead(9) {\n\t\t\tb := p.readNBytesCrossFrameUnsafe(9)\n\t\t\treturn int64(\n\t\t\t\tuint64(b[1]) |\n\t\t\t\t\t(uint64(b[2]) << 8) |\n\t\t\t\t\t(uint64(b[3]) << 16) |\n\t\t\t\t\t(uint64(b[4]) << 24) |\n\t\t\t\t\t(uint64(b[5]) << 32) |\n\t\t\t\t\t(uint64(b[6]) << 40) |\n\t\t\t\t\t(uint64(b[7]) << 48) |\n\t\t\t\t\t(uint64(b[8]) << 56) -\n\t\t\t\t\t9223372036854775808), nil\n\t\t}\n\t}\n\treturn 0, base.ErrStream\n}", "func (o *FakeObject) Int64() int64 { return o.Value.(int64) }", "func GetInt64(v interface{}) int64 {\n\treturn cache2.GetInt64(v)\n}", "func GetInt64(key string) int64 { return viper.GetInt64(key) }", "func (w *Writer) WriteInt64(data interface{}) {\n\tvar t = w.getType(data, 8)\n\n\tw.buffer[w.index] = byte(t[0])\n\tw.buffer[w.index+1] = byte(t[1])\n\tw.buffer[w.index+2] = byte(t[2])\n\tw.buffer[w.index+3] = byte(t[3])\n\tw.buffer[w.index+4] = byte(t[4])\n\tw.buffer[w.index+5] = byte(t[5])\n\tw.buffer[w.index+6] = byte(t[6])\n\tw.buffer[w.index+7] = byte(t[7])\n\tw.index += 8\n}", "func GetInt64(key string) int64 {\n\treturn cargoboat.GetInt64(key)\n}", "func (c *fakeRedisConn) WriteInt64(num int64) { c.rsp = append(c.rsp, num) }", "func asInt(data interface{}) (int64, bool) {\n\tswitch val := data.(type) {\n\tcase int:\n\t\treturn int64(val), true\n\n\tcase int8:\n\t\treturn int64(val), true\n\n\tcase int16:\n\t\treturn int64(val), true\n\n\tcase int32:\n\t\treturn int64(val), true\n\n\tcase int64:\n\t\treturn val, true\n\n\tcase uint:\n\t\treturn int64(val), true\n\n\tcase uint8:\n\t\treturn int64(val), true\n\n\tcase uint16:\n\t\treturn int64(val), true\n\n\tcase uint32:\n\t\treturn int64(val), true\n\n\tcase uint64:\n\t\treturn int64(val), true\n\n\tcase time.Duration:\n\t\treturn int64(val), true\n\n\tcase StyledCell:\n\t\treturn asInt(val.Data)\n\t}\n\n\treturn 0, false\n}", "func (_TellorMesosphere *TellorMesosphereCaller) RetrieveData(opts *bind.CallOpts, _requestId *big.Int, _timestamp *big.Int) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _TellorMesosphere.contract.Call(opts, &out, \"retrieveData\", _requestId, _timestamp)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (rw *ReadWrite) GetInt64() int64 {\n\tshift := 
uint(0)\n\tn := uint64(0)\n\tfor {\n\t\tb := rw.GetByte_()\n\t\tn |= uint64(b&0x7f) << shift\n\t\tshift += 7\n\t\tif 0 == (b & 0x80) {\n\t\t\tbreak\n\t\t}\n\t}\n\ttmp := ((int64(n<<63) >> 63) ^ int64(n)) >> 1\n\ttmp = tmp ^ int64(n&(1<<63))\n\treturn tmp\n}", "func (z *Int) Int64() int64 {\n\treturn int64(z[0] & 0x7fffffffffffffff)\n}", "func (result ContractFunctionResult) GetInt64(index uint64) int64 {\n\treturn int64(binary.BigEndian.Uint64(result.ContractCallResult[index*32+24 : (index+1)*32]))\n}", "func (c *Chain) GetInt64() int64 {\n\treturn c.val.(int64)\n}", "func Int64(v interface{}) *int64 {\n\tswitch v.(type) {\n\tcase string, int32, int16, int8, int, uint32, uint16, uint8, uint, float32, float64:\n\t\tval := fmt.Sprintf(\"%v\", v)\n\t\tres, err := strconv.ParseInt(val, 10, 64)\n\t\tif err != nil {\n\t\t\texception.Err(err, 500).Ctx(M{\"v\": v}).Throw()\n\t\t}\n\t\treturn &res\n\tcase int64, uint64:\n\t\tres := v.(int64)\n\t\treturn &res\n\tcase bool:\n\t\tval := v.(bool)\n\t\tvar res int64 = 0\n\t\tif val {\n\t\t\tres = 1\n\t\t}\n\t\treturn &res\n\t}\n\treturn nil\n}", "func (d *DataItem) ToIntBytes() ([]byte, error) {\n\tswitch v := d.raw.(type) {\n\tcase int64:\n\t\treturn prvEncodeInt(v), nil\n\t}\n\treturn nil, typeError\n}", "func GetInt64(key string) int64 {\n\treturn v.GetInt64(key)\n}", "func (c *Context) GetInt64(key interface{}) int64 {\n\treturn c.Get(key).(int64)\n}", "func Int64(i64 int64) Val { return Val{t: bsontype.Int64}.writei64(i64) }", "func (no *Node) Int64() (int64, error) {\n\tvar out int64\n\tif err := binary.Read(no.buf, binary.LittleEndian, &out); err != nil {\n\t\treturn 0, err\n\t}\n\treturn out, nil\n}", "func Int64(any interface{}) int64 {\n\tif any == nil {\n\t\treturn 0\n\t}\n\tswitch value := any.(type) {\n\tcase int:\n\t\treturn int64(value)\n\tcase int8:\n\t\treturn int64(value)\n\tcase int16:\n\t\treturn int64(value)\n\tcase int32:\n\t\treturn int64(value)\n\tcase int64:\n\t\treturn value\n\tcase uint:\n\t\treturn int64(value)\n\tcase uint8:\n\t\treturn int64(value)\n\tcase uint16:\n\t\treturn int64(value)\n\tcase uint32:\n\t\treturn int64(value)\n\tcase uint64:\n\t\treturn int64(value)\n\tcase float32:\n\t\treturn int64(value)\n\tcase float64:\n\t\treturn int64(value)\n\tcase bool:\n\t\tif value {\n\t\t\treturn 1\n\t\t}\n\t\treturn 0\n\tcase []byte:\n\t\treturn gbinary.DecodeToInt64(value)\n\tdefault:\n\t\tif f, ok := value.(iInt64); ok {\n\t\t\treturn f.Int64()\n\t\t}\n\t\tvar (\n\t\t\ts = String(value)\n\t\t\tisMinus = false\n\t\t)\n\t\tif len(s) > 0 {\n\t\t\tif s[0] == '-' {\n\t\t\t\tisMinus = true\n\t\t\t\ts = s[1:]\n\t\t\t} else if s[0] == '+' {\n\t\t\t\ts = s[1:]\n\t\t\t}\n\t\t}\n\t\t// Hexadecimal\n\t\tif len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X') {\n\t\t\tif v, e := strconv.ParseInt(s[2:], 16, 64); e == nil {\n\t\t\t\tif isMinus {\n\t\t\t\t\treturn -v\n\t\t\t\t}\n\t\t\t\treturn v\n\t\t\t}\n\t\t}\n\t\t// Decimal\n\t\tif v, e := strconv.ParseInt(s, 10, 64); e == nil {\n\t\t\tif isMinus {\n\t\t\t\treturn -v\n\t\t\t}\n\t\t\treturn v\n\t\t}\n\t\t// Float64\n\t\treturn int64(Float64(value))\n\t}\n}", "func ToInt64(v []byte) (int64, error) {\n\tprimitivePacket, _, _, err := DecodePrimitivePacket(v)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tvalue, err := primitivePacket.ToInt64()\n\tif err != nil {\n\t\treturn 0, nil\n\t}\n\treturn value, nil\n}", "func (res Response) AsInt32() (int32, error) {\n\treturn res.Bits.AsInt32(), res.Error\n}", "func (n StringNumber) Int64() int64 {\n\treturn int64(n)\n}", "func 
parseInt64(content []byte, aggErr *AggregateError) int64 {\n result, err := strconv.ParseInt(string(content), 10, 64)\n if err != nil {\n aggErr.Append(err)\n }\n return result\n}", "func (msg *Message) GetInt64() int64 {\n\tif msg.Ptr+8 < len(msg.Body) && msg.Body[msg.Ptr] == Int64Type {\n\t\tvar res uint64\n\t\tmsg.Ptr++\n\t\tfor i := 0; i < 8; i++ {\n\t\t\tres = (res << 8) | uint64(msg.Body[msg.Ptr+7-i]&0xFF)\n\t\t}\n\t\tmsg.Ptr += 8\n\t\treturn int64(res)\n\t}\n\tpanic(\"Expected int64\")\n}", "func (j JSON) Int64(key string) (int64, error) {\n\treturn strconv.ParseInt(j.Get(key), 10, 64)\n}", "func (recv *Value) GetInt64() int64 {\n\tretC := C.g_value_get_int64((*C.GValue)(recv.native))\n\tretGo := (int64)(retC)\n\n\treturn retGo\n}", "func (n Number) Int64() int64 {\n\treturn int64(n.Int())\n}", "func (x *Int) Bytes() []byte {}", "func (client PrimitiveClient) GetIntResponder(resp *http.Response) (result IntWrapper, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK),\n autorest.ByUnmarshallingJSON(&result),\n autorest.ByClosing())\n result.Response = autorest.Response{Response: resp}\n return\n}", "func (p Payload) GetInt64(key string) (int64, error) {\n\tvalue, err := p.Get(key)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif value, ok := value.(int64); ok {\n\t\treturn value, nil\n\t}\n\treturn 0, fmt.Errorf(\"Value with key '%s' not an int64\", key)\n}", "func (x *Int) Uint64() uint64 {}", "func ReadInt() int {\n\treturn int(readInt64())\n}", "func (n Number) Int64() (int64, error) {\n\treturn strconv.ParseInt(string(n), 10, 64)\n}", "func (payload *Payload) WantInt64(key string) int64 {\n\tval, ok := payload.data[key]\n\tif !ok {\n\t\tpanic(\"Key \" + key + \" is NOT found.\")\n\t}\n\treturn convertToInt64(val, true)\n}", "func (i *Int64) Int64() int64 {\n\treturn int64(*i)\n}", "func (c *Client) GetInt64(ctx context.Context, key string) (int64, error) {\n\tval, err := c.client.Get(ctx, key).Int64()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn val, nil\n}", "func (y *Yaml) Int64() (int64, error) {\n\tswitch y.data.(type) {\n\tcase float32, float64:\n\t\treturn int64(reflect.ValueOf(y.data).Float()), nil\n\tcase int, int8, int16, int32, int64:\n\t\treturn reflect.ValueOf(y.data).Int(), nil\n\t}\n\treturn 0, errors.New(\"invalid value type\")\n}", "func (n *eeNum) int() *int { return (*int)(unsafe.Pointer(&n.data)) }", "func (Uint64Codec) Read(data []byte, ptr unsafe.Pointer, wt WireType) (n int, err error) {\n\ti, n := ReadVarUint(data)\n\tif n < 0 {\n\t\treturn 0, fmt.Errorf(\"corrupt var int\")\n\t}\n\t*(*uint64)(ptr) = uint64(i)\n\treturn n, nil\n}", "func (ctx *Context) Data(code int, contentType string, data []byte) {\n\tctx.Response.StatusCode = code\n\tctx.SetContentType(contentType)\n\tctx.Response.Body = ioutil.NopCloser(bytes.NewBuffer(data))\n}", "func (db *sliceDB) fetchInt64(key []byte) int64 {\r\n\tblob, err := db.lvl.Get(key, nil)\r\n\tif err != nil {\r\n\t\treturn 0\r\n\t}\r\n\tval, read := binary.Varint(blob)\r\n\tif read <= 0 {\r\n\t\treturn 0\r\n\t}\r\n\treturn val\r\n}", "func ToInt64(i interface{}) int64 {\n\treturn cast.ToInt64(i)\n}", "func toInt64(v interface{}) int64 {\n\treturn cast.ToInt64(v)\n}", "func (c Cents) Int64() int64 {\n\treturn int64(c)\n}", "func (res Response) AsInt8() (int8, error) {\n\treturn res.Bits.AsInt8(), res.Error\n}", "func (n *Int64Wrapper) Value() (Value, error) {\n\tif !n.Valid {\n\t\treturn nil, nil\n\t}\n\treturn n.Int64, nil\n}", "func (db *DB) 
GetInt64(key string) (value int64, err error) {\n\ts, err := db.GetStr(key)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn strconv.ParseInt(s, 10, 64)\n}", "func (v Int) Native() interface{} {\n\treturn int64(v)\n}", "func fetch64(p []byte, index int) uint64 {\n\treturn binary.LittleEndian.Uint64(p[index:])\n}", "func (d *BinaryValue) GetData(buffer unsafe.Pointer, bufferSize, dataOffset uint64) uint64 {\n\treturn uint64(C.gocef_binary_value_get_data(d.toNative(), buffer, C.size_t(bufferSize), C.size_t(dataOffset), d.get_data))\n}", "func (s *Structure) Int64(isMaster bool, cmd string, params ...interface{}) (reply int64, err error) {\n\tconn := s.getConn(isMaster)\n\tif conn == nil {\n\t\treturn constant.ZeroInt64, configNotExistsOrLoad(s.InstanceName, isMaster)\n\t}\n\n\treply, err = redis.Int64(conn.Do(cmd, params...))\n\tconn.Close()\n\n\treturn reply, err\n}", "func (d *Data) GetInt64(key string, defaultValue int64) int64 {\n\tval, err := d.Get(key)\n\tif err != nil {\n\t\treturn defaultValue\n\t}\n\n\tres, ok := val.(int64)\n\tif !ok {\n\t\treturn defaultValue\n\t}\n\n\treturn res\n}", "func (c *Context) GetInt64(key string) (i64 int64) {\n\tif val, ok := c.Get(key); ok && val != nil {\n\t\ti64, _ = val.(int64)\n\t}\n\treturn\n}", "func (c *Context) GetInt64(key string) (i64 int64) {\n\tif val, ok := c.Get(key); ok && val != nil {\n\t\ti64, _ = val.(int64)\n\t}\n\treturn\n}", "func (res *response) Unmarshal(data []byte) (uint64, error) {\n\tvar offset uint64\n\tvar n uint64\n\tn = code.DecodeVarint(data[offset:], &res.Seq)\n\toffset += n\n\tif data[offset] > 0 {\n\t\tn = code.DecodeString(data[offset:], &res.Error)\n\t} else {\n\t\tn = 1\n\t}\n\toffset += n\n\tif data[offset] > 127 {\n\t\tn = code.DecodeBytes(data[offset:], &res.Reply)\n\t} else if data[offset] > 0 {\n\t\tvar s = 1 + uint64(data[offset])\n\t\tres.Reply = data[offset+1 : offset+s]\n\t\tn = s\n\t} else {\n\t\tn = 1\n\t}\n\toffset += n\n\treturn offset, nil\n}", "func Int64(v int64) *int64 { return &v }", "func Int64(v int64) *int64 { return &v }", "func GetInt64Var(r *http.Request, key string) int64 {\n\tv := Vars(r)[key]\n\tif len(v) == 0 {\n\t\tva := r.URL.Query()[key]\n\t\tif len(va) > 0 {\n\t\t\tv = va[0]\n\t\t}\n\t}\n\ti, _ := strconv.ParseInt(v, 10, 64)\n\n\treturn i\n}", "func (v *Value) Int() int64 {\n\treturn (int64)(C.value_get_long(v.value))\n}", "func (dr downloadResponse) ContentLength() int64 {\n\ts := dr.rawResponse.Header.Get(\"Content-Length\")\n\tif s == \"\" {\n\t\treturn -1\n\t}\n\ti, err := strconv.ParseInt(s, 10, 64)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn i\n}" ]
[ "0.7620368", "0.6956235", "0.6308384", "0.60723186", "0.5968573", "0.5942842", "0.59171957", "0.5859009", "0.5852559", "0.5840035", "0.5835712", "0.5815407", "0.57881236", "0.5754577", "0.5732076", "0.57312083", "0.56497526", "0.5623139", "0.5620109", "0.5567401", "0.55656445", "0.5562429", "0.5547836", "0.5544895", "0.5510715", "0.5504191", "0.54847634", "0.5482659", "0.5472587", "0.546789", "0.54481864", "0.5444354", "0.5435651", "0.54173243", "0.5412041", "0.5383086", "0.5377451", "0.537634", "0.5375643", "0.53590035", "0.5353813", "0.5350667", "0.5340327", "0.53355205", "0.53188396", "0.52989715", "0.52933615", "0.52899814", "0.52752805", "0.52752084", "0.52671754", "0.5264374", "0.5256139", "0.52520967", "0.5249431", "0.5239558", "0.5239554", "0.5224817", "0.5220047", "0.5211322", "0.5208623", "0.52038604", "0.52027774", "0.5190593", "0.51892793", "0.51747346", "0.51709545", "0.51701283", "0.5165006", "0.5164367", "0.5149629", "0.51461303", "0.51436764", "0.5135984", "0.5123411", "0.5123246", "0.5098406", "0.50972927", "0.50897217", "0.50890446", "0.5087847", "0.5077566", "0.5075417", "0.50702417", "0.5063001", "0.5052533", "0.5046981", "0.5042922", "0.5035589", "0.50304675", "0.5030111", "0.50217164", "0.50203836", "0.50203836", "0.50158286", "0.5006742", "0.5006742", "0.49901953", "0.4985803", "0.49846998" ]
0.6651416
2
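Companion sketch for the int64 variant in the record above, under the same assumed Response type as in the earlier note (Response, Data, and String() are illustrative stand-ins, not the original library's API). It highlights the behaviour the wrapper inherits from strconv.ParseInt with base 10 and bit size 64: input outside the signed 64-bit range is reported via strconv.ErrRange.

package main

import (
	"errors"
	"fmt"
	"strconv"
)

type Response struct {
	Data []byte
}

func (r *Response) String() string { return string(r.Data) }

// Int64 is the wrapper from the record: parse Response.Data as a signed 64-bit integer.
func (r *Response) Int64() (int64, error) { return strconv.ParseInt(r.String(), 10, 64) }

func main() {
	ok := &Response{Data: []byte("9223372036854775807")} // max int64
	v, err := ok.Int64()
	fmt.Println(v, err) // prints: 9223372036854775807 <nil>

	over := &Response{Data: []byte("9223372036854775808")} // max int64 + 1
	_, err = over.Int64()
	fmt.Println(errors.Is(err, strconv.ErrRange)) // prints: true
}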
Convenience wrapper to return Response.Data as a float64.
func (r *Response) Float64() (float64, error) { return strconv.ParseFloat(r.String(), 64) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GetFloatData(response *bcsmonitor.QueryResponse) float64 {\n\tif len(response.Data.Result) == 0 {\n\t\treturn 0\n\t}\n\tvalueStr, ok := response.Data.Result[0].Value[1].(string)\n\tif !ok {\n\t\treturn 0\n\t}\n\tvalue, err := strconv.ParseFloat(valueStr, 64)\n\tif err != nil {\n\t\treturn 0\n\t}\n\treturn value\n}", "func (v *Value) Float() float64 {\n return Util.ToFloat(v.data)\n}", "func (n *eeNum) float64() *float64 { return (*float64)(unsafe.Pointer(&n.data)) }", "func (data *Data) Float(s ...string) float64 {\n\treturn data.Interface(s...).(float64)\n}", "func (j *JSONData) Float64(path ...interface{}) (float64, error) {\n\tjson, err := j.get(path...)\n\treturn json.MustFloat64(), err\n}", "func (f Floats) Data() []float64 {\n\treturn f[0][:f.Len()]\n}", "func (reply Reply) Float64() (float64, error) {\n\tresult, err := redis.Float64(reply.data, reply.err)\n\tif err != nil {\n\t\treturn 0, redisError(err)\n\t}\n\n\treturn result, nil\n}", "func (client PrimitiveClient) GetFloatResponder(resp *http.Response) (result FloatWrapper, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK),\n autorest.ByUnmarshallingJSON(&result),\n autorest.ByClosing())\n result.Response = autorest.Response{Response: resp}\n return\n}", "func (r *Result) Float64() float64 {\n\tif r.Error != nil {\n\t\treturn 0\n\t}\n\n\treturn convert.ToFloat64(r.Value)\n}", "func (d DataView) Float64(offset uint, littleEndian bool) float64 {\n\tvar decoding binary.ByteOrder\n\tif littleEndian {\n\t\tdecoding = binary.LittleEndian\n\t} else {\n\t\tdecoding = binary.BigEndian\n\t}\n\tbits := decoding.Uint64(d[offset:])\n\treturn math.Float64frombits(bits)\n}", "func (n Float64Wrapper) Value() (Value, error) {\n\tif !n.Valid {\n\t\treturn nil, nil\n\t}\n\treturn n.Float64, nil\n}", "func (v *Value) Float() float64 {\n\treturn (float64)(C.value_get_double(v.value))\n}", "func (r *Reply) Float64() (float64, error) {\n\tif r.Type == ErrorReply {\n\t\treturn 0, r.Err\n\t}\n\tif r.Type == BulkReply {\n\t\ts, err := r.Str()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tf64, err := strconv.ParseFloat(s, 64)\n\t\tif err != nil {\n\t\t\treturn 0, errors.New(\"failed to parse float value from string value\")\n\t\t} else {\n\t\t\treturn f64, nil\n\t\t}\n\t}\n\n\treturn 0, errors.New(\"float value is not available for this reply type\")\n}", "func (r *Redis) Float64(reply interface{}, err error) (float64, error) {\n\treturn redigo.Float64(reply, err)\n}", "func (buff *Bytes) ToFloat64() float64 {\r\n\treturn *(*float64)(unsafe.Pointer(&(*buff)[0]))\r\n}", "func Float64() float64 {\n\tmu.Lock()\n\tres := r.Float64()\n\tmu.Unlock()\n\treturn res\n}", "func (v Float) Float64() float64 {\n\treturn v.v\n}", "func (c *Client) GetFloat64(endpoint string) (float64, error) {\n\taddress := c.baseURL + endpoint + \"?token=\" + c.token\n\tresp, err := http.Get(address)\n\tif err != nil {\n\t\treturn 0.0, err\n\t}\n\tdefer resp.Body.Close()\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn 0.0, err\n\t}\n\treturn strconv.ParseFloat(string(b), 64)\n\n}", "func (this *JSONArray) Float64(index int) float64 {\n\treturn this.innerArray[index].(float64)\n}", "func (s VerbatimString) ToFloat64() (float64, error) { return _verbatimString(s).ToFloat64() }", "func GetFloat64(key string) float64 { return viper.GetFloat64(key) }", "func (s *Structure) Float64(isMaster bool, cmd string, params ...interface{}) (reply float64, err error) {\n\tconn := 
s.getConn(isMaster)\n\tif conn == nil {\n\t\treturn constant.ZeroFLOAT64, configNotExistsOrLoad(s.InstanceName, isMaster)\n\t}\n\n\treply, err = redis.Float64(conn.Do(cmd, params...))\n\tconn.Close()\n\n\treturn reply, err\n}", "func (c *Chain) GetFloat64() float64 {\n\treturn c.val.(float64)\n}", "func (o *FakeObject) Float() float64 { return o.Value.(float64) }", "func (m USD) Float64() float64 {\n\tx := float64(m)\n\tx = x / 100\n\treturn x\n}", "func ReadDouble(buffer []byte, offset int) float64 {\n bits := ReadUInt64(buffer, offset)\n return math.Float64frombits(bits)\n}", "func (p *PoolAllocator) Float64() Floating {\n\ts := p.f64.Get().(*f64)\n\ts.channels = channels(p.Channels)\n\ts.buffer = s.buffer[:p.Length*p.Channels]\n\treturn s\n}", "func GetInt64Data(response *bcsmonitor.QueryResponse) int64 {\n\tif len(response.Data.Result) == 0 {\n\t\treturn 0\n\t}\n\tvalueStr, ok := response.Data.Result[0].Value[1].(string)\n\tif !ok {\n\t\treturn 0\n\t}\n\tvalue, err := strconv.ParseInt(valueStr, 10, 64)\n\tif err != nil {\n\t\treturn 0\n\t}\n\treturn value\n}", "func (s *Smplen) Float() float64 {\n\treturn s.f\n}", "func (i *Number) AsFloat64() float64 {\n\treturn i.value\n}", "func (r *Reader) Float64() float64 {\n\treturn math.Float64frombits(r.Uint64())\n}", "func GetFloat64(v interface{}) float64 {\n\treturn cache2.GetFloat64(v)\n}", "func (v Float) Native() interface{} {\n\treturn float64(v)\n}", "func (this *JSONObject) Float64(key string) float64 {\n\treturn this.innerMap[key].(float64)\n}", "func (c *Context) Data(data interface{}, total ...int64) {\n\tc.responseFormat.SetData(data, total...)\n}", "func (v Derive) Float64() float64 {\n\treturn float64(v)\n}", "func (a AmountFigure) Float64() float64 {\n\treturn float64(a) / 100\n}", "func (o *FloatObject) AsFloat() (float64) {\n return o.Value\n}", "func (p RedisDsl) GET_FLOAT64(key string) (*float64, error) {\n\treturn ReplyToFloat64Ptr(p.GET(key))\n}", "func GetFloat64(key string) float64 {\n\treturn 0\n}", "func (v *Float64) Read(d []byte) {\n\thead := (*reflect.SliceHeader)(unsafe.Pointer(&d))\n\tv.Value = (*float64)(unsafe.Pointer(head.Data))\n\tv.Bytes = d[:SzFloat64]\n}", "func (m *Message) getFloat64() float64 {\n\tb := m.bufferForGet()\n\tdefer b.Advance(8)\n\n\treturn math.Float64frombits(binary.LittleEndian.Uint64(b.Bytes[b.Offset:]))\n}", "func (x *Rat) Float64() (f float64, exact bool) {}", "func (v Value) Float() float64 {\n\treturn v.v.Float()\n}", "func (m *Manager) ReadFloat64() float64 {\n\treturn m.readFloat(64)\n}", "func (c *Context) GetFloat64(key interface{}) float64 {\n\treturn c.Get(key).(float64)\n}", "func (mw *Stats) Data() *Data {\n\tmw.mu.RLock()\n\n\tresponseCounts := make(map[string]int, len(mw.ResponseCounts))\n\ttotalResponseCounts := make(map[string]int, len(mw.TotalResponseCounts))\n\ttotalMetricsCounts := make(map[string]int, len(mw.MetricsCounts))\n\tmetricsCounts := make(map[string]float64, len(mw.MetricsCounts))\n\n\tnow := time.Now()\n\n\tuptime := now.Sub(mw.Uptime)\n\n\tcount := 0\n\tfor code, current := range mw.ResponseCounts {\n\t\tresponseCounts[code] = current\n\t\tcount += current\n\t}\n\n\ttotalCount := 0\n\tfor code, count := range mw.TotalResponseCounts {\n\t\ttotalResponseCounts[code] = count\n\t\ttotalCount += count\n\t}\n\n\ttotalResponseTime := mw.TotalResponseTime.Sub(time.Time{})\n\ttotalResponseSize := mw.TotalResponseSize\n\n\taverageResponseTime := time.Duration(0)\n\taverageResponseSize := int64(0)\n\tif totalCount > 0 {\n\t\tavgNs := int64(totalResponseTime) / 
int64(totalCount)\n\t\taverageResponseTime = time.Duration(avgNs)\n\t\taverageResponseSize = int64(totalResponseSize) / int64(totalCount)\n\t}\n\n\tfor key, count := range mw.MetricsCounts {\n\t\ttotalMetric := mw.MetricsTimers[key].Sub(time.Time{})\n\t\tavgNs := int64(totalMetric) / int64(count)\n\t\tmetricsCounts[key] = time.Duration(avgNs).Seconds()\n\t\ttotalMetricsCounts[key] = count\n\t}\n\n\tmw.mu.RUnlock()\n\n\tr := &Data{\n\t\tPid: mw.Pid,\n\t\tUpTime: uptime.String(),\n\t\tUpTimeSec: uptime.Seconds(),\n\t\tTime: now.String(),\n\t\tTimeUnix: now.Unix(),\n\t\tStatusCodeCount: responseCounts,\n\t\tTotalStatusCodeCount: totalResponseCounts,\n\t\tCount: count,\n\t\tTotalCount: totalCount,\n\t\tTotalResponseTime: totalResponseTime.String(),\n\t\tTotalResponseSize: totalResponseSize,\n\t\tTotalResponseTimeSec: totalResponseTime.Seconds(),\n\t\tTotalMetricsCounts: totalMetricsCounts,\n\t\tAverageResponseSize: averageResponseSize,\n\t\tAverageResponseTime: averageResponseTime.String(),\n\t\tAverageResponseTimeSec: averageResponseTime.Seconds(),\n\t\tAverageMetricsTimers: metricsCounts,\n\t}\n\n\treturn r\n}", "func Float64(r interface{}, err error) (float64, error) {\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tswitch r := r.(type) {\n\tcase float64:\n\t\treturn r, err\n\tcase []byte:\n\t\tn, err := strconv.ParseFloat(string(r), 64)\n\t\treturn n, err\n\tcase string:\n\t\tn, err := strconv.ParseFloat(r, 64)\n\t\treturn n, err\n\tcase nil:\n\t\treturn 0, simplesessions.ErrNil\n\t}\n\treturn 0, simplesessions.ErrAssertType\n}", "func GetFloat64(key string) float64 {\n\treturn v.GetFloat64(key)\n}", "func (s *Smpval) Float() float64 {\n\treturn s.f\n}", "func Float(v float64) *float64 {\n\treturn &v\n}", "func (v Absolute) Float64() float64 {\n\treturn float64(v)\n}", "func (d LegacyDec) Float64() (float64, error) {\n\treturn strconv.ParseFloat(d.String(), 64)\n}", "func Float(param interface{}) float64 {\n\tvar v float64\n\tif param != nil {\n\t\tswitch param.(type) {\n\t\tcase int64:\n\t\t\tv = float64(param.(int64))\n\t\tdefault:\n\t\t\tv = param.(float64)\n\t\t}\n\t}\n\treturn v\n}", "func (num Number) Float64() (float64, bool) {\n\tf, err := json.Number(num).Float64()\n\tif err != nil {\n\t\treturn 0, false\n\t}\n\treturn f, true\n}", "func (p Payload) GetFloat64(key string) (float64, error) {\n\tvalue, err := p.Get(key)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif value, ok := value.(float64); ok {\n\t\treturn value, nil\n\t}\n\treturn 0, fmt.Errorf(\"Value with key '%s' not a float64\", key)\n}", "func GetFloat64(key string) float64 {\n\treturn cargoboat.GetFloat64(key)\n}", "func (v *Value) AsFloat64(dv float64) float64 {\n\tif v.IsUndefined() {\n\t\treturn dv\n\t}\n\tswitch tv := v.raw.(type) {\n\tcase string:\n\t\tf, err := strconv.ParseFloat(tv, 64)\n\t\tif err != nil {\n\t\t\treturn dv\n\t\t}\n\t\treturn f\n\tcase int:\n\t\treturn float64(tv)\n\tcase float64:\n\t\treturn tv\n\tcase bool:\n\t\tif tv {\n\t\t\treturn 1.0\n\t\t}\n\t\treturn 0.0\n\tcase time.Time:\n\t\tns := tv.UnixNano()\n\t\treturn float64(ns)\n\tcase time.Duration:\n\t\tns := tv.Nanoseconds()\n\t\treturn float64(ns)\n\t}\n\treturn dv\n}", "func (j *JSONData) Number() *float64 {\n\tif j != nil && j.value != nil {\n\t\tif float, ok := (*j.value).(float64); ok {\n\t\t\treturn &float\n\t\t}\n\t}\n\treturn nil\n}", "func (v Base) Float() float64 {\n\treturn float64(v)\n}", "func (t *Dense) Data() interface{} {\n\tif t.IsScalar() {\n\t\treturn t.Get(0)\n\t}\n\treturn t.v\n}", "func floatHandler(set func(float64) 
error, get func() float64) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tvars := mux.Vars(r)\n\n\t\tval, err := strconv.ParseFloat(vars[\"value\"], 64)\n\t\tif err == nil {\n\t\t\terr = set(val)\n\t\t}\n\n\t\tif err != nil {\n\t\t\tjsonError(w, http.StatusBadRequest, err)\n\t\t\treturn\n\t\t}\n\n\t\tjsonResult(w, get())\n\t}\n}", "func (f Float64) Float64(_ context.Context) (*float64, error) {\n\tif f > 0 {\n\t\tvalue := float64(f)\n\t\treturn &value, nil\n\t}\n\treturn nil, nil\n}", "func (rs *ReceiptSelect) Float64(ctx context.Context) (_ float64, err error) {\n\tvar v []float64\n\tif v, err = rs.Float64s(ctx); err != nil {\n\t\treturn\n\t}\n\tswitch len(v) {\n\tcase 1:\n\t\treturn v[0], nil\n\tcase 0:\n\t\terr = &NotFoundError{receipt.Label}\n\tdefault:\n\t\terr = fmt.Errorf(\"ent: ReceiptSelect.Float64s returned %d results when one was expected\", len(v))\n\t}\n\treturn\n}", "func (tv *TypedDecimal) Float() float64 {\n\tfloatVal, _ := strconv.ParseFloat(strDecimal64(tv.Decimal64()), 64)\n\treturn floatVal\n}", "func LoadRawData(raw interface{}) (f Float64Data) {\n\tvar r []interface{}\n\tvar s Float64Data\n\n\tswitch t := raw.(type) {\n\tcase []interface{}:\n\t\tr = t\n\tcase []uint:\n\t\tfor _, v := range t {\n\t\t\ts = append(s, float64(v))\n\t\t}\n\t\treturn s\n\tcase []uint8:\n\t\tfor _, v := range t {\n\t\t\ts = append(s, float64(v))\n\t\t}\n\t\treturn s\n\tcase []uint16:\n\t\tfor _, v := range t {\n\t\t\ts = append(s, float64(v))\n\t\t}\n\t\treturn s\n\tcase []uint32:\n\t\tfor _, v := range t {\n\t\t\ts = append(s, float64(v))\n\t\t}\n\t\treturn s\n\tcase []uint64:\n\t\tfor _, v := range t {\n\t\t\ts = append(s, float64(v))\n\t\t}\n\t\treturn s\n\tcase []bool:\n\t\tfor _, v := range t {\n\t\t\tif v == true {\n\t\t\t\ts = append(s, 1.0)\n\t\t\t} else {\n\t\t\t\ts = append(s, 0.0)\n\t\t\t}\n\t\t}\n\t\treturn s\n\tcase []float64:\n\t\treturn Float64Data(t)\n\tcase []int:\n\t\tfor _, v := range t {\n\t\t\ts = append(s, float64(v))\n\t\t}\n\t\treturn s\n\tcase []int8:\n\t\tfor _, v := range t {\n\t\t\ts = append(s, float64(v))\n\t\t}\n\t\treturn s\n\tcase []int16:\n\t\tfor _, v := range t {\n\t\t\ts = append(s, float64(v))\n\t\t}\n\t\treturn s\n\tcase []int32:\n\t\tfor _, v := range t {\n\t\t\ts = append(s, float64(v))\n\t\t}\n\t\treturn s\n\tcase []int64:\n\t\tfor _, v := range t {\n\t\t\ts = append(s, float64(v))\n\t\t}\n\t\treturn s\n\tcase []string:\n\t\tfor _, v := range t {\n\t\t\tr = append(r, v)\n\t\t}\n\tcase []time.Duration:\n\t\tfor _, v := range t {\n\t\t\tr = append(r, v)\n\t\t}\n\tcase map[int]int:\n\t\tfor i := 0; i < len(t); i++ {\n\t\t\ts = append(s, float64(t[i]))\n\t\t}\n\t\treturn s\n\tcase map[int]int8:\n\t\tfor i := 0; i < len(t); i++ {\n\t\t\ts = append(s, float64(t[i]))\n\t\t}\n\t\treturn s\n\tcase map[int]int16:\n\t\tfor i := 0; i < len(t); i++ {\n\t\t\ts = append(s, float64(t[i]))\n\t\t}\n\t\treturn s\n\tcase map[int]int32:\n\t\tfor i := 0; i < len(t); i++ {\n\t\t\ts = append(s, float64(t[i]))\n\t\t}\n\t\treturn s\n\tcase map[int]int64:\n\t\tfor i := 0; i < len(t); i++ {\n\t\t\ts = append(s, float64(t[i]))\n\t\t}\n\t\treturn s\n\tcase map[int]string:\n\t\tfor i := 0; i < len(t); i++ {\n\t\t\tr = append(r, t[i])\n\t\t}\n\tcase map[int]uint:\n\t\tfor i := 0; i < len(t); i++ {\n\t\t\ts = append(s, float64(t[i]))\n\t\t}\n\t\treturn s\n\tcase map[int]uint8:\n\t\tfor i := 0; i < len(t); i++ {\n\t\t\ts = append(s, float64(t[i]))\n\t\t}\n\t\treturn s\n\tcase map[int]uint16:\n\t\tfor i := 0; i < len(t); i++ {\n\t\t\ts = append(s, 
float64(t[i]))\n\t\t}\n\t\treturn s\n\tcase map[int]uint32:\n\t\tfor i := 0; i < len(t); i++ {\n\t\t\ts = append(s, float64(t[i]))\n\t\t}\n\t\treturn s\n\tcase map[int]uint64:\n\t\tfor i := 0; i < len(t); i++ {\n\t\t\ts = append(s, float64(t[i]))\n\t\t}\n\t\treturn s\n\tcase map[int]bool:\n\t\tfor i := 0; i < len(t); i++ {\n\t\t\tif t[i] == true {\n\t\t\t\ts = append(s, 1.0)\n\t\t\t} else {\n\t\t\t\ts = append(s, 0.0)\n\t\t\t}\n\t\t}\n\t\treturn s\n\tcase map[int]float64:\n\t\tfor i := 0; i < len(t); i++ {\n\t\t\ts = append(s, t[i])\n\t\t}\n\t\treturn s\n\tcase map[int]time.Duration:\n\t\tfor i := 0; i < len(t); i++ {\n\t\t\tr = append(r, t[i])\n\t\t}\n\t}\n\n\tfor _, v := range r {\n\t\tswitch t := v.(type) {\n\t\tcase int:\n\t\t\ta := float64(t)\n\t\t\tf = append(f, a)\n\t\tcase uint:\n\t\t\tf = append(f, float64(t))\n\t\tcase float64:\n\t\t\tf = append(f, t)\n\t\tcase string:\n\t\t\tfl, err := strconv.ParseFloat(t, 64)\n\t\t\tif err == nil {\n\t\t\t\tf = append(f, fl)\n\t\t\t}\n\t\tcase bool:\n\t\t\tif t == true {\n\t\t\t\tf = append(f, 1.0)\n\t\t\t} else {\n\t\t\t\tf = append(f, 0.0)\n\t\t\t}\n\t\tcase time.Duration:\n\t\t\tf = append(f, float64(t))\n\t\t}\n\t}\n\treturn f\n}", "func (v Counter) Float64() float64 {\n\treturn float64(v)\n}", "func (n Number) Float64() float64 {\n\treturn float64(n)\n}", "func (a Allocator) Float64() Floating {\n\treturn &f64{\n\t\tbuffer: make([]float64, a.Channels*a.Length, a.Channels*a.Capacity),\n\t\tchannels: channels(a.Channels),\n\t}\n}", "func (d Decimal) Float64() (f float64, exact bool) {\n\treturn d.Rat().Float64()\n}", "func (d Decimal) Float64() (f float64, exact bool) {\n\treturn d.Rat().Float64()\n}", "func AsFloat(r *http.Request, key string, decimalSeparator rune) float64 {\n\tif err := r.ParseForm(); err != nil {\n\t\tlog.Println(r.RequestURI, err)\n\t\treturn 0\n\t}\n\n\ts := r.FormValue(key)\n\n\tif s == \"\" {\n\t\ts = r.URL.Query().Get(key)\n\t}\n\n\tif s == \"\" {\n\t\tparams := mux.Vars(r)\n\t\ts = params[key]\n\t}\n\n\tif s == \"\" {\n\t\treturn 0\n\t}\n\n\tthousandSeparator := ','\n\n\tif decimalSeparator == ',' {\n\t\tthousandSeparator = '.'\n\t}\n\n\ts = strings.ReplaceAll(s, string(thousandSeparator), \"\")\n\n\tf, _ := strconv.ParseFloat(s, 64)\n\n\treturn f\n}", "func (f Float) Float64() float64 {\n\tpanic(\"not yet implemented\")\n}", "func GetFloat64(dict map[string]interface{}, key string) (float64, error) {\n\treturn getFloat64(dict, key, 0, fmt.Errorf(\"key %s not found\", key))\n}", "func (m *Float64Metric) Get() float64 {\n\treturn m.get().(float64)\n}", "func Float64Value(p *float64) float64 {\n\tif p != nil {\n\t\treturn *p\n\t}\n\tvar v float64\n\treturn v\n}", "func (m *Value) Double() float64 { return m.DoubleMock() }", "func (m *Metrics) Data() (*Data, error) {\n\tvar (\n\t\ttotalResponseTime float64\n\t\tmaxTime float64\n\t\tminTime = math.MaxFloat64\n\t\tbufsize = len(m.requests)\n\t\tpercentiledTime = make(percentiledTimeMap)\n\t)\n\tm.m.RLock()\n\tdefer m.m.RUnlock()\n\tfor _, v := range m.requests {\n\t\ttotalResponseTime += v\n\n\t\tif minTime > v {\n\t\t\tminTime = v\n\t\t}\n\t\tif maxTime < v {\n\t\t\tmaxTime = v\n\t\t}\n\t}\n\n\tfor _, p := range percents {\n\t\tvar err error\n\t\tpercentiledTime[p], err = m.requests.Percentile(float64(p))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tstatusCount := make(statusCountMap)\n\tfor _, status := range httpStatuses {\n\t\tstatusCount[status] = atomic.LoadInt64(&m.statusCount[status])\n\t}\n\n\treturn &Data{\n\t\tRequest: 
RequestData{\n\t\t\tCount: atomic.LoadInt64(&m.count),\n\t\t\tStatusCount: statusCount,\n\t\t},\n\t\tResponse: ResponseData{\n\t\t\tMaxTime: maxTime,\n\t\t\tMinTime: minTime,\n\t\t\tAverageTime: totalResponseTime / float64(bufsize),\n\t\t\tPercentiledTime: percentiledTime,\n\t\t},\n\t}, nil\n}", "func (c *JSONElement) AsFloat64(def float64) (value float64) {\n\tvalue, err := c.Json.Float64()\n\tif err != nil {\n\t\treturn def\n\t}\n\n\treturn value\n}", "func (v Value) Float() float64 {\n\tswitch {\n\tcase v == 0:\n\t\treturn 0\n\tcase v == 64:\n\t\treturn 0.5\n\tcase v == 127:\n\t\treturn 1\n\tcase v < 64:\n\t\treturn float64(v) / 128\n\tdefault:\n\t\treturn float64(v-1) / 126\n\t}\n}", "func (v *Value) Float64() float64 {\n\tswitch {\n\tcase v.fvalOk:\n\tcase v.ivalOk:\n\t\tv.fval = float64(v.ival)\n\t\tv.fvalOk = true\n\tcase v.svalOk:\n\t\t// Perform a best-effort conversion from string to float64.\n\t\tv.fval = 0.0\n\t\tstrs := matchFloat.FindStringSubmatch(v.sval)\n\t\tif len(strs) >= 2 {\n\t\t\tv.fval, _ = strconv.ParseFloat(strs[1], 64)\n\t\t}\n\t\tv.fvalOk = true\n\t}\n\treturn v.fval\n}", "func (v *Value) Float() (*encoding.Float, error) {\n\tif v.vfloat != nil {\n\t\treturn v.vfloat, nil\n\t}\n\n\tvfloat := encoding.NewFloat()\n\terr := vfloat.UnmarshalBinary(v.Raw)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tv.vfloat = vfloat\n\treturn vfloat, nil\n}", "func (b Datasize) Bytes() float64 {\n\treturn float64(b / Byte)\n}", "func (d Decimal) Float64() (float64, error) {\n\treturn strconv.ParseFloat(string(d), 64)\n}", "func (s Stream) ReadFloat64() (float64, error) {\n\tvar result float64\n\terr := binary.Read(s, binary.LittleEndian, &result)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn result, nil\n}", "func Float(flag string, value float64, description string) *float64 {\n\tvar v float64\n\tFloatVar(&v, flag, value, description)\n\treturn &v\n}", "func (v Document) QueryFloat64(query string) float64 {\n\tr, ok := v.QueryOne(query).(float64)\n\tif !ok {\n\t\treturn 0\n\t}\n\n\treturn r\n}", "func (v Gauge) Float64() float64 {\n\treturn float64(v)\n}", "func parseFloat64(content []byte, aggErr *AggregateError) float64 {\n result, err := strconv.ParseFloat(string(content), 64)\n if err != nil {\n aggErr.Append(err)\n }\n return result\n}", "func (p *Stream) ReadFloat64() (float64, *base.Error) {\n\tv := p.readFrame[p.readIndex]\n\tif v == 4 {\n\t\tif p.CanRead() {\n\t\t\tp.gotoNextReadByteUnsafe()\n\t\t\treturn 0, nil\n\t\t}\n\t} else if v == 5 {\n\t\tif p.isSafetyReadNBytesInCurrentFrame(9) {\n\t\t\tb := p.readFrame[p.readIndex:]\n\t\t\tp.readIndex += 9\n\t\t\treturn math.Float64frombits(\n\t\t\t\tuint64(b[1]) |\n\t\t\t\t\t(uint64(b[2]) << 8) |\n\t\t\t\t\t(uint64(b[3]) << 16) |\n\t\t\t\t\t(uint64(b[4]) << 24) |\n\t\t\t\t\t(uint64(b[5]) << 32) |\n\t\t\t\t\t(uint64(b[6]) << 40) |\n\t\t\t\t\t(uint64(b[7]) << 48) |\n\t\t\t\t\t(uint64(b[8]) << 56),\n\t\t\t), nil\n\t\t}\n\t\tif p.hasNBytesToRead(9) {\n\t\t\tb := p.readNBytesCrossFrameUnsafe(9)\n\t\t\treturn math.Float64frombits(\n\t\t\t\tuint64(b[1]) |\n\t\t\t\t\t(uint64(b[2]) << 8) |\n\t\t\t\t\t(uint64(b[3]) << 16) |\n\t\t\t\t\t(uint64(b[4]) << 24) |\n\t\t\t\t\t(uint64(b[5]) << 32) |\n\t\t\t\t\t(uint64(b[6]) << 40) |\n\t\t\t\t\t(uint64(b[7]) << 48) |\n\t\t\t\t\t(uint64(b[8]) << 56),\n\t\t\t), nil\n\t\t}\n\t}\n\treturn 0, base.ErrStream\n}", "func (p *RedisHashFieldCounterFloat64) Float64() (float64, error) {\n\treturn p.operationReturnsAmount(\"HGET\")\n}", "func (gs *GoodsSelect) Float64(ctx context.Context) (_ 
float64, err error) {\n\tvar v []float64\n\tif v, err = gs.Float64s(ctx); err != nil {\n\t\treturn\n\t}\n\tswitch len(v) {\n\tcase 1:\n\t\treturn v[0], nil\n\tcase 0:\n\t\terr = &NotFoundError{goods.Label}\n\tdefault:\n\t\terr = fmt.Errorf(\"ent: GoodsSelect.Float64s returned %d results when one was expected\", len(v))\n\t}\n\treturn\n}", "func (hm *HM) ReadFloat(addr int) (val float64, err error) {\n\tb, err := hm.shm.ReadN(addr, int(unsafe.Sizeof(float64(0))))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tbits := binary.BigEndian.Uint64(b)\n\treturn math.Float64frombits(bits), nil\n}", "func (hs *HarborSelect) Float64(ctx context.Context) (_ float64, err error) {\n\tvar v []float64\n\tif v, err = hs.Float64s(ctx); err != nil {\n\t\treturn\n\t}\n\tswitch len(v) {\n\tcase 1:\n\t\treturn v[0], nil\n\tcase 0:\n\t\terr = &NotFoundError{harbor.Label}\n\tdefault:\n\t\terr = fmt.Errorf(\"ent: HarborSelect.Float64s returned %d results when one was expected\", len(v))\n\t}\n\treturn\n}", "func (n StringNumber) Float64() float64 {\n\treturn float64(n)\n}", "func (v Value) AsFloat() float64 {\n\treturn v.iface.(float64)\n}", "func (d Decimal) Float64() (f float64, exact bool) {\n\treturn d.val.Float64()\n}", "func (form *FormData) Float64(key string, target *float64, defaultValue float64) *FormData {\n\treturn form.mustValue(key, target, defaultValue)\n}", "func (f Fixed8) FloatValue() float64 {\n\treturn float64(f) / decimals\n}", "func (c *Client) QueryFloat64(q string) (float64, error) {\n\tn, err := c.QueryNumber(q)\n\tif err != nil {\n\t\treturn 0.0, err\n\t}\n\treturn n.Float64()\n}" ]
[ "0.7289144", "0.63614255", "0.61734873", "0.6018624", "0.5833616", "0.58181524", "0.57975155", "0.5772632", "0.576887", "0.57428145", "0.57083213", "0.5594193", "0.55241936", "0.546558", "0.54640913", "0.54607713", "0.5460503", "0.54176515", "0.5403042", "0.5389518", "0.5374623", "0.534869", "0.533966", "0.5332349", "0.53029174", "0.53005725", "0.52759844", "0.527107", "0.52668536", "0.52663064", "0.5265515", "0.5260742", "0.52599186", "0.5258602", "0.52446216", "0.5244595", "0.5229584", "0.5227245", "0.52271116", "0.5203128", "0.5190682", "0.51866937", "0.5182627", "0.51771605", "0.5153954", "0.5138812", "0.5134164", "0.5087306", "0.5085615", "0.5081697", "0.50802803", "0.5075338", "0.50725734", "0.50601524", "0.5052224", "0.5037955", "0.5027392", "0.5020824", "0.50182414", "0.50177723", "0.501547", "0.5015114", "0.5014453", "0.50103354", "0.50077736", "0.5006087", "0.5000644", "0.49951595", "0.4990395", "0.49866557", "0.49866557", "0.49841204", "0.4983805", "0.49738848", "0.4969206", "0.49679726", "0.49672377", "0.4965387", "0.49577338", "0.49533504", "0.49510932", "0.49509284", "0.49468017", "0.49426493", "0.49370232", "0.49349", "0.49332398", "0.49331784", "0.4929073", "0.4922169", "0.4910703", "0.4907458", "0.49074247", "0.49059328", "0.4903952", "0.48966095", "0.48965427", "0.48961803", "0.4892417", "0.48902044" ]
0.61657727
3
Convert Response.Data to the most appropriate type. Useful when you want a concrete type but don't know it ahead of time.
func (r *Response) Interface() interface{} { // Attempt int64 i, err := strconv.ParseInt(r.String(), 10, 64) if err == nil { return i } // Attempt float64 f, err := strconv.ParseFloat(r.String(), 64) if err == nil { return f } // Attempt bool b, err := strconv.ParseBool(r.String()) if err == nil { return b } return r.String() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (ctx *impl) Data(obj interface{}) (rsp []byte, err error) {\n\tvar (\n\t\treq *bytes.Buffer\n\t\twritten int64\n\t\tct string\n\t)\n\n\tif ctx.Request == nil {\n\t\trsp, err = ctx.DataError(\"net/http is nil, can't retrieve data\")\n\t\treturn\n\t}\n\treq = &bytes.Buffer{}\n\t// Получение запроса\n\tdefer func() { _ = ctx.Request.Body.Close() }()\n\tif written, err = io.Copy(req, ctx.Request.Body); err != nil {\n\t\trsp, err = ctx.DataError(\"reading data of request error: %s\", err)\n\t\treturn\n\t} else if written < 2 {\n\t\trsp, err = ctx.DataError(\"request data is empty\")\n\t\treturn\n\t}\n\t// Тип кодирования выбирается на основе Content-Type заголовка\n\tct = ctx.Request.Header.Get(header.ContentType)\n\tswitch {\n\tcase strings.Contains(ct, mime.ApplicationJSON):\n\t\terr = json.NewDecoder(req).Decode(obj)\n\tcase strings.Contains(ct, mime.TextXML), strings.Contains(ct, mime.ApplicationXML):\n\t\terr = xml.NewDecoder(req).Decode(obj)\n\tdefault:\n\t\terr = fmt.Errorf(\"unknown content type: %q\", ct)\n\t\treturn\n\t}\n\tif err != nil {\n\t\trsp, err = ctx.DataError(\"decoding data error: %s\", err)\n\t\treturn\n\t}\n\t// Верификация данных с использованием внешней библиотеки\n\tif rsp, err = ctx.Verify(obj); err != nil {\n\t\trsp, err = ctx.DataError(\"verification of data error: %s\", err)\n\t\treturn\n\t}\n\n\treturn\n}", "func DataResponse(data interface{}) Response {\n\treturn Response{\n\t\tCode: 200,\n\t\tData: data,\n\t\tMessage: \"ok\",\n\t}\n}", "func AsData(ip string, data interface{}) (resp Response, err error) {\n\traw, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn AsError(ip, err)\n\t}\n\n\tresp = Response{\n\t\tStatusCode: 200,\n\t\tHeaders: jsonHeaders,\n\t\tBody: string(raw),\n\t}\n\treturn\n}", "func dataObjectFieldResponse(f *models.DataObjectField, val string) interface{} {\n\t// If field is string or text type - return as is.\n\tif f.FType == models.FieldStringType || f.FType == models.FieldTextType {\n\t\treturn val\n\t} else if val == \"\" {\n\t\t// If non-string, non-text field is empty - return nil.\n\t\treturn nil\n\t}\n\n\tswitch f.FType {\n\tcase models.FieldIntegerType:\n\t\tif v, err := strconv.Atoi(val); err == nil {\n\t\t\treturn v\n\t\t}\n\n\tcase models.FieldFloatType:\n\t\tif v, err := strconv.ParseFloat(val, 64); err == nil {\n\t\t\treturn v\n\t\t}\n\n\tcase models.FieldBooleanType:\n\t\treturn util.IsTrue(val)\n\n\tcase models.FieldDatetimeType:\n\t\tif v, err := f.FromString(val); err == nil {\n\t\t\treturn struct {\n\t\t\t\tType string `json:\"type\"`\n\t\t\t\tValue fields.Time `json:\"value\"`\n\t\t\t}{\n\t\t\t\tType: f.FType,\n\t\t\t\tValue: v.(fields.Time),\n\t\t\t}\n\t\t}\n\n\tcase models.FieldFileType:\n\t\treturn struct {\n\t\t\tType string `json:\"type\"`\n\t\t\tValue string `json:\"value\"`\n\t\t}{\n\t\t\tType: f.FType,\n\t\t\tValue: settings.API.StorageURL + val,\n\t\t}\n\n\tcase models.FieldReferenceType:\n\t\tif v, err := f.FromString(val); err == nil {\n\t\t\treturn struct {\n\t\t\t\tType string `json:\"type\"`\n\t\t\t\tTarget string `json:\"target\"`\n\t\t\t\tValue int `json:\"value\"`\n\t\t\t}{\n\t\t\t\tType: f.FType,\n\t\t\t\tTarget: f.Target,\n\t\t\t\tValue: v.(int),\n\t\t\t}\n\t\t}\n\n\tcase models.FieldRelationType:\n\t\tif v, err := f.FromString(val); err == nil {\n\t\t\treturn struct {\n\t\t\t\tType string `json:\"type\"`\n\t\t\t\tTarget string `json:\"target\"`\n\t\t\t\tValue []int `json:\"value\"`\n\t\t\t}{\n\t\t\t\tType: f.FType,\n\t\t\t\tTarget: f.Target,\n\t\t\t\tValue: 
v.([]int),\n\t\t\t}\n\t\t}\n\n\tcase models.FieldObjectType, models.FieldArrayType:\n\t\tif v, err := f.FromString(val); err == nil {\n\t\t\treturn v\n\t\t}\n\n\tcase models.FieldGeopointType:\n\t\tif g, err := ewkbhex.Decode(val); err == nil {\n\t\t\tp := g.(*geom.Point)\n\n\t\t\treturn struct {\n\t\t\t\tType string `json:\"type\"`\n\t\t\t\tLongitude float64 `json:\"longitude\"`\n\t\t\t\tLatitude float64 `json:\"latitude\"`\n\t\t\t}{\n\t\t\t\tType: f.FType,\n\t\t\t\tLongitude: p.X(),\n\t\t\t\tLatitude: p.Y(),\n\t\t\t}\n\t\t}\n\t}\n\n\treturn val\n}", "func (resp *DataResponse) SetData(data interface{}) error {\n\tkind := reflect.ValueOf(data).Kind()\n\terrMessage := \"Argument data should be of type struct or array/slice of struct!\"\n\n\tswitch {\n\tcase kind == reflect.Struct:\n\t\tresp.Data = data\n\tcase kind == reflect.Map:\n\t\tresp.Data = data\n\tcase kind == reflect.Array || kind == reflect.Slice:\n\t\tvalue := reflect.ValueOf(data)\n\t\tif value.Len() > 0 {\n\t\t\tif value.Index(0).Kind() == reflect.Struct {\n\t\t\t\tresp.Data = data\n\t\t\t} else {\n\t\t\t\treturn errors.New(errMessage)\n\t\t\t}\n\t\t} else {\n\t\t\tresp.Data = data\n\t\t}\n\tdefault:\n\t\treturn errors.New(errMessage)\n\t}\n\treturn nil\n}", "func (ctx *Context) Data(code int, contentType string, data []byte) {\n\tctx.Response.StatusCode = code\n\tctx.SetContentType(contentType)\n\tctx.Response.Body = ioutil.NopCloser(bytes.NewBuffer(data))\n}", "func (self *WebServer) respData(c *gin.Context, status, code int,\n\tmessage string, data interface{}) {\n\tc.JSON(status, &CR{\n\t\tMessage: message,\n\t\tCode: code,\n\t\tTimestamp: time.Now().Unix(),\n\t})\n}", "func (c *Client) Do(req *http.Request, v interface{}) (*Response, error) {\n\tresp, err := c.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tresponse := newResponse(resp)\n\n\tif resp.StatusCode > maxHTTPCode {\n\t\terrMsg := resp.Status + \" returned from balldontlie\"\n\t\tif strings.Contains(resp.Header.Get(\"Content-Type\"), \"application/json;\") {\n\t\t\terr = json.NewDecoder(resp.Body).Decode(new(interface{}))\n\t\t\tif err != nil {\n\t\t\t\treturn response, err\n\t\t\t}\n\t\t} else if strings.Contains(resp.Header.Get(\"Content-Type\"), \"text/plain;\") {\n\t\t\terrBuf := &bytes.Buffer{}\n\t\t\tbufio.NewReader(resp.Body).WriteTo(errBuf)\n\t\t\terrMsg += \": \" + errBuf.String()\n\t\t} else {\n\t\t\terrMsg += \": \" + fmt.Sprintf(\"Response with unexpected Content-Type - %s received\", resp.Header.Get(\"Content-Type\"))\n\t\t}\n\t\treturn response, fmt.Errorf(errMsg)\n\t}\n\n\tif v != nil {\n\t\tresponse.Container = v\n\t}\n\n\tif strings.Contains(resp.Header.Get(\"Content-Type\"), \"application/json;\") {\n\t\terr = json.NewDecoder(resp.Body).Decode(response.Container)\n\t\tif err != nil {\n\t\t\treturn response, err\n\t\t}\n\t} else {\n\t\treturn response, fmt.Errorf(\"Response with unexpected Content-Type - %s received\", resp.Header.Get(\"Content-Type\"))\n\t}\n\n\treturn response, nil\n}", "func (o FunctionInputResponseOutput) DataType() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FunctionInputResponse) *string { return v.DataType }).(pulumi.StringPtrOutput)\n}", "func (o FunctionOutputResponseOutput) DataType() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FunctionOutputResponse) *string { return v.DataType }).(pulumi.StringPtrOutput)\n}", "func (r *Response) Data(data interface{}) JResponseWriter {\n\treturn r.Field(fieldData, data)\n}", "func ResponseToStruct(res *http.Response, v 
interface{}) error {\n\tvar reader io.ReadCloser\n\tvar err error\n\tswitch res.Header.Get(\"Content-Encoding\") {\n\tcase gzipHeader:\n\t\treader, err = gzip.NewReader(res.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer reader.Close()\n\tcase deflateHeader:\n\t\treader = flate.NewReader(res.Body)\n\t\tdefer reader.Close()\n\tdefault:\n\t\treader = res.Body\n\t}\n\n\tdecoder := gojay.BorrowDecoder(reader)\n\tdefer decoder.Release()\n\n\terr = decoder.Decode(&v)\n\tif err != nil {\n\t\treturn json.NewDecoder(reader).Decode(v)\n\t}\n\n\treturn nil\n}", "func (recycleBinItemResp *RecycleBinItemResp) Data() *RecycleBinItemInfo {\n\tdata := NormalizeODataItem(*recycleBinItemResp)\n\tres := &RecycleBinItemInfo{}\n\tjson.Unmarshal(data, &res)\n\treturn res\n}", "func (app *Configurable) SetResponseData(parameters map[string]string) interfaces.AppFunction {\n\ttransform := transforms.ResponseData{}\n\n\tvalue, ok := parameters[ResponseContentType]\n\tif ok && len(value) > 0 {\n\t\ttransform.ResponseContentType = value\n\t}\n\n\treturn transform.SetResponseData\n}", "func Data(status int, content []byte, headers Headers) *Response {\n\treturn &Response{\n\t\tStatus: status,\n\t\tContent: bytes.NewBuffer(content),\n\t\tHeaders: headers,\n\t}\n}", "func replyDataType(reply interface{}, err error) (dt DataType, outputErr error) {\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tswitch reply := reply.(type) {\n\tcase string:\n\t\tswitch reply {\n\t\tcase \"FLOAT\":\n\t\t\tdt = TypeFloat\n\t\tcase \"DOUBLE\":\n\t\t\tdt = TypeDouble\n\t\tcase \"INT8\":\n\t\t\tdt = TypeInt8\n\t\tcase \"INT16\":\n\t\t\tdt = TypeInt16\n\t\tcase \"INT32\":\n\t\t\tdt = TypeInt32\n\t\tcase \"INT64\":\n\t\t\tdt = TypeInt64\n\t\tcase \"UINT8\":\n\t\t\tdt = TypeUint8\n\t\tcase \"UINT16\":\n\t\t\tdt = TypeUint16\n\t\t}\n\t\treturn dt, nil\n\tcase nil:\n\t\treturn \"\", ErrNil\n\n\t}\n\treturn \"\", fmt.Errorf(\"redisai-go: unexpected type for replyDataType, got type %T\", reply)\n}", "func processResponse(resp mesos.Response, t agent.Response_Type) (agent.Response, error) {\n\tvar r agent.Response\n\tdefer func() {\n\t\tif resp != nil {\n\t\t\tresp.Close()\n\t\t}\n\t}()\n\tfor {\n\t\tif err := resp.Decode(&r); err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn r, err\n\t\t}\n\t}\n\tif r.GetType() == t {\n\t\treturn r, nil\n\t} else {\n\t\treturn r, fmt.Errorf(\"processResponse expected type %q, got %q\", t, r.GetType())\n\t}\n}", "func (o IndexesResponseOutput) DataType() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v IndexesResponse) *string { return v.DataType }).(pulumi.StringPtrOutput)\n}", "func decodeResponse(resp *http.Response, isSuccess SuccessDecider, decoder ResponseDecoder, successV, failureV interface{}) error {\n\tif isSuccess(resp) {\n\t\tswitch sv := successV.(type) {\n\t\tcase nil:\n\t\t\treturn nil\n\t\tcase *Raw:\n\t\t\trespBody, err := ioutil.ReadAll(resp.Body)\n\t\t\t*sv = respBody\n\t\t\treturn err\n\t\tdefault:\n\t\t\treturn decoder.Decode(resp, successV)\n\t\t}\n\t} else {\n\t\tswitch fv := failureV.(type) {\n\t\tcase nil:\n\t\t\treturn nil\n\t\tcase *Raw:\n\t\t\trespBody, err := ioutil.ReadAll(resp.Body)\n\t\t\t*fv = respBody\n\t\t\treturn err\n\t\tdefault:\n\t\t\treturn decoder.Decode(resp, failureV)\n\t\t}\n\t}\n}", "func NewResponse(req *reqres.Req, data interface{}) []byte {\n\n\tres := &reqres.Res{\n\t\tData: data,\n\t}\n\n\tswitch req.ResponseMarshalingMode {\n\tcase \"DataOnly\":\n\t\treturn res.MarshalDataOnly(req)\n\tdefault:\n\t\treturn 
res.Marshal(req)\n\t}\n}", "func dResponseWriter(w http.ResponseWriter, data interface{}, HStat int) error {\n\tdataType := reflect.TypeOf(data)\n\tif dataType.Kind() == reflect.String {\n\t\tw.WriteHeader(HStat)\n\t\tw.Header().Set(\"Content-Type\", \"application/text\")\n\n\t\t_, err := w.Write([]byte(data.(string)))\n\t\treturn err\n\t} else if reflect.PtrTo(dataType).Kind() == dataType.Kind() {\n\t\tw.WriteHeader(HStat)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\t\toutData, err := json.MarshalIndent(data, \"\", \"\\t\")\n\t\tif err != nil {\n\t\t\tzerolog.Error().Msg(err.Error())\n\t\t\tw.Write([]byte(err.Error()))\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = w.Write(outData)\n\t\treturn err\n\t} else if reflect.Struct == dataType.Kind() {\n\t\tw.WriteHeader(HStat)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\t\toutData, err := json.MarshalIndent(data, \"\", \"\\t\")\n\t\tif err != nil {\n\t\t\tzerolog.Error().Msg(err.Error())\n\t\t\tw.Write([]byte(err.Error()))\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = w.Write(outData)\n\t\treturn err\n\t} else if reflect.Slice == dataType.Kind() {\n\t\tw.WriteHeader(HStat)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\t\toutData, err := json.MarshalIndent(data, \"\", \"\\t\")\n\t\tif err != nil {\n\t\t\tzerolog.Error().Msg(err.Error())\n\t\t\tw.Write([]byte(err.Error()))\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = w.Write(outData)\n\t\treturn err\n\t}\n\n\treturn errors.New(\"we could not be able to support data type that you passed\")\n}", "func FromResponse(r *http.Response) error {\n\tdefer r.Body.Close()\n\tdata, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\treturn New(r.StatusCode, errors.Errorf(\"Failed to read response body: %q\", err))\n\t}\n\tmediatype, _, _ := mime.ParseMediaType(r.Header.Get(\"Content-Type\"))\n\tswitch mediatype {\n\tcase \"text/plain\", \"text/html\", \"text/xml\":\n\t\treturn New(r.StatusCode, errors.New(string(data)))\n\tcase \"application/json\":\n\t\tfallthrough\n\tdefault:\n\t\tvar tmp Response\n\t\tif err := json.Unmarshal(data, &tmp); err != nil {\n\t\t\treturn New(r.StatusCode, errors.Errorf(\"Error parsing response: %s\", err))\n\t\t}\n\t\treturn New(r.StatusCode, errors.New(tmp.Message))\n\t}\n}", "func (o FunctionOutputResponsePtrOutput) DataType() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FunctionOutputResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.DataType\n\t}).(pulumi.StringPtrOutput)\n}", "func convertResponse(r *http.Response, resp interface{}) error {\n\tdefer r.Body.Close()\n\n\tbuf := new(bytes.Buffer)\n\tif _, err := buf.ReadFrom(r.Body); err != nil {\n\t\treturn err\n\t}\n\n\tif err := json.Unmarshal(buf.Bytes(), resp); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func buildResponseConvertData(response, result *expr.AttributeExpr, svcCtx *codegen.AttributeContext, hdrs, trlrs []*MetadataData, e *expr.GRPCEndpointExpr, sd *ServiceData, svr bool) *ConvertData {\n\tif !svr && (e.MethodExpr.IsStreaming() || isEmpty(e.MethodExpr.Result.Type)) {\n\t\treturn nil\n\t}\n\n\tvar (\n\t\tsvc = sd.Service\n\t)\n\n\tif svr {\n\t\t// server side\n\n\t\tvar data *InitData\n\t\t{\n\t\t\tdata = buildInitData(result, response, \"result\", \"message\", svcCtx, true, svr, false, sd)\n\t\t\tdata.Description = fmt.Sprintf(\"%s builds the gRPC response type from the result of the %q endpoint of the %q service.\", data.Name, e.Name(), svc.Name)\n\t\t}\n\t\treturn &ConvertData{\n\t\t\tSrcName: 
svcCtx.Scope.Name(result, svcCtx.Pkg(result), svcCtx.Pointer, svcCtx.UseDefault),\n\t\t\tSrcRef: svcCtx.Scope.Ref(result, svcCtx.Pkg(result)),\n\t\t\tTgtName: protoBufGoFullTypeName(response, sd.PkgName, sd.Scope),\n\t\t\tTgtRef: protoBufGoFullTypeRef(response, sd.PkgName, sd.Scope),\n\t\t\tInit: data,\n\t\t}\n\t}\n\n\t// client side\n\n\tvar data *InitData\n\t{\n\t\tdata = buildInitData(response, result, \"message\", \"result\", svcCtx, false, svr, false, sd)\n\t\tdata.Name = fmt.Sprintf(\"New%sResult\", codegen.Goify(e.Name(), true))\n\t\tdata.Description = fmt.Sprintf(\"%s builds the result type of the %q endpoint of the %q service from the gRPC response type.\", data.Name, e.Name(), svc.Name)\n\t\tfor _, m := range hdrs {\n\t\t\t// pass the headers as arguments to result constructor in client\n\t\t\tdata.Args = append(data.Args, &InitArgData{\n\t\t\t\tName: m.VarName,\n\t\t\t\tRef: m.VarName,\n\t\t\t\tFieldName: m.FieldName,\n\t\t\t\tFieldType: m.FieldType,\n\t\t\t\tTypeName: m.TypeName,\n\t\t\t\tTypeRef: m.TypeRef,\n\t\t\t\tType: m.Type,\n\t\t\t\tPointer: m.Pointer,\n\t\t\t\tRequired: m.Required,\n\t\t\t\tValidate: m.Validate,\n\t\t\t\tExample: m.Example,\n\t\t\t})\n\t\t}\n\t\tfor _, m := range trlrs {\n\t\t\t// pass the trailers as arguments to result constructor in client\n\t\t\tdata.Args = append(data.Args, &InitArgData{\n\t\t\t\tName: m.VarName,\n\t\t\t\tRef: m.VarName,\n\t\t\t\tFieldName: m.FieldName,\n\t\t\t\tFieldType: m.FieldType,\n\t\t\t\tTypeName: m.TypeName,\n\t\t\t\tTypeRef: m.TypeRef,\n\t\t\t\tType: m.Type,\n\t\t\t\tPointer: m.Pointer,\n\t\t\t\tRequired: m.Required,\n\t\t\t\tValidate: m.Validate,\n\t\t\t\tExample: m.Example,\n\t\t\t})\n\t\t}\n\t}\n\treturn &ConvertData{\n\t\tSrcName: protoBufGoFullTypeName(response, sd.PkgName, sd.Scope),\n\t\tSrcRef: protoBufGoFullTypeRef(response, sd.PkgName, sd.Scope),\n\t\tTgtName: svcCtx.Scope.Name(result, svcCtx.Pkg(result), svcCtx.Pointer, svcCtx.UseDefault),\n\t\tTgtRef: svcCtx.Scope.Ref(result, svcCtx.Pkg(result)),\n\t\tInit: data,\n\t\tValidation: addValidation(response, \"message\", sd, false),\n\t}\n}", "func (dr *DealResponse) Type() datatransfer.TypeIdentifier {\n\treturn \"RetrievalDealResponse/1\"\n}", "func (r *proxyResponse) Response() interface{} {\n\treturn r.response\n}", "func (s *Service) ToObject(data *Data) (r interface{}, err error) {\n\tdefer func() {\n\t\tif rec := recover(); rec != nil {\n\t\t\terr = makeError(rec)\n\t\t}\n\t}()\n\tvar ok bool\n\tif data == nil {\n\t\treturn nil, nil\n\t}\n\ttypeID := data.Type()\n\tserializer := s.lookupBuiltinDeserializer(typeID)\n\tif serializer == nil {\n\t\tserializer, ok = s.registry[typeID]\n\t\tif !ok {\n\t\t\treturn nil, ihzerrors.NewSerializationError(fmt.Sprintf(\"there is no suitable de-serializer for type %d\", typeID), nil)\n\t\t}\n\t}\n\tdataInput := NewObjectDataInput(data.Buffer(), DataOffset, s, !s.SerializationConfig.LittleEndian)\n\treturn serializer.Read(dataInput), nil\n}", "func toGame(data interface{}, isResponse bool) *Game {\n\tif data == nil {\n\t\treturn nil\n\t}\n\n\tif isResponse {\n\t\tdest := gameResponse{}\n\n\t\tif recast(data, &dest) == nil {\n\t\t\treturn &dest.Data\n\t\t}\n\t} else {\n\t\tdest := Game{}\n\n\t\tif recast(data, &dest) == nil {\n\t\t\treturn &dest\n\t\t}\n\t}\n\n\treturn nil\n}", "func DecodeResponse(response []byte, responseType interface{}) (interface{}, error) {\n\tif err := json.Unmarshal([]byte(response), &responseType); err != nil {\n\t\tlog.Printf(\"error detected unmarshalling response: %v\", err)\n\t\treturn nil, 
err\n\t}\n\treturn responseType, nil\n}", "func (c *Client) DataType(datatype string) (*Client, error) {\n\t// validate input\n\tswitch datatype {\n\t// default\n\tcase \"json\":\n\tcase \"csv\":\n\tdefault:\n\t\t// throw error\n\t\tmsg := fmt.Sprintf(\"Invalid response format requested: %s.\\n\", datatype)\n\t\treturn nil, errors.New(msg)\n\t}\n\t// add to receiver\n\tc.datatype = &datatype\n\treturn c, nil\n}", "func (msg *Message) GetData() interface{} {\n\tif msg.Ptr >= len(msg.Body) {\n\t\treturn nil\n\t}\n\tswitch msg.Body[msg.Ptr] {\n\tcase Int32Type:\n\t\treturn msg.GetInt32()\n\tcase Int64Type:\n\t\treturn msg.GetInt64()\n\tcase StringType:\n\t\treturn msg.GetString()\n\t}\n\tpanic(\"Not expected type\")\n}", "func makeResponse(value interface{}) methodResponse {\n\tvar r methodResponse\n\tswitch v := value.(type) {\n\tcase Fault:\n\t\tr.Fault = makeValue(v)\n\tcase error:\n\t\tr.Fault = makeValue(InternalError.New(v.Error()))\n\tdefault:\n\t\tr.Params = makeParams(v)\n\t}\n\treturn r\n}", "func Unmarshal(data []byte, v interface{}) error {\n\trv := reflect.ValueOf(v)\n\n\tif rv.Kind() != reflect.Ptr || rv.IsNil() {\n\t\treturn fmt.Errorf(\"can't unmarshal to a %s variable\", reflect.TypeOf(v))\n\t}\n\n\trv = rv.Elem()\n\n\tswitch TypeOf(data) {\n\tcase integer:\n\t\tif rv.Kind() != reflect.Int {\n\t\t\treturn fmt.Errorf(\"can't unmarshal int to a %s variable\", rv.Kind())\n\t\t}\n\n\t\t//FIXME: leading zeros are not allowed\n\t\t//FIXME: minus 0 is not allowed\n\n\t\tvalue, _, err := unmarshalFirstValue(data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t//FIXME: check if value is Integer\n\n\t\trv.Set(reflect.ValueOf(value))\n\n\tcase str:\n\t\tif rv.Kind() != reflect.String {\n\t\t\treturn fmt.Errorf(\"can't unmarshal string to a %s variable\", rv.Kind())\n\t\t}\n\n\t\tvalue, _, err := unmarshalFirstValue(data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t//FIXME: check if value is String\n\n\t\trv.Set(reflect.ValueOf(value))\n\n\tcase list:\n\t\tswitch rv.Kind() {\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"can't unmarshal list to a %s variable\", rv.Kind())\n\t\tcase reflect.Array, reflect.Slice:\n\t\t}\n\n\t\tvalue, _, err := unmarshalFirstValue(data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t//FIXME: check if value is Array or Slice\n\n\t\trv.Set(reflect.ValueOf(value))\n\n\t//FIXME: could it be a Struct rather than a map[string]T ?\n\tcase dictionary:\n\t\tif destKind := rv.Kind(); destKind != reflect.Map {\n\t\t\treturn fmt.Errorf(\"can't unmarshal dictionary to a %s variable\", destKind)\n\t\t}\n\n\t\tt := rv.Type()\n\t\tif keyKind := t.Key().Kind(); keyKind != reflect.String {\n\t\t\treturn fmt.Errorf(\"map key has wrong type: expected string, got %s\", keyKind)\n\t\t}\n\n\t\tvalue, _, err := unmarshalFirstValue(data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tswitch t := value.(type) {\n\t\tcase Dictionary:\n\t\t\trv.Set(reflect.ValueOf(t))\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"decoded unexpected value %v of type %s\", value, reflect.ValueOf(value).Type())\n\t\t}\n\n\tdefault:\n\t\treturn fmt.Errorf(\"cant decode %v\", data)\n\t}\n\n\treturn nil\n}", "func (d Data) Data(key string) (Data, error) {\n\tval, _ := d[key]\n\tswitch v := val.(type) {\n\tcase Data:\n\t\treturn v, nil\n\tcase nil:\n\t\treturn nil, ErrNotFound\n\tdefault:\n\t\treturn nil, ErrUnexpectedType\n\t}\n}", "func (o AzureMachineLearningServiceOutputColumnResponseOutput) DataType() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v 
AzureMachineLearningServiceOutputColumnResponse) *string { return v.DataType }).(pulumi.StringPtrOutput)\n}", "func (a *BaseApi) DataRes(c *rux.Context, data interface{}) *model.JsonData {\n\treturn a.MakeRes(0, nil, data)\n}", "func (o SchematizedDataResponseOutput) Data() pulumi.StringOutput {\n\treturn o.ApplyT(func(v SchematizedDataResponse) string { return v.Data }).(pulumi.StringOutput)\n}", "func (o AzureMachineLearningServiceInputColumnResponseOutput) DataType() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v AzureMachineLearningServiceInputColumnResponse) *string { return v.DataType }).(pulumi.StringPtrOutput)\n}", "func responseType(m dns.Msg) byte {\n\tif len(m.Answer) > 0 {\n\t\treturn rTypeResponse\n\t} else if m.Rcode == dns.RcodeNameError {\n\t\treturn rTypeNxdomain\n\t} else if m.Rcode == dns.RcodeSuccess {\n\t\t// todo: determine if this is actually a referral, assume nodata for now\n\t\treturn rTypeNodata\n\t} else {\n\t\treturn rTypeError\n\t}\n}", "func (j *JSONData) Type() JSONType {\n\tif j == nil || j.value == nil { // no data\n\t\treturn JSONnil\n\t}\n\tvalue := *j.value\n\tif value == nil {\n\t\treturn JSONnull\n\t}\n\tif _, ok := value.(bool); ok {\n\t\treturn JSONboolean\n\t}\n\tif _, ok := value.(float64); ok {\n\t\treturn JSONnumber\n\t}\n\tif _, ok := value.(string); ok {\n\t\treturn JSONstring\n\t}\n\tif _, ok := value.([]interface{}); ok {\n\t\treturn JSONarray\n\t}\n\tif _, ok := value.(map[string]interface{}); ok {\n\t\treturn JSONobject\n\t}\n\tpanic(errors.New(\"JSONData corrupt\"))\n}", "func encodeResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error {\n\t// Set JSON type\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\n\t// Check error\n\tif e, ok := response.(errorer); ok {\n\t\t// This is a errorer class, now check for error\n\t\tif err := e.error(); err != nil {\n\t\t\tencodeError(ctx, e.error(), w)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// cast to dataHolder to get Data, otherwise just encode the resposne\n\tif holder, ok := response.(dataHolder); ok {\n\t\treturn json.NewEncoder(w).Encode(holder.getData())\n\t} else {\n\t\treturn json.NewEncoder(w).Encode(response)\n\t}\n}", "func (o AzureMachineLearningStudioOutputColumnResponseOutput) DataType() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v AzureMachineLearningStudioOutputColumnResponse) *string { return v.DataType }).(pulumi.StringPtrOutput)\n}", "func ParseGetInstanceTypeResponse(rsp *http.Response) (*GetInstanceTypeResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &GetInstanceTypeResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch {\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 200:\n\t\tvar dest InstanceType\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresponse.JSON200 = &dest\n\n\t}\n\n\treturn response, nil\n}", "func (mgr *ResTypeMgr) ToData() *ResTypeData {\n\trtd := &ResTypeData{}\n\n\tfor _, v := range mgr.List {\n\t\trtd.ResType = append(rtd.ResType, v.ResType)\n\t\trtd.Bytes = append(rtd.Bytes, v.TotalBytes)\n\t\trtd.MBytes = append(rtd.MBytes, float32(v.TotalBytes)/1024/1024)\n\t\trtd.Time = append(rtd.Time, v.TotalTime)\n\t\trtd.Nums = append(rtd.Nums, v.TotalNums)\n\n\t\tds := float32(v.TotalBytes) * 1000 / float32(v.TotalTime) / 1024 / 1024\n\n\t\trtd.DownloadSpeed = append(rtd.DownloadSpeed, 
ds)\n\t}\n\n\tfor _, v := range mgr.ImgList {\n\t\trtd.ImgType = append(rtd.ImgType, fmt.Sprintf(\"%vx%v\", v.ImgWidth, v.ImgHeight))\n\t\trtd.ImgBytes = append(rtd.ImgBytes, v.TotalBytes)\n\t\trtd.ImgTime = append(rtd.ImgTime, v.TotalTime)\n\t\trtd.ImgNums = append(rtd.ImgNums, v.TotalNums)\n\t}\n\n\treturn rtd\n}", "func (o AzureMachineLearningStudioInputColumnResponseOutput) DataType() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v AzureMachineLearningStudioInputColumnResponse) *string { return v.DataType }).(pulumi.StringPtrOutput)\n}", "func ParseResponseToBaseModel(resp *http.Response, baseModel IBaseModel, xmlResult bool, isObs bool) (err error) {\n\treadCloser, ok := baseModel.(IReadCloser)\n\tif !ok {\n\t\tdefer func() {\n\t\t\terrMsg := resp.Body.Close()\n\t\t\tif errMsg != nil {\n\t\t\t\tdoLog(LEVEL_WARN, \"Failed to close response body\")\n\t\t\t}\n\t\t}()\n\t\tvar body []byte\n\t\tbody, err = ioutil.ReadAll(resp.Body)\n\t\tif err == nil && len(body) > 0 {\n\t\t\tif xmlResult {\n\t\t\t\terr = ParseXml(body, baseModel)\n\t\t\t} else {\n\t\t\t\ts := reflect.TypeOf(baseModel).Elem()\n\t\t\t\tif reflect.TypeOf(baseModel).Elem().Name() == \"GetBucketPolicyOutput\" {\n\t\t\t\t\tparseBucketPolicyOutput(s, baseModel, body)\n\t\t\t\t} else {\n\t\t\t\t\terr = parseJSON(body, baseModel)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tdoLog(LEVEL_ERROR, \"Unmarshal error: %v\", err)\n\t\t\t}\n\t\t}\n\t} else {\n\t\treadCloser.setReadCloser(resp.Body)\n\t}\n\n\tbaseModel.setStatusCode(resp.StatusCode)\n\tresponseHeaders := cleanHeaderPrefix(resp.Header)\n\tbaseModel.setResponseHeaders(responseHeaders)\n\tif values, ok := responseHeaders[HEADER_REQUEST_ID]; ok {\n\t\tbaseModel.setRequestID(values[0])\n\t}\n\treturn\n}", "func (c *Context) Data(data interface{}, total ...int64) {\n\tc.responseFormat.SetData(data, total...)\n}", "func (d dynamicSystemView) ResponseWrapData(ctx context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) {\n\treq := &logical.Request{\n\t\tOperation: logical.CreateOperation,\n\t\tPath: \"sys/wrapping/wrap\",\n\t}\n\n\tresp := &logical.Response{\n\t\tWrapInfo: &wrapping.ResponseWrapInfo{\n\t\t\tTTL: ttl,\n\t\t},\n\t\tData: data,\n\t}\n\n\tif jwt {\n\t\tresp.WrapInfo.Format = \"jwt\"\n\t}\n\n\t_, err := d.core.wrapInCubbyhole(ctx, req, resp, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp.WrapInfo, nil\n}", "func (o *ApiResponse) GetType() string {\n\tif o == nil || IsNil(o.Type) {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Type\n}", "func (o HTTPHealthCheckTypeOutput) Response() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v HTTPHealthCheckType) *string { return v.Response }).(pulumi.StringPtrOutput)\n}", "func CreateFromResponse(u url.URL, r http.Response, b []byte) (Representor, error) {\n\n\tct := r.Header.Get(\"Content-Type\")\n\tif ct == \"\" {\n\t\treturn nil, errors.New(\"missing content-type\")\n\t}\n\n\t// TODO: Check the header for Link\n\n\treturn Create(u, ct, b)\n}", "func (o RawOutputDatasourceResponseOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v RawOutputDatasourceResponse) string { return v.Type }).(pulumi.StringOutput)\n}", "func newResponse(r *http.Response) *Response {\n\tresponse := &Response{Response: r}\n\tresponse.populatePageValues()\n\n\tdata, err := ioutil.ReadAll(r.Body)\n\tif err == nil && data != nil {\n\t\tjson.Unmarshal(data, response)\n\t}\n\n\treturn response\n}", "func (d *Module) GetData() interface{} {\n\n\treturn 
Response{\n\t\tInfo: d.getInfo(),\n\t\tContainers: d.getContainers(),\n\t\tNetworks: d.getNetworks(),\n\t\tVolumes: d.getVolumes(),\n\t\tImages: d.getImages(),\n\t}\n}", "func (o HTTPSHealthCheckTypeOutput) Response() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v HTTPSHealthCheckType) *string { return v.Response }).(pulumi.StringPtrOutput)\n}", "func (c *Client) decodeResponse(resp *http.Response, data interface{}) error {\n\tc.Logger.Printf(\"decodeResponse: status:%v data:%T\", resp.StatusCode, data)\n\tif data == nil {\n\t\t// without any content body (media/upload APPEND)\n\t\treturn nil\n\t}\n\n\tif resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusAccepted {\n\t\treturn json.NewDecoder(resp.Body).Decode(data)\n\t}\n\n\tp, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn errors.Errorf(\"get %s returned status %d, %s\", resp.Request.URL, resp.StatusCode, p)\n}", "func unconvertData(data []byte) interface{} {\n\tif data == nil || string(data) == \"\" {\n\t\treturn nil\n\t}\n\n\tvar proto interface{}\n\tresult, err := serial.Deserialize(data, proto, serial.PERSISTENT)\n\tif err != nil {\n\t\tlog.Fatal(\"Persistent Deserialization Failed\", \"err\", err, \"data\", data)\n\t}\n\treturn result\n}", "func BuildResponse(data interface{}, statusCode int, msg string, tid string) Response {\n\tstatus := OK\n\n\tif statusCode >= 400 {\n\t\tstatus = ERROR\n\t}\n\n\tresponse := Response{\n\t\tMeta: Meta{\n\t\t\tStatus: status,\n\t\t\tStatusCode: statusCode,\n\t\t\tMessage: msg,\n\t\t\tTransactionID: tid,\n\t\t},\n\t\tData: data,\n\t}\n\n\treturn response\n}", "func (o *ApiResponse) GetType() string {\n\tif o == nil || o.Type == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Type\n}", "func (s StatsSerializer) Cast(data interface{}) Serializer {\n\tvar serializer = new(StatsSerializer)\n\n\tif model, ok := data.(models.Stats); ok {\n\t\tserializer.Missions = model.Missions\n\t\tserializer.Challenges = model.Challenges\n\t\tserializer.Account = model.Account\n\t}\n\n\treturn serializer\n}", "func (bstr BlobsSetTierResponse) Response() *http.Response {\n\treturn bstr.rawResponse\n}", "func (o BlobStreamInputDataSourceResponseOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BlobStreamInputDataSourceResponse) string { return v.Type }).(pulumi.StringOutput)\n}", "func ResponseData(q Query) (data []types.Row, err error) {\n\tvar results []map[string]string\n\tif err = q.All(&results); err != nil {\n\t\treturn nil, err\n\t}\n\tdata = make([]types.Row, len(results))\n\tfor i, r := range results {\n\t\tdata[i].Data = r\n\t}\n\treturn\n}", "func readResponse(p packetType) (response responseType, err error) {\n\t// The calls to bencode.Unmarshal() can be fragile.\n\tdefer func() {\n\t\tif x := recover(); x != nil {\n\t\t\tlogger.Infof(\"DHT: !!! Recovering from panic() after bencode.Unmarshal %q, %v\", string(p.b), x)\n\t\t}\n\t}()\n\tif e2 := bencode.Unmarshal(bytes.NewBuffer(p.b), &response); e2 == nil {\n\t\terr = nil\n\t\treturn\n\t} else {\n\t\tlogger.Infof(\"DHT: unmarshal error, odd or partial data during UDP read? 
%v, err=%s\", string(p.b), e2)\n\t\treturn response, e2\n\t}\n\treturn\n}", "func LoadResponse(data []byte) (*Response, error) {\n\tvar response Response\n\treturn &response, msgpack.Unmarshal(data, &response)\n}", "func (e *Event) Data(out interface{}) error {\n\treturn json.Unmarshal([]byte(e.data), out)\n}", "func (o RawStreamInputDataSourceResponseOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v RawStreamInputDataSourceResponse) string { return v.Type }).(pulumi.StringOutput)\n}", "func (s *Service) ToObject(data *Data) (r interface{}, err error) {\n\tdefer func() {\n\t\tif rec := recover(); rec != nil {\n\t\t\terr = makeError(rec)\n\t\t}\n\t}()\n\tif data == nil {\n\t\treturn nil, nil\n\t}\n\ttypeID := data.Type()\n\tif typeID == 0 {\n\t\treturn data, nil\n\t}\n\tserializer, ok := s.registry[typeID]\n\tif !ok {\n\t\treturn nil, hzerrors.NewHazelcastSerializationError(fmt.Sprintf(\"there is no suitable de-serializer for type %d\", typeID), nil)\n\t}\n\tdataInput := NewObjectDataInput(data.Buffer(), DataOffset, s, s.SerializationConfig.BigEndian)\n\treturn serializer.Read(dataInput), nil\n}", "func (rr *ResourceRecord) RData() interface{} {\n\tswitch rr.Type {\n\tcase QueryTypeA:\n\t\treturn rr.Text.Value\n\tcase QueryTypeNS:\n\t\treturn rr.Text.Value\n\tcase QueryTypeMD:\n\t\treturn nil\n\tcase QueryTypeMF:\n\t\treturn nil\n\tcase QueryTypeCNAME:\n\t\treturn rr.Text.Value\n\tcase QueryTypeSOA:\n\t\treturn rr.SOA\n\tcase QueryTypeMB:\n\t\treturn rr.Text.Value\n\tcase QueryTypeMG:\n\t\treturn rr.Text.Value\n\tcase QueryTypeMR:\n\t\treturn rr.Text.Value\n\tcase QueryTypeNULL:\n\t\treturn rr.Text.Value\n\tcase QueryTypeWKS:\n\t\treturn rr.WKS\n\tcase QueryTypePTR:\n\t\treturn rr.Text.Value\n\tcase QueryTypeHINFO:\n\t\treturn rr.HInfo\n\tcase QueryTypeMINFO:\n\t\treturn rr.MInfo\n\tcase QueryTypeMX:\n\t\treturn rr.MX\n\tcase QueryTypeTXT:\n\t\treturn rr.Text.Value\n\tcase QueryTypeAAAA:\n\t\treturn rr.Text.Value\n\tcase QueryTypeSRV:\n\t\treturn rr.SRV\n\tcase QueryTypeOPT:\n\t\treturn rr.OPT\n\t}\n\treturn nil\n}", "func (nexus Nexus2x) errorFromResponse(response *http.Response) Error {\n\te := Error{\n\t\tURL: nexus.URL,\n\t\tStatusCode: response.StatusCode,\n\t\tStatus: response.Status,\n\t\tMessage: fmt.Sprintf(\"Error (%v) from %v\", response.Status, nexus.URL),\n\t}\n\n\tbody, err := bodyToBytes(response.Body)\n\tif err != nil {\n\t\treturn e // problems getting the response shouldn't mask the original error\n\t}\n\n\tcontentType := response.Header.Get(\"Content-Type\")\n\tswitch {\n\tcase strings.Contains(contentType, \"application/json\"):\n\t\tmsg, err := tryFromJSON(body)\n\t\tif err != nil {\n\t\t\treturn e // couldn't determine a message; use the default one then\n\t\t}\n\n\t\te.Message = msg\n\tcase strings.Contains(contentType, \"text/html\"):\n\t\tmsg, err := tryFromHTML(body)\n\t\tif err != nil {\n\t\t\treturn e // couldn't determine a message; use the default one then\n\t\t}\n\n\t\te.Message = msg\n\t}\n\n\treturn e\n}", "func CreateDataResponse() *DataResponse {\n\tvar dataResponse = DataResponse{}\n\tdataResponse.Jsonapi = JsonapiVersion{Version: \"1.0\"}\n\treturn &dataResponse\n}", "func HandleGetType(res http.ResponseWriter, req *http.Request) {\r\n\tdefer sde.Debug(time.Now())\r\n\tresponse := make([]byte, 0)\r\n\tv := mux.Vars(req)\r\n\tvs := v[\"typeID\"]\r\n\ttypeID, err := strconv.Atoi(vs)\r\n\tif procErr(err, res) {\r\n\t\tlog.LogError(\"Error encountered while handling a response\", err.Error())\r\n\t\treturn\r\n\t}\r\n\tif t, err := 
SDE.GetType(typeID); err != nil {\r\n\t\tprocErr(err, res)\r\n\t} else {\r\n\t\tif _, ok := t.Attributes[\"mDisplayName\"]; !ok {\r\n\t\t\tt.GetAttributes()\r\n\t\t}\r\n\t\t//t.Lookup(2)\r\n\t\tj, _ := t.ToJSON()\r\n\t\tresponse = []byte(j)\r\n\t}\r\n\tres.Write(response)\r\n}", "func newResponse(data map[string]string) (*AMIResponse, error) {\n\tr, found := data[\"Response\"]\n\tif !found {\n\t\treturn nil, errors.New(\"Not Response\")\n\t}\n\tresponse := &AMIResponse{ID: data[\"ActionID\"], Status: r, Params: make(map[string]string)}\n\tfor k, v := range data {\n\t\tif k == \"Response\" {\n\t\t\tcontinue\n\t\t}\n\t\tresponse.Params[k] = v\n\t}\n\treturn response, nil\n}", "func (o BlobOutputDataSourceResponseOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BlobOutputDataSourceResponse) string { return v.Type }).(pulumi.StringOutput)\n}", "func (sr *DataSingleResult) Decode(v interface{}) error {\n\tif sr.result == nil {\n\t\treturn nil\n\t}\n\treturn sr.result.Decode(v)\n}", "func (o FieldResponseOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FieldResponse) string { return v.Type }).(pulumi.StringOutput)\n}", "func DecodeResp(serializers *rest.Serializers, b []byte, reqContentType, respContentType string) (runtime.Object, error) {\n\tdecoder := serializers.Decoder\n\tif len(respContentType) > 0 && (decoder == nil || (len(reqContentType) > 0 && respContentType != reqContentType)) {\n\t\tmediaType, params, err := mime.ParseMediaType(respContentType)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"response content type(%s) is invalid, %v\", respContentType, err)\n\t\t}\n\t\tdecoder, err = serializers.RenegotiatedDecoder(mediaType, params)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"response content type(%s) is not supported, %v\", respContentType, err)\n\t\t}\n\t\tklog.Infof(\"serializer decoder changed from %s to %s(%v)\", reqContentType, respContentType, params)\n\t}\n\n\tif len(b) == 0 {\n\t\treturn nil, fmt.Errorf(\"0-length response with content type: %s\", respContentType)\n\t}\n\n\tout, _, err := decoder.Decode(b, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// if a different object is returned, see if it is Status and avoid double decoding\n\t// the object.\n\tswitch out.(type) {\n\tcase *metav1.Status:\n\t\t// it's not need to cache for status\n\t\treturn nil, nil\n\t}\n\treturn out, nil\n}", "func (o JsonSerializationResponseOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v JsonSerializationResponse) string { return v.Type }).(pulumi.StringOutput)\n}", "func newQueryDataResponse(reader recordReader, query sqlutil.Query, headers metadata.MD) backend.DataResponse {\n\tvar resp backend.DataResponse\n\tframe, err := frameForRecords(reader)\n\tif err != nil {\n\t\tresp.Error = err\n\t}\n\tif frame.Rows() == 0 {\n\t\tresp.Frames = data.Frames{}\n\t\treturn resp\n\t}\n\n\tframe.Meta.Custom = map[string]any{\n\t\t\"headers\": headers,\n\t}\n\tframe.Meta.ExecutedQueryString = query.RawSQL\n\tframe.Meta.DataTopic = data.DataTopic(query.RawSQL)\n\n\tswitch query.Format {\n\tcase sqlutil.FormatOptionTimeSeries:\n\t\tif _, idx := frame.FieldByName(\"time\"); idx == -1 {\n\t\t\tresp.Error = fmt.Errorf(\"no time column found\")\n\t\t\treturn resp\n\t\t}\n\n\t\tif frame.TimeSeriesSchema().Type == data.TimeSeriesTypeLong {\n\t\t\tvar err error\n\t\t\tframe, err = data.LongToWide(frame, nil)\n\t\t\tif err != nil {\n\t\t\t\tresp.Error = err\n\t\t\t\treturn resp\n\t\t\t}\n\t\t}\n\tcase sqlutil.FormatOptionTable:\n\t\t// No changes 
to the output. Send it as is.\n\tcase sqlutil.FormatOptionLogs:\n\t\t// TODO(brett): We need to find out what this actually is and if its\n\t\t// worth supporting. Pass through as \"table\" for now.\n\tdefault:\n\t\tresp.Error = fmt.Errorf(\"unsupported format\")\n\t}\n\n\tresp.Frames = data.Frames{frame}\n\treturn resp\n}", "func (pp *Pingresp) Type() Type {\n\treturn PINGRESP\n}", "func (v *Value) Data() interface{} {\n return v.data\n}", "func (this *Self) Type() value.Type { return value.JSON }", "func convertResult(ctx *HttpContext, v reflect.Value) HttpResult {\n\ti := v.Interface()\n\n\tif r, ok := i.(HttpResult); ok {\n\t\treturn r\n\t}\n\n\tif Formatters != nil {\n\t\tfor _, f := range Formatters {\n\t\t\tif formatted, ok := f(ctx, v.Interface()); ok {\n\t\t\t\treturn formatted\n\t\t\t}\n\t\t}\n\t}\n\n\tkind := reflect.Indirect(v).Kind()\n\n\tswitch {\n\tcase gotype.IsSimple(kind):\n\t\treturn &DataResult{Data: i}\n\tcase gotype.IsStruct(kind) || gotype.IsCollect(kind):\n\t\taccept := ctx.Accept()\n\t\tswitch {\n\t\tcase strings.Index(accept, \"xml\") > -1:\n\t\t\treturn &XmlResult{Data: v.Interface()}\n\t\tcase strings.Index(accept, \"jsonp\") > -1:\n\t\t\treturn &JsonpResult{Data: v.Interface()}\n\t\tdefault:\n\t\t\treturn &JsonResult{Data: v.Interface()}\n\t\t}\n\t}\n\treturn &ContentResult{Data: i}\n}", "func RetData(c echo.Context, data interface{}) error {\n\treturn c.JSON(http.StatusOK, DataRes{\n\t\tStatus: 200,\n\t\tData: data,\n\t})\n}", "func Response2Response(response *http.Response) (*HTTPResponse, error) {\n\tdefer response.Body.Close()\n\tbody, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trep := &HTTPResponse{\n\t\tStatusCode: response.StatusCode,\n\t\tHeader: response.Header,\n\t\tBody: body,\n\t}\n\treturn rep, nil\n}", "func (c *Context) Write(data interface{}) {\n\t// use DataWriter to write response if possible\n\tif dw, ok := c.Response.(DataWriter); ok {\n\t\tif err := dw.WriteData(data); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn\n\t}\n\n\tswitch data.(type) {\n\tcase []byte:\n\t\tc.Response.Write(data.([]byte))\n\tcase string:\n\t\tc.Response.Write([]byte(data.(string)))\n\tdefault:\n\t\tif data != nil {\n\t\t\tfmt.Fprint(c.Response, data)\n\t\t}\n\t}\n}", "func (o IoTHubStreamInputDataSourceResponseOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IoTHubStreamInputDataSourceResponse) string { return v.Type }).(pulumi.StringOutput)\n}", "func (RPCResponse *RPCResponse) GetObject(toType interface{}) error {\n\tjs, err := json.Marshal(RPCResponse.Result)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal(js, toType)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func ResponseDecoder(r *http.Response) (Decoder, error) {\n\tserverSent := mime.NewTypes(r.Header.Get(\"Content-Type\"))\n\tvar dec Decoder\n\terr := serverSent.Walk(func(x mime.Type) error {\n\t\tif decoderFunc, ok := decoders[x]; ok {\n\t\t\tdec = decoderFunc(r.Body)\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"%s isn't a registered decoder\", x)\n\t})\n\tif dec == nil {\n\t\treturn nil, err\n\t} else {\n\t\treturn dec, nil\n\t}\n}", "func (o PowerBIOutputDataSourceResponseOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v PowerBIOutputDataSourceResponse) string { return v.Type }).(pulumi.StringOutput)\n}", "func (lb LodgingBusiness) AsResponse() (*Response, bool) {\n\treturn nil, false\n}", "func (r *Response) String() string {\n\treturn string(r.Data)\n}", "func mapData(data []interface{}) (m 
mappers.Payload) {\n\n\tfor _, v := range data {\n\n\t\tswitch v.(type) {\n\t\tdefault:\n\t\t\tm.Data = v\n\t\t}\n\t}\n\n\treturn m\n}", "func (res *pbResponse) Unmarshal(data []byte) error {\n\tvar length = uint64(len(data))\n\tvar offset uint64\n\tvar n uint64\n\tvar tag uint64\n\tvar fieldNumber int\n\tvar wireType uint8\n\tfor {\n\t\tif offset < length {\n\t\t\ttag = uint64(data[offset])\n\t\t\toffset++\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t\tfieldNumber = int(tag >> 3)\n\t\twireType = uint8(tag & 0x7)\n\t\tswitch fieldNumber {\n\t\tcase 1:\n\t\t\tif wireType != 0 {\n\t\t\t\treturn fmt.Errorf(\"proto: wrong wireType = %d for field Seq\", wireType)\n\t\t\t}\n\t\t\tn = code.DecodeVarint(data[offset:], &res.Seq)\n\t\t\toffset += n\n\t\tcase 2:\n\t\t\tif wireType != 2 {\n\t\t\t\treturn fmt.Errorf(\"proto: wrong wireType = %d for field Error\", wireType)\n\t\t\t}\n\t\t\tn = code.DecodeString(data[offset:], &res.Error)\n\t\t\toffset += n\n\t\tcase 3:\n\t\t\tif wireType != 2 {\n\t\t\t\treturn fmt.Errorf(\"proto: wrong wireType = %d for field Reply\", wireType)\n\t\t\t}\n\t\t\tn = code.DecodeBytes(data[offset:], &res.Reply)\n\t\t\toffset += n\n\t\t}\n\t}\n\treturn nil\n}", "func Fmt (output *Data, data webapp.ReqData) {\n\toutput.Type = data[\"Content-Type\"]\n}", "func parseHTTPResponse(r *http.Response) (obj runtime.Object, details string, err error) {\n\traw, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\treturn nil, \"\", fmt.Errorf(\"failed to read body (response code: %d): %v\", r.StatusCode, err)\n\t}\n\n\tlog.V(2).Infof(\"Response raw data: %s\", raw)\n\tobj, gvk, err := decode(raw)\n\tif err != nil {\n\t\treturn nil, \"\", fmt.Errorf(\"failed to parse json object (response code: %d): %v\", r.StatusCode, err)\n\t}\n\n\tif r.StatusCode < 200 || r.StatusCode >= 300 {\n\t\treturn nil, \"\", fmt.Errorf(\"%s (response code: %d)\", apierrors.FromObject(obj).Error(), r.StatusCode)\n\t}\n\n\tif s, ok := obj.(*metav1.Status); ok {\n\t\td := s.Details\n\t\tif d == nil {\n\t\t\treturn obj, s.Message, nil\n\t\t}\n\t\treturn obj, fmt.Sprintf(\"%s%s `%s\", d.Kind, d.Group, d.Name), nil\n\t}\n\n\tif in, ok := obj.(metav1.Object); ok {\n\t\treturn obj, fmt.Sprintf(\"%s%s `%s'\", strings.ToLower(gvk.Kind), maybeCore(gvk.Group), maybeNamespaced(in.GetName(), in.GetNamespace())), nil\n\t}\n\tif _, ok := obj.(metav1.ListInterface); ok {\n\t\treturn obj, fmt.Sprintf(\"%s%s'\", strings.ToLower(gvk.Kind), maybeCore(gvk.Group)), nil\n\t}\n\treturn nil, \"\", fmt.Errorf(\"returned object does not implement `metav1.Object` or `metav1.ListInterface`: %v\", obj)\n}", "func (h *Handler) Data(ctx context.Context, in *backend.DataRequest, out *backend.DataResponse) error {\n\tlog.Debug(\"Got data request:\", in)\n\tdata := map[string]string{}\n\tif err := json.Unmarshal(in.Data, &data); err != nil {\n\t\tlog.Error(\"Unmarshal data request error:\", err)\n\t\treturn err\n\t}\n\trid, ok := data[\"room_id\"]\n\tlog.Debugf(\"Data request room_id:%s\", rid)\n\tif ok {\n\t\tsvid, err := h.manager.GetRoomServerID(rid)\n\t\tlog.Debugf(\"Current server id:%s <==> Room server id:%s\", h.manager.GetService().Server().ID(), svid)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Failed to get room server id:\", err)\n\t\t\treturn err\n\t\t}\n\t\tif svid != h.manager.GetService().Server().ID() { // forward to another server\n\t\t\tlog.Debugf(\"Forwad data request to other server\")\n\t\t\tcli := backend.NewBackendService(\"chat-demo.chat\", h.manager.GetService().Client())\n\t\t\tcli.Data(ctx, in, 
client.WithSelectOption(selector.WithIDFilter([]string{svid})))\n\t\t\treturn nil\n\t\t}\n\t}\n\tt, ok := data[\"type\"]\n\tif !ok {\n\t\tlog.Error(\"Bad request data, 'type' is required\")\n\t\treturn errors.New(\"bad request data\")\n\t}\n\ttyp, err := strconv.Atoi(t)\n\tif err != nil {\n\t\tlog.Error(\"Bad request data, 'type' error:\", err)\n\t\treturn err\n\t}\n\tswitch msg.Type(typ) {\n\tcase msg.TypeCmdCreateRoom:\n\t\th.manager.CreateRoom(data[\"name\"], in.Meta[\"uid\"], data[\"nickname\"], in.Meta)\n\tcase msg.TypeCmdJoinRoom:\n\t\th.manager.JoinRoom(rid, in.Meta[\"uid\"], data[\"nickname\"], in.Meta)\n\tcase msg.TypeCmdQuitRoom:\n\t\th.manager.QuitRoom(rid, in.Meta[\"uid\"])\n\tcase msg.TypeCmdMessage:\n\t\th.manager.Message(rid, in.Meta[\"uid\"], data[\"message\"])\n\tdefault:\n\t\tlog.Error(\"Unsupported request data type:\", t)\n\t\treturn errors.New(\"Bad request data type\")\n\t}\n\treturn nil\n}", "func (x respExt) ConvertExt(v interface{}) interface{} {\n\tresponse := v.(Response)\n\n\t// Assemble the \"over-the-wire\" response dictionary\n\twire := wireFormat{\n\t\t[]byte(\"id\"),\n\t\tuint64(response.ID),\n\t}\n\tif response.Result != nil {\n\t\twire = append(wire, []byte(\"result\"), response.Result)\n\t}\n\tif response.Error != \"\" {\n\t\terrorDict := make(map[string]string)\n\t\terrorDict[\"message\"] = response.Error\n\t\twire = append(wire, []byte(\"error\"), errorDict)\n\t}\n\n\t// Convert it to CBOR bytes\n\tvar resp []byte\n\tencoder := codec.NewEncoderBytes(&resp, x.cbor)\n\tencoder.MustEncode(wire)\n\treturn resp\n}", "func DataResponse(body []byte) (frame []byte) {\r\n\treturn DataFrame(FrameResponse, body)\r\n}" ]
[ "0.6067754", "0.6017849", "0.5930762", "0.57525027", "0.5741373", "0.5659038", "0.565784", "0.5640045", "0.5633768", "0.55646974", "0.5547578", "0.55453074", "0.55345553", "0.5497515", "0.5489349", "0.5478702", "0.54747427", "0.543881", "0.53859806", "0.5373251", "0.5371848", "0.53621525", "0.5339227", "0.53022337", "0.52844024", "0.52613443", "0.5258448", "0.5216065", "0.52033365", "0.51999164", "0.51896626", "0.51804376", "0.51674896", "0.5163928", "0.51398265", "0.5137628", "0.51251274", "0.5118475", "0.5098096", "0.5095433", "0.5092715", "0.50879675", "0.5081105", "0.50747466", "0.5070347", "0.50675905", "0.50652266", "0.5063665", "0.5058405", "0.5057323", "0.5044374", "0.5033145", "0.5031552", "0.501715", "0.5010482", "0.49997646", "0.49854624", "0.4976483", "0.49757776", "0.49727973", "0.49720037", "0.49714157", "0.49642295", "0.49619383", "0.49361798", "0.49319115", "0.49308515", "0.49289426", "0.49237257", "0.4922478", "0.4916405", "0.490967", "0.4905921", "0.4902619", "0.4899245", "0.48927948", "0.4890009", "0.48874754", "0.48857626", "0.48705092", "0.48676184", "0.48589164", "0.48583055", "0.4855189", "0.48551106", "0.48537785", "0.48469082", "0.48447332", "0.4842326", "0.48391345", "0.48371536", "0.48366874", "0.48323742", "0.48233402", "0.48214602", "0.4820009", "0.4817007", "0.48162356", "0.48119292", "0.48094258" ]
0.5069115
45
Create a new Response type
func newResponse() *Response {
	return &Response{
		// Header is always 5 bytes
		Header: make([]byte, 5),
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func newResponse(r *http.Response) *Response {\n\tresponse := Response{Response: r}\n\n\treturn &response\n}", "func newResponse(r *http.Response) *Response {\n\treturn &Response{Response: r}\n}", "func newResponse(r *http.Response) *Response {\n\tresponse := &Response{Response: r}\n\treturn response\n}", "func newResponse(r *http.Response) *Response {\n\tresponse := &Response{Response: r}\n\treturn response\n}", "func newResponse(r *http.Response) *Response {\n\tresponse := &Response{Response: r}\n\treturn response\n}", "func newResponse(w http.ResponseWriter) *response {\n\treturn &response{\n\t\tResponseWriter: w,\n\t\tsize: 0,\n\t\tstatus: http.StatusOK,\n\t\theadersSend: false,\n\t}\n}", "func NewResponseType(hasError bool, message string, data interface{}) ResponseType {\n\treturn ResponseType{Erro: hasError, Mensagem: message, Dados: data}\n}", "func CreateServiceStatusResponse() (response *ServiceStatusResponse) {\nresponse = &ServiceStatusResponse{\nBaseResponse: &responses.BaseResponse{},\n}\nreturn\n}", "func NewResponse(id string, result interface{}) *Response {\n\treturn &Response{JsonRPC: \"2.0\", Id: id, Result: result}\n}", "func (c *WSCodec) CreateResponse(id interface{}, reply interface{}) interface{} {\n\treturn &jsonSuccessResponse{Version: jsonrpcVersion, Id: id, Result: reply}\n}", "func NewApiResponse() *ApiResponse {\n this := ApiResponse{}\n return &this\n}", "func NewResponse(pt *influx.Point, tr *Tracer) Response {\r\n\treturn Response{\r\n\t\tPoint: pt,\r\n\t\tTracer: tr,\r\n\t}\r\n}", "func NewResponse(p interface{}) *Response {\n\treturn &Response{Payload: p}\n}", "func NewResponse(code int, body interface{}) Response {\n\treturn Response{\n\t\tcode: code,\n\t\tbody: body,\n\t}\n}", "func NewResponseStatus()(*ResponseStatus) {\n m := &ResponseStatus{\n }\n m.SetAdditionalData(make(map[string]interface{}));\n return m\n}", "func newResponse(code int, body io.Reader, req *http.Request) *http.Response {\n\tif body == nil {\n\t\tbody = &bytes.Buffer{}\n\t}\n\n\trc, ok := body.(io.ReadCloser)\n\tif !ok {\n\t\trc = ioutil.NopCloser(body)\n\t}\n\n\tres := &http.Response{\n\t\tStatusCode: code,\n\t\tStatus: fmt.Sprintf(\"%d %s\", code, http.StatusText(code)),\n\t\tProto: \"HTTP/1.1\",\n\t\tProtoMajor: 1,\n\t\tProtoMinor: 1,\n\t\tHeader: http.Header{},\n\t\tBody: rc,\n\t\tRequest: req,\n\t}\n\n\tif req != nil {\n\t\tres.Close = req.Close\n\t\tres.Proto = req.Proto\n\t\tres.ProtoMajor = req.ProtoMajor\n\t\tres.ProtoMinor = req.ProtoMinor\n\t}\n\n\treturn res\n}", "func makeResponse(value interface{}) methodResponse {\n\tvar r methodResponse\n\tswitch v := value.(type) {\n\tcase Fault:\n\t\tr.Fault = makeValue(v)\n\tcase error:\n\t\tr.Fault = makeValue(InternalError.New(v.Error()))\n\tdefault:\n\t\tr.Params = makeParams(v)\n\t}\n\treturn r\n}", "func NewResponse(statusCode int, contentType string, value interface{}) *Response {\n\tr := makeResponse(statusCode, contentType, value)\n\treturn &r\n}", "func newResponse(data map[string]string) (*AMIResponse, error) {\n\tr, found := data[\"Response\"]\n\tif !found {\n\t\treturn nil, errors.New(\"Not Response\")\n\t}\n\tresponse := &AMIResponse{ID: data[\"ActionID\"], Status: r, Params: make(map[string]string)}\n\tfor k, v := range data {\n\t\tif k == \"Response\" {\n\t\t\tcontinue\n\t\t}\n\t\tresponse.Params[k] = v\n\t}\n\treturn response, nil\n}", "func newResponse(r *http.Response) *Response {\n\tresp := &Response{\n\t\tResponse: r,\n\t}\n\tif v := r.Header.Get(headerXRemaining); v != \"\" {\n\t\tresp.Remaining, _ = 
strconv.Atoi(v)\n\t}\n\tif v := r.Header.Get(headerXReset); v != \"\" {\n\t\tresp.Reset, _ = strconv.Atoi(v)\n\t}\n\tif v := r.Header.Get(headerXTotal); v != \"\" {\n\t\tresp.Total, _ = strconv.Atoi(v)\n\t}\n\treturn resp\n}", "func NewResponse(ctx iris.Context) Response {\n\treturn Response{ctx: ctx}\n}", "func newErrorResp(key, msg string, err error) *ResponseType {\n\treturn &ResponseType{Ok: false, Message: msg, Error: err.Error(), Key: key}\n}", "func NewResponse(req *reqres.Req, data interface{}) []byte {\n\n\tres := &reqres.Res{\n\t\tData: data,\n\t}\n\n\tswitch req.ResponseMarshalingMode {\n\tcase \"DataOnly\":\n\t\treturn res.MarshalDataOnly(req)\n\tdefault:\n\t\treturn res.Marshal(req)\n\t}\n}", "func newResponse(r *http.Response) *Response {\n\tresponse := &Response{Response: r}\n\tresponse.Rate = parseRate(r)\n\treturn response\n}", "func createResponse(req *http.Request) *http.Response {\n\treturn &http.Response{\n\t\tStatusCode: http.StatusOK,\n\t\tRequest: req,\n\t\tHeader: make(http.Header),\n\t\tBody: ioutil.NopCloser(bytes.NewBuffer([]byte{})),\n\t}\n}", "func New() *Response {\n\tresponse := Response{\n\t\tresult: nil,\n\t\tsuccess: true,\n\t\terrors: []*Error{},\n\t\tstatusCode: 0,\n\t}\n\n\treturn &response\n}", "func newResponse(r *http.Response) *Response {\n\tresponse := &Response{Response: r}\n\tresponse.populatePageValues()\n\treturn response\n}", "func newSuccessResp(key, msg string) *ResponseType {\n\treturn &ResponseType{Ok: true, Message: msg, Key: key}\n}", "func NewResponse(req *Request) *Response {\n\treturn &Response{Request: req, items: []map[string]interface{}{}}\n}", "func CreateResponse(result interface{}, err error) *Response {\n\tif err == nil {\n\t\treturn CreateSuccessResponse(result)\n\t}\n\treturn CreateErrorResponse(err)\n}", "func newResponse(r *http.Response) *Response {\n\tresponse := &Response{Response: r}\n\tresponse.populatePageValues()\n\n\tdata, err := ioutil.ReadAll(r.Body)\n\tif err == nil && data != nil {\n\t\tjson.Unmarshal(data, response)\n\t}\n\n\treturn response\n}", "func TestNewSimpleResponse(t *testing.T) {\n\t// Create without format.\n\tr := NewSimpleResponse(\"TAG\", \"XXX\")\n\tif r.Status != \"TAG\" {\n\t\tt.Fatalf(\"Unexpected status. Expected: %s - Found: %s.\", \"TAG\", r.Status)\n\t}\n\tif r.Message != \"XXX\" {\n\t\tt.Fatalf(\"Unexpected message. Expected: %s - Found: %s.\", \"XXX\", r.Message)\n\t}\n\t// Create with format.\n\tr = NewSimpleResponsef(\"TAG2\", \"the%s\", \"message\")\n\tif r.Status != \"TAG2\" {\n\t\tt.Fatalf(\"Unexpected status. Expected: %s - Found: %s.\", \"TAG2\", r.Status)\n\t}\n\tif r.Message != \"themessage\" {\n\t\tt.Fatalf(\"Unexpected message. 
Expected: %s - Found: %s.\", \"themessage\", r.Message)\n\t}\n}", "func NewResponse(status int, headers, body string, start time.Time) *Response {\n\treturn &Response{\n\t\tStatus: status,\n\t\tHeaders: headers,\n\t\tBody: body,\n\t\tStart: start,\n\t\tEnd: time.Now(),\n\t}\n}", "func CreateResponse(w *gin.Context, payload interface{}) {\n\tw.JSON(200, payload)\n}", "func NewResponse(w http.ResponseWriter) *Response {\n\treturn &Response{ResponseWriter: w, Status: http.StatusOK}\n}", "func NewResponse(version string) *Response {\n\treturn &Response{\n\t\tVersion: version,\n\t\tItems: []Item{},\n\t}\n}", "func newResponse(regex string, code int) Response {\n\tr := Response{Code: code}\n\tr.Regex = regexp.MustCompile(regex)\n\treturn r\n}", "func NewResponse(request Request) Response {\n\tr := Response{\n\t\tRequest: request,\n\t\tVersion: \"1.0\",\n\t\tBody: responseBody{},\n\t}\n\treturn r.EndSession(true)\n}", "func NewResponse(id string, ret Output, errMsg string) *Response {\n\tvar err *string\n\tif len(errMsg) != 0 {\n\t\terr = &errMsg\n\t}\n\n\treturn &Response{ID: id, Output: ret, Error: err}\n}", "func newResponse(total, perPage, currentPage, lastPage int) *Response {\n\tr := &Response{\n\t\tTotal: total,\n\t\tPerPage: perPage,\n\t\tCurrentPage: currentPage,\n\t\tLastPage: lastPage,\n\t}\n\n\t// Set the next page.\n\tif r.LastPage > r.CurrentPage {\n\t\tnextPage := r.CurrentPage + 1\n\t\tr.NextPage = &nextPage\n\t}\n\n\t// Set the previous page.\n\tif r.CurrentPage > 1 {\n\t\tprevPage := r.CurrentPage - 1\n\t\tr.PrevPage = &prevPage\n\t}\n\n\treturn r\n}", "func NewResponse(input sarah.Input, msg string, options ...RespOption) (*sarah.CommandResponse, error) {\n\ttyped, ok := input.(*Input)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"%T is not currently supported to automatically generate response\", input)\n\t}\n\n\tstash := &respOptions{\n\t\tattachments: []*webapi.MessageAttachment{},\n\t\tuserContext: nil,\n\t\tlinkNames: 1, // Linkify channel names and usernames. ref. 
https://api.slack.com/docs/message-formatting#parsing_modes\n\t\tparseMode: webapi.ParseModeFull,\n\t\tunfurlLinks: true,\n\t\tunfurlMedia: true,\n\t}\n\tfor _, opt := range options {\n\t\topt(stash)\n\t}\n\n\tpostMessage := webapi.NewPostMessage(typed.channelID, msg).\n\t\tWithAttachments(stash.attachments).\n\t\tWithLinkNames(stash.linkNames).\n\t\tWithParse(stash.parseMode).\n\t\tWithUnfurlLinks(stash.unfurlLinks).\n\t\tWithUnfurlMedia(stash.unfurlMedia)\n\tif replyInThread(typed, stash) {\n\t\tpostMessage.\n\t\t\tWithThreadTimeStamp(threadTimeStamp(typed).String()).\n\t\t\tWithReplyBroadcast(stash.replyBroadcast)\n\t}\n\treturn &sarah.CommandResponse{\n\t\tContent: postMessage,\n\t\tUserContext: stash.userContext,\n\t}, nil\n}", "func NewResponse(status int, body string) Response {\n\treturn &response{\n\t\tstatus: status,\n\t\tbody: body,\n\t}\n}", "func CreateFromResponse(u url.URL, r http.Response, b []byte) (Representor, error) {\n\n\tct := r.Header.Get(\"Content-Type\")\n\tif ct == \"\" {\n\t\treturn nil, errors.New(\"missing content-type\")\n\t}\n\n\t// TODO: Check the header for Link\n\n\treturn Create(u, ct, b)\n}", "func NewResponse(group string) *Response {\n\treturn &Response{\n\t\tGroup: group,\n\t\tErrors: make([]string, 0),\n\t\tData: make(map[string]interface{}, 0),\n\t}\n}", "func NewResponse(i int64, r string, e Error) Response {\n\treturn Response{\n\t\tID: i,\n\t\tJSONRpc: JSONRPCVersion,\n\t\tResult: r,\n\t\tError: e,\n\t}\n}", "func NewGenericResponse(stsCd, isError int, messages []string, data interface{}) *GenericResponse {\n\n\treturn &GenericResponse{\n\t\tStatus: stsCd,\n\t\tSuccess: isError == 0,\n\t\tMessages: messages,\n\t\tData: data,\n\t}\n}", "func NewResponse(speech string) Response {\n\treturn Response{\n\t\tVersion: \"1.0\",\n\t\tBody: ResBody{\n\t\t\tOutputSpeech: Payload{\n\t\t\t\tType: \"PlainText\",\n\t\t\t\tText: speech,\n\t\t\t},\n\t\t\tShouldEndSession: true,\n\t\t},\n\t}\n}", "func NewResponse(eventID string) *Response {\n\treturn &Response{\n\t\tEventID: eventID,\n\t\tStatus: shared.StatusOK,\n\t\tStarted: time.Now(),\n\t}\n}", "func NewResponse(in *yaml.Node, context *compiler.Context) (*Response, error) {\n\terrors := make([]error, 0)\n\tx := &Response{}\n\tm, ok := compiler.UnpackMap(in)\n\tif !ok {\n\t\tmessage := fmt.Sprintf(\"has unexpected value: %+v (%T)\", in, in)\n\t\terrors = append(errors, compiler.NewError(context, message))\n\t} else {\n\t\tallowedKeys := []string{\"$ref\"}\n\t\tvar allowedPatterns []*regexp.Regexp\n\t\tinvalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)\n\t\tif len(invalidKeys) > 0 {\n\t\t\tmessage := fmt.Sprintf(\"has invalid %s: %+v\", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, \", \"))\n\t\t\terrors = append(errors, compiler.NewError(context, message))\n\t\t}\n\t\t// string _ref = 1;\n\t\tv1 := compiler.MapValueForKey(m, \"$ref\")\n\t\tif v1 != nil {\n\t\t\tx.XRef, ok = compiler.StringForScalarNode(v1)\n\t\t\tif !ok {\n\t\t\t\tmessage := fmt.Sprintf(\"has unexpected value for $ref: %s\", compiler.Display(v1))\n\t\t\t\terrors = append(errors, compiler.NewError(context, message))\n\t\t\t}\n\t\t}\n\t}\n\treturn x, compiler.NewErrorGroupOrNil(errors)\n}", "func createPaymentResponse(classOfPayment string) PaymentResponse {\n\tcost := Cost{\n\t\tClassOfPayment: []string{classOfPayment},\n\t}\n\tpaymentResponse := PaymentResponse{\n\t\tCosts: []Cost{cost},\n\t}\n\treturn paymentResponse\n}", "func TestNewResponse(t *testing.T) {\n\n\tyml := `description: this is a 
response\nheaders:\n someHeader:\n description: a header\ncontent:\n something/thing:\n description: a thing\nx-pizza-man: pizza!\nlinks:\n someLink:\n description: a link! `\n\n\tvar idxNode yaml.Node\n\t_ = yaml.Unmarshal([]byte(yml), &idxNode)\n\tidx := index.NewSpecIndexWithConfig(&idxNode, index.CreateOpenAPIIndexConfig())\n\n\tvar n v3.Response\n\t_ = low.BuildModel(idxNode.Content[0], &n)\n\t_ = n.Build(idxNode.Content[0], idx)\n\n\tr := NewResponse(&n)\n\n\tassert.Len(t, r.Headers, 1)\n\tassert.Len(t, r.Content, 1)\n\tassert.Equal(t, \"pizza!\", r.Extensions[\"x-pizza-man\"])\n\tassert.Len(t, r.Links, 1)\n\tassert.Equal(t, 1, r.GoLow().Description.KeyNode.Line)\n\n}", "func (hnd Handlers) CreateResponseWrapper(status string) map[string]interface{} {\n\twrapper := make(map[string]interface{})\n\twrapper[\"status\"] = status\n\twrapper[\"version\"] = \"1.0\"\n\twrapper[\"ts\"] = time.Now()\n\n\treturn wrapper\n}", "func NewResponse(msg string, code int) *Response {\n\treturn &Response{\n\t\tStatus: http.StatusText(code),\n\t\tMessage: msg,\n\t\tStatusCode: code,\n\t}\n}", "func NewResponse(call *Call) *Response {\n\tresp := &Response{\n\t\tVersion: call.Version,\n\t\tID: call.ID,\n\t}\n\treturn resp\n}", "func (req *RequestMessage) CreateResponse(err error) (*ResponseMessage, error) {\n\tswitch val := req.request.(type) {\n\tcase *proto.ProduceReq:\n\t\treturn createProduceResponse(val, err)\n\tcase *proto.FetchReq:\n\t\treturn createFetchResponse(val, err)\n\tcase *proto.OffsetReq:\n\t\treturn createOffsetResponse(val, err)\n\tcase *proto.MetadataReq:\n\t\treturn createMetadataResponse(val, err)\n\tcase *proto.ConsumerMetadataReq:\n\t\treturn createConsumerMetadataResponse(val, err)\n\tcase *proto.OffsetCommitReq:\n\t\treturn createOffsetCommitResponse(val, err)\n\tcase *proto.OffsetFetchReq:\n\t\treturn createOffsetFetchResponse(val, err)\n\tcase nil:\n\t\treturn nil, fmt.Errorf(\"unsupported request API key %d\", req.kind)\n\tdefault:\n\t\t// The switch cases above must correspond exactly to the switch cases\n\t\t// in ReadRequest.\n\t\tlogrus.Panic(fmt.Sprintf(\"Kafka API key not handled: %d\", req.kind))\n\t}\n\treturn nil, nil\n}", "func NewResponse() *Response {\n\n\tresp := http.Response{\n\t\tHeader: http.Header{},\n\t}\n\treturn &Response{\n\t\tResp: &resp,\n\t}\n}", "func NewResponse(statusCode int16, reqID int32) *Response {\n\treturn &Response{\n\t\tProtocolVersionMajor: ProtocolVersionMajor,\n\t\tProtocolVersionMinor: ProtocolVersionMinor,\n\t\tStatusCode: statusCode,\n\t\tRequestId: reqID,\n\t\tOperationAttributes: make(Attributes),\n\t\tPrinterAttributes: make([]Attributes, 0),\n\t\tJobAttributes: make([]Attributes, 0),\n\t}\n}", "func CreateResponse() *application.HTTPResponse {\n\tresponse := &application.HTTPResponse{}\n\tresponse.Headers = make(map[string]*application.HTTPResponse_HTTPHeaderParameter)\n\treturn response\n}", "func NewResponse(err error, statusCode int) *Response {\n\treturn &Response{\n\t\tError: err,\n\t\tStatusCode: statusCode,\n\t}\n}", "func ParseCreateInstanceResponse(rsp *http.Response) (*CreateInstanceResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &CreateInstanceResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch {\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 200:\n\t\tvar dest Operation\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\t\tresponse.JSON200 = &dest\n\n\t}\n\n\treturn response, nil\n}", "func NewResponse() (*Response) {\n\tr := new(Response)\n\tr.TableNames = make([]string,0)\n\treturn r\n}", "func NewResponse(status int, data json.RawMessage, err error) Response {\n\terrs := \"\"\n\n\tif err != nil {\n\t\terrs = err.Error()\n\t}\n\n\treturn Response(JSONResponse{status: status, Body: &data, Err: errs})\n}", "func NewResp(r *ghttp.Request, code int, msg string, data ...interface{}) *apiResp {\n\tvar d interface{}\n\tif len(data) > 0 {\n\t\td = data[0]\n\t}\n\n\treturn &apiResp{\n\t\tResp: Resp{\n\t\t\tCode: code,\n\t\t\tMsg: msg,\n\t\t\tData: d,\n\t\t},\n\t\tr: r,\n\t}\n}", "func NewResponse() *Response {\n\tr := &Response{}\n\treturn r\n}", "func BuildResponse(data interface{}, statusCode int, msg string, tid string) Response {\n\tstatus := OK\n\n\tif statusCode >= 400 {\n\t\tstatus = ERROR\n\t}\n\n\tresponse := Response{\n\t\tMeta: Meta{\n\t\t\tStatus: status,\n\t\t\tStatusCode: statusCode,\n\t\t\tMessage: msg,\n\t\t\tTransactionID: tid,\n\t\t},\n\t\tData: data,\n\t}\n\n\treturn response\n}", "func NewResponse(c echo.Context, success bool, code int, message string, content echo.Map) Response {\n\t// no custom context defined, returns basic response api\n\treturn NewResponseAPI(success, code, message, content)\n}", "func NewResponse(content ResponseContent, req *http.Request, opts *ResponseOptions) (*http.Response, error) {\n\tresp, err := exported.NewResponse(content, req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif opts != nil {\n\t\tif opts.Body != nil {\n\t\t\tresp.Body = opts.Body\n\t\t}\n\t\tif opts.ContentType != \"\" {\n\t\t\tresp.Header.Set(shared.HeaderContentType, opts.ContentType)\n\t\t}\n\t}\n\treturn resp, nil\n}", "func newErrResponse(code int, err error) *ErrResponse {\n\treturn &ErrResponse{Code: code, Err: err}\n}", "func NewResponse(payload []byte) (*Response, error) {\n\tvar err error\n\tvar resp *Response = &Response{}\n\n\tif err = json.Unmarshal(payload, resp); err != nil {\n\t\tlog.Fatalln(\"Error on JSON marchal:\", err)\n\t\treturn nil, err\n\t}\n\n\treturn resp, nil\n}", "func NewResponse(status int, message, errCode string) *Response {\n\treturn &Response{\n\t\tStatus: status,\n\t\tMessage: message,\n\t\tErrorCode: errCode,\n\t}\n}", "func NewResponse(statusCode int, data interface{}, headers map[string]string) *Response {\n\treturn &Response{\n\t\tStatusCode: statusCode,\n\t\tData: data,\n\t\tHeaders: headers,\n\t}\n}", "func NewResponse() (*Response) {\n\tr := new(Response)\n\tr.TableDescription.KeySchema = make(ep.KeySchema,0)\n\tr.TableDescription.LocalSecondaryIndexes = make([]ep.LocalSecondaryIndexDesc,0)\n\tr.TableDescription.GlobalSecondaryIndexes = make([]ep.GlobalSecondaryIndexDesc,0)\n\treturn r\n}", "func CreateResponse(resultCode uint32, internalCommand []byte) ([]byte, error) {\n\t// Response frame:\n\t// - uint32 (size of response)\n\t// - []byte (response)\n\t// - uint32 (code)\n\tvar buf bytes.Buffer\n\n\tif err := binary.Write(&buf, binary.BigEndian, uint32(len(internalCommand))); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif _, err := buf.Write(internalCommand); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := binary.Write(&buf, binary.BigEndian, resultCode); err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}", "func New(w http.ResponseWriter, status int, data interface{}) error {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(status)\n\n\treturn json.NewEncoder(w).Encode(data)\n}", "func 
NewResponse(raw []byte) (res *Response, err error) {\n\tif len(raw) == 0 {\n\t\treturn nil, nil\n\t}\n\n\t// The minimum length is 3 + CRLF\n\tif len(raw) < 5 {\n\t\treturn nil, errors.New(\"invalid response length\")\n\t}\n\n\tres = &Response{}\n\n\tcode, isMultiline, err := res.parseCode(raw)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar parser = libbytes.NewParser(raw[4:], []byte{'-', ' ', '\\n'})\n\n\terr = res.parseMessage(parser, isMultiline)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !isMultiline {\n\t\treturn res, nil\n\t}\n\n\terr = res.parseBody(parser, code)\n\n\treturn res, err\n}", "func NewResponse() *Response {\n\tresponse := Response{\n\t\tHeaders: make(map[string][]string),\n\t\tSelectors: make(map[string][]*element),\n\t}\n\treturn &response\n}", "func TestNewFailResponse(t *testing.T) {\n\t// Create without format.\n\tr := NewFailResponse(\"XXX\")\n\tif r.Status != NAK {\n\t\tt.Fatalf(\"Unexpected status. Expected: %s - Found: %s.\", NAK, r.Status)\n\t}\n\tif r.Message != \"XXX\" {\n\t\tt.Fatalf(\"Unexpected message. Expected: %s - Found: %s.\", \"XXX\", r.Message)\n\t}\n\t// Create with format.\n\tr = NewFailResponsef(\"the%s\", \"message\")\n\tif r.Status != NAK {\n\t\tt.Fatalf(\"Unexpected status. Expected: %s - Found: %s.\", NAK, r.Status)\n\t}\n\tif r.Message != \"themessage\" {\n\t\tt.Fatalf(\"Unexpected message. Expected: %s - Found: %s.\", \"themessage\", r.Message)\n\t}\n}", "func NewResponse() (*Response, error) {\n\treturn &Response{\n\t\tMoves: make([]*ResponseMove, 0, 10),\n\t}, nil\n}", "func newResult(resp *internal.Response) Result {\n\treturn &result{resp: resp}\n}", "func buildResponse(rawResponse *http.Response, err error) *Response {\n\treturn &Response{\n\t\tResponse: rawResponse,\n\t\tError: err,\n\t}\n}", "func decodeCreateResponse(_ context.Context, reply interface{}) (interface{}, error) {\n\tresp, found := reply.(*pb.CreateReply)\n\tif !found {\n\t\treturn nil, fmt.Errorf(\"pb CreateReply type assertion error\")\n\t}\n\treturn endpoint1.CreateResponse{E1: nil, UUID: resp.Uuid}, nil\n}", "func (p *Processor) newResponse(load Load, accepted bool) *Response {\n\tr := Response{\n\t\tID: load.ID,\n\t\tCustomerID: load.CustomerID,\n\t\tAccepted: accepted,\n\t}\n\n\t// store load into processed Loads\n\tp.processedLoads[load.ID] = r\n\n\treturn &r\n}", "func NewBindResponse(i interface{}, description string) *spec.Response {\n\tresp := new(spec.Response)\n\tresp.Description = description\n\n\tt := reflect.TypeOf(i)\n\n\tif t.Kind() == reflect.Ptr {\n\t\tt = t.Elem()\n\t}\n\n\tslice := false\n\tif t.Kind() == reflect.Slice {\n\t\tslice = true\n\t\tt = t.Elem()\n\t}\n\n\tif t.Kind() == reflect.Struct {\n\t\tschema := spec.RefSchema(\"#/definitions/\" + t.Name())\n\t\tif slice {\n\t\t\tresp.WithSchema(spec.ArrayProperty(schema))\n\t\t} else {\n\t\t\tresp.WithSchema(schema)\n\t\t}\n\t}\n\n\treturn resp\n}", "func (c *DNSBLResponseClient) Create() *DNSBLResponseCreate {\n\tmutation := newDNSBLResponseMutation(c.config, OpCreate)\n\treturn &DNSBLResponseCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}\n}", "func ResponseFromStruct(w http.ResponseWriter, data []byte) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(data)\n}", "func CreateGetOpenNLUResponse() (response *GetOpenNLUResponse) {\n\tresponse = &GetOpenNLUResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (self *Operation) Response(code int) *Response {\n\tif self.Responses[code] == 
nil {\n\t\tself.Responses[code] = &Response{\n\t\t\tHeaders: map[string]*Header{},\n\t\t}\n\t}\n\treturn self.Responses[code]\n}", "func CreateModifyDomainResponse() (response *ModifyDomainResponse) {\n\tresponse = &ModifyDomainResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateRegisterLineageRelationResponse() (response *RegisterLineageRelationResponse) {\n\tresponse = &RegisterLineageRelationResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateUpdateSpeechModelResponse() (response *UpdateSpeechModelResponse) {\n\tresponse = &UpdateSpeechModelResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (ccr ContainersCreateResponse) Response() *http.Response {\n\treturn ccr.rawResponse\n}", "func CreateValuateTemplateResponse() (response *ValuateTemplateResponse) {\n\tresponse = &ValuateTemplateResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func decodeCreateResponse(_ context.Context, reply interface{}) (interface{}, error) {\n\treturn nil, errors.New(\"'Users' Decoder is not impelemented\")\n}", "func NewResponse(body []byte, rawResponse *http.Response) *Response {\n\treturn &Response{\n\t\tbody: body,\n\t\trawResponse: rawResponse,\n\t}\n}", "func NewReqRespStruct(c *gin.Context, kind string) TReqResp {\n\treturn TReqResp{c: c, Kind: kind}\n}", "func NewGenericResponse(status int, body interface{}) *GenericResponse {\n\treturn &GenericResponse{\n\t\tbody: body,\n\t\tstatus: status,\n\t}\n}", "func newTimeoutResponse(id string) *Response {\n\treturn &Response{\n\t\tID: id,\n\t\tError: &HTTPResponse{\n\t\t\tRawStatus: http.StatusGatewayTimeout,\n\t\t\tRawBody: map[string]interface{}{},\n\t\t},\n\t}\n}", "func NewResponse(expectedCode int, expectedJSONBody string) *Response {\n\treturn &Response{\n\t\texpectedCode: expectedCode,\n\t\texpectedBody: expectedJSONBody,\n\t\trecorder: httptest.NewRecorder(),\n\t}\n}", "func newEmptyResponse() *Response {\n\treturn &Response{\n\t\tBody: &HTTPResponse{},\n\t\tError: &HTTPResponse{},\n\t}\n}", "func NewResponse(resp *http.Response) (*Response, error) {\n\tjson, err := ParseJSON(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Response{\n\t\tCode: resp.StatusCode,\n\t\tBody: json,\n\t}, err\n}" ]
[ "0.69555134", "0.6907398", "0.686758", "0.686758", "0.686758", "0.6756983", "0.672033", "0.6685266", "0.66744035", "0.66368026", "0.6636733", "0.66237515", "0.6606498", "0.6605905", "0.6589414", "0.6578238", "0.6569864", "0.65480584", "0.6515209", "0.65057844", "0.6501976", "0.64916736", "0.64837533", "0.6480729", "0.64663494", "0.64662373", "0.64473075", "0.640449", "0.64030963", "0.6400951", "0.63841224", "0.63786364", "0.6341857", "0.6327901", "0.6316673", "0.629924", "0.62422466", "0.62340295", "0.6225986", "0.62180126", "0.6212624", "0.62121713", "0.62068695", "0.6189868", "0.6177229", "0.6152878", "0.6147144", "0.61298054", "0.6116068", "0.61014575", "0.60946965", "0.6088342", "0.6081525", "0.6046872", "0.6046441", "0.6019917", "0.6008689", "0.6007527", "0.5997901", "0.5989298", "0.5981786", "0.5967867", "0.5957954", "0.5951669", "0.5933472", "0.59224665", "0.58896464", "0.5887017", "0.58802056", "0.587906", "0.58776015", "0.58698905", "0.5849628", "0.58412", "0.5840901", "0.58394027", "0.5833636", "0.5812234", "0.5801564", "0.57993245", "0.5796251", "0.57942194", "0.5794149", "0.5792157", "0.5790696", "0.5790023", "0.5766861", "0.57653993", "0.57540053", "0.5752741", "0.5752367", "0.5746193", "0.5741942", "0.57361484", "0.57337475", "0.5729041", "0.57194495", "0.570032", "0.5689587", "0.56887525" ]
0.66314137
11
Serve sets up the server and listens for requests
func Serve(ctx context.Context, serviceName string, options ...ServerOption) error {
	var err error

	// Setup the server options
	serverOptions := &serverOptions{
		serviceName: serviceName,
		log:         log.WithField("service", serviceName),
	}

	options = append([]ServerOption{
		WithServerConfig(ServerConfig{
			Bind:   "0.0.0.0",
			Listen: 5000,
			TLS:    TLSConfig{},
		}),
		WithHealthCheck(nil),
		WithPrometheusMetrics(),
	}, options...)

	// Process all server options (which may override any of the above)
	for _, option := range options {
		err = option.apply(ctx, serverOptions)
		if err != nil {
			return err
		}
	}

	handlers := &handlerHeap{}
	heap.Init(handlers)
	for _, option := range options {
		err = option.addHandler(ctx, serverOptions, handlers)
		if err != nil {
			return err
		}
	}

	// Create the HTTP server
	mux := http.NewServeMux()
	for handlers.Len() > 0 {
		pair := heap.Pop(handlers).(*handlerPair)
		serverOptions.log.WithField("endpoint", pair.pattern).Info("adding handler")
		mux.Handle(pair.pattern, pair.handler)
	}

	w := log.Writer()
	defer w.Close()

	// Start listening
	serverOptions.log.Info("listening")
	ln, err := net.Listen("tcp", serverOptions.addr)
	if err != nil {
		return err
	}

	// Serve requests
	if serverOptions.config.GetTLS().GetEnabled() {
		serverOptions.log.Trace("loading server tls certs")
		config, err := NewServerTLSConfig(serverOptions.config.GetTLS(), serverOptions.vault)
		if err != nil {
			ln.Close()
			return err
		}
		ln = tls.NewListener(ln, config)
	}
	defer ln.Close()

	serverOptions.log.Info("serving")
	srv := &http.Server{
		Addr:     serverOptions.addr,
		Handler:  mux,
		ErrorLog: syslog.New(w, "[http]", 0),
	}
	return srv.Serve(ln)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func serve() error {\n\n\trouter := configureRoutes()\n\n\thttp.Handle(\"/\", router)\n\n\t// Define port and set to default if environment variable is not set\n\tport := PORT\n\tif len(os.Getenv(\"GO_PORT\")) > 0 {\n\t\tport = os.Getenv(\"GO_PORT\")\n\t}\n\n\tlogger.Info(\"Initiating HTTP Server on Port %v\", port)\n\treturn (http.ListenAndServe(port, router))\n}", "func serve(svr *http.Server) {\n\tlog.Info(\"accepting connections\", zap.String(\"addr\", config.Bind))\n\tif err := svr.ListenAndServe(); err != nil {\n\t\tlog.Fatal(\"error serving requests\", zap.Error(err))\n\t}\n}", "func serve(app *app.CallMe) {\n\thandlers.Register(app)\n\n\tapp.Logger.Info(\n\t\t\"Ready to ListenIP\",\n\t\tzap.Int(\"ListenPort\", app.ListenPort),\n\t\tzap.String(\"IP\", app.ListenIP),\n\t)\n\n\tlistenOn := fmt.Sprintf(\"%s:%d\", app.ListenIP, app.ListenPort)\n\terr := http.ListenAndServe(listenOn, nil)\n\tif err != nil {\n\t\tapp.Logger.Error(\"Server error\", zap.Error(err))\n\t}\n}", "func (s *Server) Serve() error {\n\tloadTemplate()\n\thttp.HandleFunc(\"/watch\", s.watchChanges)\n\thttp.HandleFunc(\"/\", s.home)\n\treturn http.ListenAndServe(\":5152\", nil)\n}", "func (s *Server) Serve() {\n\trouter := chi.NewRouter()\n\n\t// Middlewares\n\trouter.Use(middleware.RequestID)\n\trouter.Use(middleware.RealIP)\n\trouter.Use(middleware.Recoverer)\n\trouter.Use(HTTPLog)\n\trouter.Use(NewCORS())\n\n\trouter.HandleFunc(\"/ping\", s.Ping)\n\n\trouter.Get(\"/docs/swagger/swagger.json\", func(w http.ResponseWriter, r *http.Request) {\n\t\tdoc, err := docs.InitSwag()\n\t\tif err != nil {\n\t\t\tif _, err := w.Write([]byte(fmt.Sprintf(\"failed to parse json file: %v\", err))); err != nil {\n\t\t\t\ts.Log.WithField(\"err\", err).Error(\"failed to write http response\")\n\t\t\t}\n\t\t}\n\n\t\tw.Header().Add(\"Content-Type\", \"application/json\")\n\t\tif _, err := w.Write(doc); err != nil {\n\t\t\ts.Log.WithField(\"err\", err).Error(\"failed to write http response\")\n\t\t}\n\t})\n\n\trouter.Get(\"/docs/swagger/*\", httpSwagger.Handler(httpSwagger.URL(\"swagger.json\")))\n\trouter.HandleFunc(\"/result\", s.Result)\n\n\terrChan := make(chan error, 1)\n\tdefer close(errChan)\n\n\tgo func() {\n\t\tc := make(chan os.Signal, 1)\n\t\tsignal.Notify(c, syscall.SIGINT, syscall.SIGHUP, syscall.SIGTERM, syscall.SIGQUIT)\n\t\terrChan <- fmt.Errorf(\"%s\", <-c)\n\t}()\n\n\tgo func() {\n\t\tport := viper.GetString(\"PORT\")\n\t\tif port == \"\" {\n\t\t\tport = \"80\"\n\t\t}\n\t\ts.Log.WithFields(logrus.Fields{\"transport\": \"http\", \"state\": \"listening\"}).Info(\"http init\")\n\t\terrChan <- http.ListenAndServe(\":\"+port, router)\n\t}()\n\n\ts.Log.WithFields(logrus.Fields{\"transport\": \"http\", \"state\": \"terminated\"}).Error(<-errChan)\n\tos.Exit(1)\n}", "func (s *Server) Serve(port string) error {\n\t// init all API\n\tshortenerApi.Init(s.shortenerSrv)\n\tredirectorApi.Init(s.shortenerSrv, s.statisticSrv)\n\tstatisticApi.Init(s.shortenerSrv, s.statisticSrv)\n\n\tlis, err := net.Listen(\"tcp4\", port)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn s.server.Serve(lis)\n}", "func (s *Server) Serve() error {\n\tif !s.isInitialized {\n\t\treturn ErrNotInitialized\n\t}\n\n\tif s.options.port == 0 {\n\t\ts.options.port = 7777\n\t}\n\n\thost := net.JoinHostPort(\"\", strconv.Itoa(s.options.port))\n\tlog.Println(\"Serve ProjectShareAPI on \" + host)\n\terr := http.ListenAndServe(host, s.router)\n\treturn err\n}", "func serve() {\n\tdb, err := repository.NewSqlite3(\"./tmp/tmp.db\")\n\tif err != nil 
{\n\t\tfmt.Fprintf(os.Stderr, \"failed to start server: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tr := router.New(db)\n\tport := 8080\n\tfmt.Fprintf(os.Stdout, \"start to run http server at port %d\\n\", port)\n\tif err := http.ListenAndServe(fmt.Sprintf(\":%d\", port), r); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"failed to start server: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n}", "func ListenAndServe(ctx context.Context, bin, address, port string) {\n\tfmt.Println(`\n\n███████╗███████╗██╗ ███████╗ ███████╗███████╗████████╗███████╗███████╗███╗ ███╗\n██╔════╝██╔════╝██║ ██╔════╝ ██╔════╝██╔════╝╚══██╔══╝██╔════╝██╔════╝████╗ ████║\n███████╗█████╗ ██║ █████╗█████╗█████╗ ███████╗ ██║ █████╗ █████╗ ██╔████╔██║\n╚════██║██╔══╝ ██║ ██╔══╝╚════╝██╔══╝ ╚════██║ ██║ ██╔══╝ ██╔══╝ ██║╚██╔╝██║\n███████║███████╗███████╗██║ ███████╗███████║ ██║ ███████╗███████╗██║ ╚═╝ ██║\n╚══════╝╚══════╝╚══════╝╚═╝ ╚══════╝╚══════╝ ╚═╝ ╚══════╝╚══════╝╚═╝ ╚═╝`)\n\tlog.Info(ctx, \"server listening\", \"bin\", bin, \"address\", address, \"port\", port)\n\thttp.ListenAndServe(fmt.Sprintf(\"%s:%s\", address, port), mux)\n}", "func (s *Service) Serve() {\n\thttp.ListenAndServe(\":3333\", s.r)\n}", "func serve() {\n\t// run the broker\n\tgo broker.Run()\n\n\t// Establish CORS parameters\n\tc := cors.New(cors.Options{\n\t\tAllowedOrigins: allowedOrigins,\n\t\tAllowedMethods: []string{\"GET\", \"POST\", \"DELETE\", \"OPTIONS\"},\n\t\tAllowedHeaders: []string{\"Content-Type\", \"Authorization\"},\n\t})\n\n\tport := os.Getenv(\"PORT\")\n\tif len(port) == 0 {\n\t\tport = \"3001\"\n\t}\n\n\tlog.Println(\"listening on port\", port)\n\n\t// Start HTTP server\n\tlog.Fatal(http.ListenAndServe(\":\"+port, c.Handler(\n\t\tregisterRoutes(middleware.InitializeMiddleware(jwtmiddleware.FromParameter(\"access_token\"))),\n\t)))\n}", "func Serve() {\n\trouter := createMuxRouter()\n\thttp.Handle(\"/\", router)\n\n\tsrv := &http.Server{\n\t\tHandler: router,\n\t\tAddr: \"127.0.0.1:\" + os.Getenv(\"PORT\"),\n\t\tWriteTimeout: 5 * time.Second,\n\t\tReadTimeout: 5 * time.Second,\n\t}\n\n\tlog.Printf(\"Server running on %s:%s\\n\", \"127.0.0.1\", os.Getenv(\"PORT\"))\n\tlog.Fatal(srv.ListenAndServe())\n}", "func (srv *Server) Serve() error {\n\ts := srv.s\n\tif s == nil {\n\t\treturn fmt.Errorf(\"Serve() failed: not initialized\")\n\t}\n\treturn srv.s.Serve(srv.lis)\n}", "func Serve() error {\n\tclientConnectionCount = metrics.RegisterGauge(\"client_connection_count\")\n\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"0.0.0.0:34601\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tlistener, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlogrus.Infof(\"start server listen: %s\", listener.Addr())\n\tfor {\n\t\tconn, err := listener.Accept()\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"connection accept: %s\", err)\n\t\t\tcontinue\n\t\t}\n\t\tclientConnectionCount.Inc()\n\t\t// TODO(wutao): add connections management\n\n\t\t// use one goroutine per connection\n\t\tgo serveConn(conn, conn.RemoteAddr().String())\n\t}\n}", "func (g *Server) Serve() {\n\n\taddr := g.Config.Port\n\tfmt.Printf(\"======= Server start to listen (%s) and serve =======\\n\", addr)\n\n\trouter := api.Router()\n\tif g.Config.GinMode != \"\" {\n\t\tgin.SetMode(g.Config.GinMode)\n\t}\n\n\trouter.Run(addr)\n\t//router.Run(\"localhost:8081\")\n\t\n\t//fmt.Println(router.ListenAndServe(\"addr\", nil))\n}", "func serve(config *config.Config) {\n\trouter := gin.Default()\n\n\t// Set the config in our handlers to give them access to server 
configuration\n\thandlers.SetConfig(config)\n\n\t// Initialize our routes to point to our handlers\n\tapi := router.Group(config.Server.APIPrefix)\n\tapi.GET(\"/ping\", handlers.PingHandler)\n\tapi.GET(\"/posts\", handlers.PostsHandler)\n\n\t// Configure the HTTP server\n\tserver := &http.Server {\n\t\tAddr: config.Server.Address,\n\t\tHandler: router,\n\t}\n\n\t// Start the HTTP server\n\tlog.Println(\"Starting HatchWays API Server\")\n\tif err := server.ListenAndServe(); err != nil {\n\t\tlog.Fatal(\"Error starting HatchWays API Server: \" + err.Error())\n\t}\n\n}", "func Serve(cfg Config) error {\n\tlog.Infof(\"starting http server on %s\", cfg.Listen)\n\thttp.Handle(\"/static/\", http.StripPrefix(\"/static/\", http.FileServer(http.Dir(cfg.Static))))\n\thttp.Handle(\"/ws\", websocket.Handler(wsHandler))\n\thttp.Handle(\"/\", http.Handler(withConfig(cfg, indexHandler)))\n\treturn http.ListenAndServe(cfg.Listen, nil)\n}", "func Serve(ctx context.Context) {\n\n\twriteTimeout := 15 * time.Second\n\treadTimeout := 15 * time.Second\n\taddress := fmt.Sprintf(\"%s:%d\", inet, port)\n\tlog := log.With(zap.String(\"address\", address))\n\tsrv := &http.Server{\n\t\tHandler: makeRouter(),\n\t\tAddr: address,\n\t\t// Good practice: enforce timeouts for servers you create!\n\t\tWriteTimeout: writeTimeout,\n\t\tReadTimeout: readTimeout,\n\t}\n\n\tlog.Info(\"Started\")\n\texited := make(chan bool)\n\n\tgo func() {\n\t\terr := srv.ListenAndServe()\n\t\tif err != nil {\n\t\t\tlog.With(zap.Error(err)).Fatal(\"Exited\")\n\t\t}\n\t\tclose(exited)\n\t}()\n\n\t<-ctx.Done()\n\tlog.Info(\"Context cancled\")\n\tsrv.Close()\n\tlog.Info(\"Server closed, listenAndServe will exit soon\")\n\n\t<-exited\n\tlog.Info(\"Server exited\")\n}", "func main() {\n\tlog.Printf(\"listening on %s and serving files from %s\\n\", port, dir)\n\thttp.ListenAndServe(port, server.Handler(dir))\n}", "func (srv *server) Serve() {\n\tfor {\n\t\tcli, err := srv.l.Accept()\n\t\tif err != nil {\n\t\t\tlog.Println(\"Closing server:\", err)\n\t\t\tbreak // on srv.l.Close\n\t\t}\n\t\tgo srv.handle(newConn(cli))\n\t}\n}", "func (s *Server) serve(lis net.Listener) {\n\ts.wg.Add(1)\n\tgo func() {\n\t\tlog.Infof(\"Listening on %s\", lis.Addr())\n\t\terr := s.httpServer.Serve(lis)\n\t\tlog.Tracef(\"Finished serving RPC: %v\", err)\n\t\ts.wg.Done()\n\t}()\n}", "func (s *Server) Serve() {\n\tport := os.Getenv(\"HTTP_PORT\")\n\tif port == \"\" {\n\t\tport = \"7777\"\n\t}\n\n\tlocalhost = \"localhost:\" + port\n\n\tif err := http.ListenAndServe(\":\"+port, handlers.RecoveryHandler()(s.Mux)); err != nil {\n\t\tlogrus.Fatal(err)\n\t}\n}", "func (manager *Manager) Serve(port string, httpHandler func()) {\n\tserver := manager.server\n\n\tgo server.Serve()\n\n\tlogger.Debug(\"Initializing fab.io...\")\n\n\thttp.Handle(\"/socket.io/\", server)\n\n\tif httpHandler != nil {\n\t\thttpHandler()\n\t}\n\n\tlogger.Debug(\"Starting Socket Server...\")\n\n\thttp.ListenAndServe(fmt.Sprintf(\":%v\", port), nil)\n}", "func (s *Service) serve() {\n\tvar err error\n\tif s.tls {\n\t\tlog.Printf(\" > httpd https://%s\", s.addr)\n\t\terr = s.ln.ListenAndServeTLS(s.certFile, s.keyFile)\n\t} else {\n\t\tlog.Printf(\" > httpd http://%s\", s.addr)\n\t\terr = s.ln.ListenAndServe()\n\t}\n\tif err != nil && !strings.Contains(err.Error(), \"closed\") {\n\t\ts.err <- fmt.Errorf(\"httpd http://%s\\n%s\", s.addr, err)\n\t}\n\t<-s.shutdownChan\n}", "func (srv *Server) Serve() (err error) {\n\tsrv.state = StateRunning\n\tdefer func() { srv.state = StateTerminate }()\n\n\t// 
主动重启导致的错误为ErrReloadClose\n\tif err = srv.serve(); err != nil && err != ErrReloadClose {\n\t\tlog.Println(syscall.Getpid(), \"Server.Serve() error:\", err)\n\t\treturn err\n\t}\n\n\tlog.Println(syscall.Getpid(), srv.ln.Addr(), \"Listener closed.\")\n\t// wait for Shutdown to return\n\treturn <-srv.terminalChan\n}", "func serve(c *cli.Context) (err error) {\n\t// Create server configuration\n\tvar conf config.Config\n\tif conf, err = config.New(); err != nil {\n\t\treturn cli.Exit(err, 1)\n\t}\n\n\t// Update from CLI flags\n\tif addr := c.String(\"addr\"); addr != \"\" {\n\t\tconf.BindAddr = addr\n\t}\n\n\t// Create and run the whisper server\n\tvar server *whisper.Server\n\tif server, err = whisper.New(conf); err != nil {\n\t\treturn cli.Exit(err, 1)\n\t}\n\n\tif err = server.Serve(); err != nil {\n\t\treturn cli.Exit(err, 1)\n\t}\n\treturn nil\n}", "func (s *Server) Serve(port string) {\n\thttp.HandleFunc(\"/\", Router)\n\terr := http.ListenAndServe(\":\"+port, nil)\n\tlog.Println(err)\n}", "func main() {\n\n\tlog.Printf(\"Server started\")\n\n\trouter := sw.NewRouter()\n\n\tport := os.Getenv(\"PORT\")\n\tif port == \"\" {\n\t\tport = \"5000\"\n\t}\n\n\theadersOk := handlers.AllowedHeaders([]string{\"X-Requested-With\", \"Content-Type\"})\n\toriginsOk := handlers.AllowedOrigins([]string{\"*\"})\n\tmethodsOk := handlers.AllowedMethods([]string{\"GET\", \"HEAD\", \"POST\", \"PUT\", \"OPTIONS\"})\n\n\tlog.Fatal(http.ListenAndServe(\":\"+port, handlers.CORS(originsOk, headersOk, methodsOk)(router)))\n}", "func Serve() {\n\tport := helpers.GeneratePortNumber()\n\tlog.Println(\"We are running on port: \" + strconv.Itoa(port))\n\tlog.Fatal(http.ListenAndServe(\":\"+strconv.Itoa(port), http.FileServer(http.Dir(\".\"))))\n}", "func (a *App) Serve() error {\n\ta.ctx.Trace(\"lego.serve\", \"Start serving...\")\n\n\terr := a.handlers.Serve()\n\tif err != nil {\n\t\ta.ctx.Error(\"lego.serve.error\", \"Error with handler.Serve (%s)\",\n\t\t\tlog.Error(err),\n\t\t)\n\t\treturn err\n\t}\n\n\t// Notify all callees that the app is up and running\n\ta.ready.Broadcast()\n\n\t<-a.done // Hang on\n\treturn nil\n}", "func (srv *Server) Serve(l net.Listener) error", "func (s *Server) Serve() (err error) {\n\ts.serverListener, err = net.Listen(\"tcp\", fmt.Sprint(\":\", s.ServerPort))\n\tif err != nil {\n\t\ts.log(err)\n\t\treturn err\n\t}\n\ts.clientListener, err = net.Listen(\"tcp\", fmt.Sprint(\":\", s.ClientPort))\n\tif err != nil {\n\t\ts.log(err)\n\t\treturn err\n\t}\n\n\tgo s.handleConn(s.serverListener)\n\tgo s.handleConn(s.clientListener)\n\n\treturn nil\n}", "func Serve(bindAddress string, port int) {\n\thttp.HandleFunc(\"/healthz\", handler.Healthz)\n\tlistenAddress := fmt.Sprintf(\"%s:%d\", bindAddress, port)\n\tgo http.ListenAndServe(listenAddress, nil)\n\tlogrus.Infof(\"Control plane coordinator HTTP server started (serving on %s)\", listenAddress)\n}", "func (p *ConcurrentServer) Serve() error {\n\terr := p.Listen()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn p.AcceptLoop()\n}", "func (manager *Manager) Serve(port string, httpHandler func()) {\n\tlogger.Debug(\"Initializing fab.io...\")\n\n\tserver := manager.server\n\n\thttp.Handle(\"/socket.io/\", corsMiddleware(server))\n\n\tif httpHandler != nil {\n\t\thttpHandler()\n\t}\n\n\tgo server.Serve()\n\n\tlogger.Debug(\"Starting Socket Server @ %v...\", port)\n\n\thttp.ListenAndServe(fmt.Sprintf(\":%v\", port), nil)\n}", "func main() {\n\tln, err := net.Listen(\"tcp\", \":8888\")\n\tif err != nil {\n\t\t// handle the error, e.g. 
`log.Fatal(err)`\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Println(\"Listening on \", ln.Addr())\n\tfor {\n\t\tc, err := ln.Accept()\n\t\tif err == nil {\n\t\t\t// do something with `c`\n\t\t\tfmt.Println(\"Connection: \", c)\n\t\t\t// Start goroutines by prepending the `go` keyword to call the serve function\n\t\t\tgo serve(c)\n\t\t}\n\t}\n}", "func (ws *WebServer) Serve(apiHandler APIHandler) {\n\tworkDir, _ := os.Getwd()\n\t//Allow cross-origin requests in non-production environment\n\tws.Router.Use(apiHandler.AllowCrossOrigin)\n\n\tdistDir := filepath.Join(workDir, \"/frontend/dist\")\n\tws.Router.Get(\"/*\", vueServer(distDir, apiHandler.Production))\n\n\tuploadsDir := filepath.Join(workDir, \"/uploads\")\n\tfileServer(ws.Router, \"/uploads\", http.Dir(uploadsDir))\n\n\tws.Router.Route(\"/api\", func(r chi.Router) {\n\t\tAPIRouter(r, apiHandler)\n\t})\n\n\tif *routes {\n\t\t// fmt.Println(docgen.JSONRoutesDoc(r))\n\t\tfmt.Println(docgen.MarkdownRoutesDoc(ws.Router, docgen.MarkdownOpts{\n\t\t\tProjectPath: \"github.com/jpoles1/root-cellar\",\n\t\t\tIntro: \"Welcome to the Root Cellar router docs.\",\n\t\t}))\n\t\treturn\n\t}\n\tif ws.BindPort != \"test\" && ws.BindIP != \"test\" {\n\t\tcolor.Green(\"Starting Web server on port: %s\", ws.BindPort)\n\t\tcolor.Green(\"Access the web server at: http://%s:%s\", ws.BindIP, ws.BindPort)\n\t\tlog.Fatal(http.ListenAndServe(ws.BindIP+\":\"+ws.BindPort, ws.Router))\n\t\tfmt.Println(\"Terminating TransitSign Web Server...\")\n\t}\n}", "func (srv *Server) Serve(addr string) error {\n\tln, err := net.Listen(\"tcp\", addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsrv.ln = ln\n\tdefer srv.Close()\n\thandler := http.NewServeMux()\n\thandler.Handle(\"/\", srv)\n\thttp.Serve(ln, handler)\n\treturn nil\n}", "func Serve() {\n\thttp.Handle(\"/\", Handler())\n}", "func Serve(port *string, db *store.Client) {\n\tlog.Printf(\"Starting server, listening on port %s \\n\", *port)\n\thttp.HandleFunc(\"/\", rootHandler)\n\thttp.HandleFunc(sendgrid.SGSend, sendgrid.Send(sendgrid.NewSendGrid(db)))\n\thttp.HandleFunc(sendgrid.SGAll, sendgrid.All(sendgrid.NewSendGrid(db)))\n\thttp.HandleFunc(sendgrid.SGHtmlByID, sendgrid.HTMLByID(sendgrid.NewSendGrid(db)))\n\thttp.HandleFunc(sendgrid.SGTxtByID, sendgrid.TXTByID(sendgrid.NewSendGrid(db)))\n\thttp.HandleFunc(sendgrid.SGUrlByID, sendgrid.LinkByID(sendgrid.NewSendGrid(db)))\n\thttp.ListenAndServe(fmt.Sprintf(\":%s\", *port), nil)\n}", "func (s *Server) Serve(ctx context.Context) error {\n\tif err := s.healthPoller.Wait(ctx); err != nil {\n\t\treturn fmt.Errorf(\"failed to wait for health checks: %w\", err)\n\t}\n\ts.logger.For(ctx).Info(\"starting to serve events\")\n\treturn s.service.Run(ctx)\n}", "func (srv *Server) serve() error {\n\t// The server will run until an error is encountered or the listener is\n\t// closed, via either the Close method or by signal handling. 
Closing the\n\t// listener will result in the benign error handled below.\n\terr := srv.apiServer.Serve(srv.listener)\n\tif err != nil && !strings.HasSuffix(err.Error(), \"use of closed network connection\") {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (sw *SimpleWebServer) Serve() error {\n\tif sw.running {\n\t\treturn fmt.Errorf(\"already running\")\n\t}\n\tsw.running = true\n\tgo func() {\n\t\t_ = sw.ListenAndServe()\n\t}()\n\n\treturn nil\n}", "func Serve() {\n\ttemplates = template.Must(template.ParseGlob(\"static/*.html\"))\n\trouter := mux.NewRouter()\n\n\t//router.PathPrefix(\"/site/\").Handler(http.StripPrefix(\"/site/\", fs))\n\trouter.HandleFunc(\"/\", ServeUp)\n\n\tlog.Fatal(http.ListenAndServe(\":8000\", router))\n\tfmt.Println(\"listening on port 8000\")\n\n}", "func Serve(how, addr string, acceptHandler SockHandler) error {\n\ts, err := Listen(how, addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.AcceptHandler = acceptHandler\n\treturn s.Accept()\n}", "func runServer() {\n\t// listen and serve on 0.0.0.0:8080 (for windows \"localhost:8080\")\n\tlog.Fatalln(router.Run(fmt.Sprintf(\":%s\", env.AppPort)))\n}", "func (s *HTTPServer) Serve() error {\n\treturn s.srv.Serve(s.l)\n}", "func (server *Server) Run(addr string) {\n\tlog.Println(\"Yinyo is ready and waiting.\")\n\tlog.Fatal(http.ListenAndServe(addr, server.router))\n}", "func main() {\n\thttp.ListenAndServe(\"127.0.0.1:8080\", NewServer())\n}", "func Serve(root, iface string, port int) (err error) {\n\t// Root the path, and clean it if necessary.\n\n\t// 18/01/2013 It might make sense to move this to a helper routine\n\t// or further up in the stack.\n\tif !path.IsAbs(root) {\n\t\tvar wd string\n\t\twd, err = os.Getwd()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\troot = path.Join(wd, root)\n\t} else {\n\t\troot = path.Clean(root)\n\t}\n\tl.Printf(\"Starting http server %s:%d\\nRoot path %q\", iface, port, root)\n\trootpath = root\n\n\thttp.HandleFunc(\"/p/\", handle)\n\thttp.ListenAndServe(iface+\":\"+strconv.Itoa(port), nil)\n\treturn\n}", "func (gss *Server) Serve(l net.Listener) error {\n\tctx, cancel := context.WithCancel(context.Background())\n\tgo func() {\n\t\tif err := gss.server.Serve(l); err != nil {\n\t\t\tgss.startErr = err\n\t\t\tcancel()\n\t\t}\n\t}()\n\treturn gss.waitForInterrupt(ctx)\n}", "func (m *multiplexer) Serve() {\n\tm.initialize()\n\terr := m.server.ListenAndServe()\n\tif err != nil {\n\t\tfmt.Errorf(\"failed calling ListenAndServe() on http.Server. 
%s\", err.Error())\n\t}\n}", "func (s *server) Run(addr string) error {\n\treturn http.ListenAndServe(addr, s.handler)\n}", "func (ms *MicroServer) Serve() error {\n\t// Use the muxed listeners for your servers.\n\tgo ms.GrpcServer.Serve(ms.grpcListener)\n\tgo ms.httpServer.Serve(ms.httpListener)\n\t// Start serving!\n\treturn ms.cmuxServer.Serve()\n\n}", "func (s *Server) Run() {\n\tgo func() {\n\t\t// start serving\n\t\tif err := s.httpServer.ListenAndServe(); err != nil {\n\t\t\tlog.Errora(err)\n\t\t}\n\t}()\n}", "func serve(bind, tzlibfile, endpoint, webroot string) error {\n\thandleAPI(endpoint, tzlibfile)\n\t// handleWebroot(webroot)\n\n\tlog.Printf(\"listening on 'http://%s'...\", bind)\n\treturn http.ListenAndServe(bind, nil)\n}", "func (s *Server) Serve(port uint) error {\n\taddress := fmt.Sprintf(\"localhost:%d\", port)\n\n\tl, err := net.Listen(\"tcp\", address)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgrpcServer := grpc.NewServer()\n\n\tserver := &Server{}\n\t//Register all sub server here\n\tpb.RegisterMovieServiceServer(grpcServer, server)\n\t//end register server\n\n\terr = grpcServer.Serve(l)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n\n}", "func (h *handler) Serve(ctx context.Context, _ *sync.WaitGroup) error {\n\tln, err := net.Listen(\"tcp\", h.addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tgo h.stop(ctx, ln)\n\n\th.registerRoutes()\n\n\treturn h.server.Serve(ln)\n}", "func serve(addr string) error {\n\thttp.HandleFunc(\"/serve\", func(w http.ResponseWriter, r *http.Request) {\n\t\t_ = r.ParseForm()\n\n\t\taddr := r.Form.Get(\"addr\")\n\t\tif addr == \"\" {\n\t\t\t_, _ = w.Write([]byte(\"no addr\"))\n\t\t\treturn\n\t\t}\n\n\t\tcmd := exec.Command(\"/bin/sh\", \"-c\", \"./main\", \"-addr\", addr)\n\t\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\t\t//Setsid: true,\n\t\t\tSetpgid: true,\n\t\t}\n\t\tout, err := cmd.CombinedOutput()\n\t\tif err != nil {\n\t\t\t_, _ = w.Write([]byte(err.Error()))\n\t\t\treturn\n\t\t}\n\t\t// write the output to the http response\n\t\t_, _ = w.Write(out)\n\t})\n\n\tlog.Printf(\"Listening on: %s\\n\", addr)\n\treturn http.ListenAndServe(addr, nil)\n}", "func (s *server) Run() error {\n\ts.logger.Info(\"starting http server\", logger.String(\"addr\", s.server.Addr))\n\ts.server.Handler = s.gin\n\t// Open listener.\n\ttrackedListener, err := conntrack.NewTrackedListener(\"tcp\", s.addr, s.r)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn s.server.Serve(trackedListener)\n}", "func (e *Engine) Serve(ln net.Listener) error {\n\tln = &onceCloseListener{Listener: ln}\n\tdefer ln.Close()\n\n\tfor {\n\t\tconn, err := ln.Accept()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\te.Stat.AddConn()\n\t\tc := newConn(e, conn)\n\t\te.Log().Printf(\"Accept a connect from %s\", c.remoteAddr)\n\t\te.Log().Print(e.Stat.String())\n\t\tgo c.server()\n\t}\n\n}", "func Serve(c *config.Config) {\n\tserver.Serve(fmt.Sprintf(\":%d\", config.PortHello), func(s *grpc.Server) {\n\t\tpb.RegisterHelloServiceServer(s, &Server{})\n\t})\n}", "func Serve(ln net.Listener) error {\n\treturn Default.Serve(ln)\n}", "func (s *Service) serve() {\n\tdefer s.wg.Done()\n\terr := s.server.Serve(s.ln)\n\t// The listener was closed so exit\n\t// See https://github.com/golang/go/issues/4373\n\tif !strings.Contains(err.Error(), \"closed\") {\n\t\ts.err <- fmt.Errorf(\"listener failed: addr=%s, err=%s\", s.Addr(), err)\n\t} else {\n\t\ts.err <- nil\n\t}\n}", "func (s *Server) Run() {\n\tvar l net.Listener\n\tvar err error\n\thost := s.ip+\":\"+s.port\n\tl, err = 
net.Listen(\"tcp\", host)\n\tif err != nil {\n\t\tlog.Fatal(\"Listen: %v\", err)\n\t}\n\tif s.connLimit > 0 {\n\t\tl = netutil.LimitListener(l, s.connLimit)\n\t}\n\n\terr = http.Serve(l, s)\n\tif err != nil {\n\t\tlog.Fatal(\"http.listenAndServe failed: %s\", err.Error())\n\t}\n\ts.l = l\n\treturn\n}", "func Serve(opts *ServeOpts) {\n\th := opts.Handler\n\n\t// When deployed, force TLS\n\t// if hcruntime.Nomad() {\n\t// \tlog.Printf(\"[INFO] www: Nomad detected\")\n\t// \th = hchandlers.ForceTLS(h)\n\t// }\n\n\t// Record requests\n\t// h = hchandlers.Metrics(h)\n\n\t// Compress\n\th = handlers.CompressHandler(h)\n\n\t// CORS\n\tcors := handlers.CORS(\n\t\thandlers.AllowedMethods([]string{\"GET\", \"HEAD\", \"PUT\", \"POST\"}),\n\t\thandlers.AllowedHeaders([]string{\"Content-Type\"}))\n\t// handlers.AllowedHeaders([]string{\"Content-Type\"}),\n\t// handlers.AllowCredentials())\n\th = cors(h)\n\n\t// Create the server\n\tserver := &http.Server{\n\t\tAddr: opts.Addr,\n\t\tHandler: handlers.CombinedLoggingHandler(os.Stderr, h),\n\t\tReadTimeout: 10 * time.Second,\n\t\tWriteTimeout: 10 * time.Second,\n\t}\n\n\t// Kick it off\n\tlog.Printf(\"[INFO] api: api server listening on %s\", opts.Addr)\n\tserver.ListenAndServe()\n}", "func (g GenericService) Serve() {\n g.init()\n glog.Fatal(http.ListenAndServe(fmt.Sprintf(\":%d\", port), nil))\n}", "func (c *Core) Serve(args ...interface{}) error {\n\tvar (\n\t\taddr = \"8080\"\n\t\tln net.Listener\n\t\terr error\n\t\ttc *tls.Config\n\t)\n\n\tfor _, arg := range args {\n\t\tswitch a := arg.(type) {\n\t\tcase int:\n\t\t\taddr = strconv.Itoa(a)\n\t\tcase string:\n\t\t\taddr = a\n\t\tcase *tls.Config:\n\t\t\ttc = a\n\t\t}\n\t}\n\n\tif !strings.Contains(addr, \":\") {\n\t\taddr = \":\" + addr\n\t}\n\n\tif c.Prefork {\n\t\treturn c.prefork(addr, tc)\n\t}\n\n\tif ln, err = net.Listen(\"tcp\", addr); err != nil {\n\t\treturn err\n\t}\n\n\tif tc != nil {\n\t\tln = tls.NewListener(ln, tc)\n\t}\n\tLog.Info(\"Listen: %s\", addr)\n\treturn c.Server.Serve(ln)\n}", "func (s *Server) Serve() (err error) {\n\t// Initialize the gRPC server\n\ts.srv = grpc.NewServer()\n\tpb.RegisterTRISADemoServer(s.srv, s)\n\tpb.RegisterTRISAIntegrationServer(s.srv, s)\n\n\t// Catch OS signals for graceful shutdowns\n\tquit := make(chan os.Signal, 1)\n\tsignal.Notify(quit, os.Interrupt)\n\tgo func() {\n\t\t<-quit\n\t\ts.echan <- s.Shutdown()\n\t}()\n\n\t// Run the TRISA service on the TRISABindAddr\n\tif err = s.trisa.Serve(); err != nil {\n\t\treturn err\n\t}\n\n\t// Listen for TCP requests on the specified address and port\n\tvar sock net.Listener\n\tif sock, err = net.Listen(\"tcp\", s.conf.BindAddr); err != nil {\n\t\treturn fmt.Errorf(\"could not listen on %q\", s.conf.BindAddr)\n\t}\n\tdefer sock.Close()\n\n\t// Run the server\n\tgo func() {\n\t\tlog.Info().\n\t\t\tStr(\"listen\", s.conf.BindAddr).\n\t\t\tStr(\"version\", pkg.Version()).\n\t\t\tStr(\"name\", s.vasp.Name).\n\t\t\tMsg(\"server started\")\n\n\t\tif err := s.srv.Serve(sock); err != nil {\n\t\t\ts.echan <- err\n\t\t}\n\t}()\n\n\t// Listen for any errors that might have occurred and wait for all go routines to finish\n\tif err = <-s.echan; err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func Serve(l net.Listener, h Handler, appname, hostname string) (err error) {\n\tserv := Server{\n\t\tAppname: appname,\n\t\tHostname: hostname,\n\t\tHandler: h,\n\t}\n\treturn serv.Serve(l)\n}", "func (k *Kitops) Serve() {\n\tk.routes()\n\tlog.Fatal(http.ListenAndServe(\":8080\", k.router))\n}", "func (w *currentWorker) serve(c 
context.Context) (int, error) {\n\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tt := strings.Split(listener.Addr().String(), \":\")\n\tport, err := strconv.ParseInt(t[1], 10, 64)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tlog.Info(\"Export variable HTTP server: %s\", listener.Addr().String())\n\tr := mux.NewRouter()\n\n\tr.HandleFunc(\"/artifacts\", w.artifactsHandler)\n\tr.HandleFunc(\"/cache/{ref}/pull\", w.cachePullHandler)\n\tr.HandleFunc(\"/cache/{ref}/push\", w.cachePushHandler)\n\tr.HandleFunc(\"/download\", w.downloadHandler)\n\tr.HandleFunc(\"/exit\", w.exitHandler)\n\tr.HandleFunc(\"/key/{key}/install\", w.keyInstallHandler)\n\tr.HandleFunc(\"/log\", w.logHandler)\n\tr.HandleFunc(\"/services/{type}\", w.serviceHandler)\n\tr.HandleFunc(\"/tag\", w.tagHandler)\n\tr.HandleFunc(\"/tmpl\", w.tmplHandler)\n\tr.HandleFunc(\"/upload\", w.uploadHandler)\n\tr.HandleFunc(\"/var\", w.addBuildVarHandler)\n\tr.HandleFunc(\"/vulnerability\", w.vulnerabilityHandler)\n\n\tsrv := &http.Server{\n\t\tHandler: r,\n\t\tAddr: \"127.0.0.1:0\",\n\t\tWriteTimeout: 6 * time.Minute,\n\t\tReadTimeout: 6 * time.Minute,\n\t}\n\n\t//Start the server\n\tgo func() {\n\t\tif err := srv.Serve(listener); err != nil {\n\t\t\tlog.Error(\"%v\", err)\n\t\t}\n\t}()\n\n\t//Handle shutdown\n\tgo func() {\n\t\t<-c.Done()\n\t\tsrv.Shutdown(c)\n\t}()\n\n\treturn int(port), nil\n}", "func Serve() {\n\tr := mux.NewRouter()\n\n\t// routes\n\tr.Handle(\"/\", withError(home))\n\n\t// resources\n\tr.PathPrefix(\"/dist/\").Handler(http.StripPrefix(\"/dist/\", http.FileServer(&assetfs.AssetFS{\n\t\tAsset: ui.Asset,\n\t\tAssetDir: ui.AssetDir,\n\t\tAssetInfo: ui.AssetInfo,\n\t\tPrefix: \"\",\n\t})))\n\n\tif err := http.ListenAndServe(\":8080\", r); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func (sv *Server) Serve() {\n\tlog.Println(\"Server : Serve : Started\")\n\tdefer log.Println(\"Server : Serve : Completed\")\n\n\tfor {\n\t\t// Check if we should stop accepting connections and shutdown\n\t\tselect {\n\t\tcase <-sv.quit:\n\t\t\tlog.Println(\"Server : Serve : Shutting Down\")\n\t\t\tsv.Listener.Close()\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\t\t// Accept a new connection or timeout and loop again\n\t\tsv.Listener.SetDeadline(time.Now().Add(time.Second))\n\t\tconn, err := sv.Listener.AcceptTCP()\n\t\tif err != nil {\n\t\t\t// Loop if deadline expired\n\t\t\tif opErr, ok := err.(*net.OpError); ok && opErr.Timeout() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tlog.Println(\"Server: New connection from:\", conn.RemoteAddr())\n\t\t// Hand the connection off to PeerManager\n\t\tgo func() { sv.peerChans.conns <- conn }()\n\t}\n}", "func main() {\n\n\t// Dynamic port (used by Heroku for example)\n\tport := os.Getenv(\"PORT\")\n\tif port == \"\" {\n\t\tlog.Println(\"$PORT must be set, using 5000 as default...\")\n\t\tport = \"5000\"\n\t}\n\n\thttp.HandleFunc(\"/\", makeHandler(homeHandler))\n\thttp.HandleFunc(\"/view/\", makeHandler(viewHandler))\n\thttp.HandleFunc(\"/edit/\", makeHandler(editHandler))\n\thttp.HandleFunc(\"/save/\", makeHandler(saveHandler))\n\n\t// For static files\n\thttp.Handle(\"/static/\", http.StripPrefix(\"/static/\", http.FileServer(http.Dir(\"assets/\"))))\n\n\tlog.Println(\"\")\n\tlog.Println(\"Server started... 
listening on port \" + port)\n\tlog.Println(\"URL: http://localhost:\" + port + \"/\")\n\tlog.Println(\"\")\n\n\tlog.Fatal(http.ListenAndServe(\":\"+port, nil))\n}", "func Serve(ctx context.Context, log *logger.Logger, healthChecker *health.Checker, listenAddress string) {\n\trouter := http.NewServeMux()\n\trouter.HandleFunc(\"/metrics\", handler.VarzHandler)\n\trouter.Handle(\"/health\", handler.Health(healthChecker))\n\n\trouter.HandleFunc(\"/debug/pprof/\", pprof.Index)\n\trouter.HandleFunc(\"/debug/pprof/cmdline\", pprof.Cmdline)\n\trouter.HandleFunc(\"/debug/pprof/profile\", pprof.Profile)\n\trouter.HandleFunc(\"/debug/pprof/symbol\", pprof.Symbol)\n\trouter.HandleFunc(\"/debug/pprof/trace\", pprof.Trace)\n\n\tserver := &http.Server{\n\t\tAddr: listenAddress,\n\t\tHandler: router,\n\t}\n\n\tgo func() {\n\t\tif err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed {\n\t\t\tlog.Infof(\"shutting down the metrics http server: %s\", err)\n\t\t}\n\t}()\n\n\tlog.Infof(\"metrics http server listening on %s\", listenAddress)\n\n\t<-ctx.Done()\n\n\tif err := server.Close(); err != nil {\n\t\tlog.Warnf(\"error shutting down metrics http server: %s\", err)\n\t}\n}", "func (s *Server) Run() error {\n\t// start fetcher, reporter and doc generator in goroutines\n\tgo s.fetcher.Run()\n\tgo s.reporter.Run()\n\tgo s.docGenerator.Run()\n\n\t// start webserver\n\tlistenAddress := s.listenAddress\n\tif listenAddress == \"\" {\n\t\tlistenAddress = DefaultAddress\n\t}\n\n\tr := mux.NewRouter()\n\n\t// register ping api\n\tr.HandleFunc(\"/_ping\", pingHandler).Methods(\"GET\")\n\n\t// github webhook API\n\tr.HandleFunc(\"/events\", s.gitHubEventHandler).Methods(\"POST\")\n\n\t// travisCI webhook API\n\tr.HandleFunc(\"/ci_notifications\", s.ciNotificationHandler).Methods(\"POST\")\n\n\tlogrus.Infof(\"start http server on address %s\", listenAddress)\n\treturn http.ListenAndServe(listenAddress, r)\n}", "func Serve() {\n\t// Use defaults if not set via TOML config file.\n\tif tc.Server.Host == \"\" {\n\t\ttc.Server.Host = DefaultHost\n\t}\n\tif tc.Server.HTTPAddress == \"\" {\n\t\ttc.Server.HTTPAddress = DefaultWebAddress\n\t}\n\tif tc.Server.RPCAddress == \"\" {\n\t\ttc.Server.RPCAddress = DefaultRPCAddress\n\t}\n\n\tdvid.Infof(\"------------------\\n\")\n\tdvid.Infof(\"DVID code version: %s\\n\", gitVersion)\n\tdvid.Infof(\"Serving HTTP on %s (host alias %q)\\n\", tc.Server.HTTPAddress, tc.Server.Host)\n\tdvid.Infof(\"Serving command-line use via RPC %s\\n\", tc.Server.RPCAddress)\n\tdvid.Infof(\"Using web client files from %s\\n\", tc.Server.WebClient)\n\tdvid.Infof(\"Using %d of %d logical CPUs for DVID.\\n\", dvid.NumCPU, runtime.NumCPU())\n\n\t// Launch the web server\n\tgo serveHTTP()\n\n\t// Launch the rpc server\n\tgo func() {\n\t\tif err := rpc.StartServer(tc.Server.RPCAddress); err != nil {\n\t\t\tdvid.Criticalf(\"Could not start RPC server: %v\\n\", err)\n\t\t}\n\t}()\n\n\t<-shutdownCh\n}", "func Serve() {\n\tflag.Parse()\n\n\taddress := getAddress()\n\n\tserver.HandleHTTP(rpc.DefaultRPCPath, rpc.DefaultDebugPath)\n\tl, e := net.Listen(\"tcp\", address)\n\tif e != nil {\n\t\tlog.Fatal(\"listen error:\", e)\n\t}\n\n\tfmt.Println(ansi.Color(fmt.Sprintf(\"Started plugin on `%v`\", address), \"black+h\"))\n\n\tfor {\n\t\tconn, err := l.Accept()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tgo server.ServeCodec(jsonrpc.NewServerCodec(conn))\n\t}\n}", "func serve(ctx context.Context, server *ttrpc.Server, signals chan os.Signal, shutdown func()) error {\n\tdump := make(chan 
os.Signal, 32)\n\tsetupDumpStacks(dump)\n\n\tpath, err := os.Getwd()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tl, err := serveListener(socketFlag)\n\tif err != nil { //nolint:nolintlint,staticcheck // Ignore SA4023 as some platforms always return error\n\t\treturn err\n\t}\n\tgo func() {\n\t\tdefer l.Close()\n\t\tif err := server.Serve(ctx, l); err != nil &&\n\t\t\t!strings.Contains(err.Error(), \"use of closed network connection\") {\n\t\t\tlog.G(ctx).WithError(err).Fatal(\"containerd-shim: ttrpc server failure\")\n\t\t}\n\t}()\n\tlogger := log.G(ctx).WithFields(logrus.Fields{\n\t\t\"pid\": os.Getpid(),\n\t\t\"path\": path,\n\t\t\"namespace\": namespaceFlag,\n\t})\n\tgo func() {\n\t\tfor range dump {\n\t\t\tdumpStacks(logger)\n\t\t}\n\t}()\n\n\tgo handleExitSignals(ctx, logger, shutdown)\n\treturn reap(ctx, logger, signals)\n}", "func (s *Server) ListenAndServe() error {\n\tif err := s.conf.Validate(); err != nil {\n\t\treturn fmt.Errorf(\"server: config erorr: %v\", err)\n\t}\n\n\te := s.echo\n\n\t// show registered URLs\n\tif s.conf.ShowRoutes {\n\t\troutes := e.Routes()\n\t\tsort.Slice(routes, func(i, j int) bool {\n\t\t\tri, rj := routes[i], routes[j]\n\t\t\treturn len(ri.Path) < len(rj.Path)\n\t\t})\n\t\tfor _, url := range routes {\n\t\t\t// built-in routes are ignored\n\t\t\tif strings.Contains(url.Name, \"github.com/labstack/echo\") {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlog.Printf(\"%8s : %-35s (%v)\\n\", url.Method, url.Path, url.Name)\n\t\t}\n\t}\n\n\t// start server\n\tserverURL := s.conf.HTTP\n\tlog.Println(\"server listen at \" + serverURL)\n\terr := e.Start(serverURL)\n\te.Logger.Error(err)\n\treturn err\n}", "func (hs *HttpServer) Start() (err error) {\n\tpanic(\"todo - StartServer\")\n\n\t// Start listening to the server port\n\n\t// Accept connection from client\n\n\t// Spawn a go routine to handle request\n\n}", "func main() {\n\twebserver.ServerStart()\n\twebserver.ServerRequest()\n}", "func Serve(\n\tctx context.Context,\n\tmux *goji.Mux,\n) error {\n\n\ts := &http.Server{\n\t\tAddr: fmt.Sprintf(\":%s\", mint.GetPort(ctx)),\n\t\tReadTimeout: 5 * time.Second,\n\t\tWriteTimeout: 5 * time.Second,\n\t\tHandler: mux,\n\t}\n\n\tlogging.Logf(ctx, \"Listening: port=%s\", mint.GetPort(ctx))\n\n\terr := gracehttp.Serve(s)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\treturn nil\n}", "func main() {\n // Routes\n http.HandleFunc(\"/\", index)\n // Setting Server\n http.ListenAndServe(\":8080\", nil)\n}", "func (srv *Server) Serve(l net.Listener) error {\n\treturn nil\n}", "func (s *Server) Serve() error {\n\tif err := s.server.Serve(s.config.Listener); err != nil {\n\t\tif errors.Is(err, grpc.ErrServerStopped) ||\n\t\t\tutils.IsUseOfClosedNetworkError(err) {\n\t\t\treturn nil\n\t\t}\n\t\treturn trace.Wrap(err)\n\t}\n\treturn nil\n}", "func TestServe(t *testing.T) {\n\tm := MarkHub{}\n\terr := m.ParseFile(\"./README.md\")\n\tif err != nil {\n\t\tt.Errorf(\"TestServe(): got -> %v, want: nil\", err)\n\t}\n\terr = m.Serve(\":4000\")\n\tif err != nil {\n\t\tt.Errorf(\"TestServe(): got -> %v, want: nil\", err)\n\t}\n\n}", "func (api *API) Serve() {\n\tlog.Fatal(http.ListenAndServe(fmt.Sprintf(\"%s:%d\", api.address, api.port), api.router))\n}", "func (s *Server) Serve(ctx context.Context, ln net.Listener) error {\n\tdefer ln.Close()\n\n\tfor {\n\t\tconn, err := ln.Accept()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tgo s.serveStream(ctx, conn)\n\t}\n}", "func (listener *Listener) Serve() <-chan error {\n\terrChan := make(chan error)\n\tgo func() 
{\n\t\tlistener.logger.Infow(\"serving\")\n\t\terr := listener.Server.Serve(listener.Listener)\n\t\terrChan <- err\n\t\tclose(errChan)\n\t}()\n\treturn errChan\n}", "func (srv *MonitorServer) ListenAndServe() error {\n\tlog.Logger.Info(\"Embeded web server start at port[%d]\", srv.port)\n\n\thttp.HandleFunc(\"/\", srv.webHandler)\n\n\tportStr := fmt.Sprintf(\":%d\", srv.port)\n\treturn http.ListenAndServe(portStr, nil)\n}", "func (h *HealthZ) Serve() {\n\tserveMux := http.NewServeMux()\n\tserveMux.HandleFunc(h.HealthCheckURL.EscapedPath(), h.ServeHTTP)\n\tserver := &http.Server{\n\t\tAddr: h.HealthCheckURL.Host,\n\t\tReadHeaderTimeout: readHeaderTimeout,\n\t\tHandler: serveMux,\n\t}\n\tif err := server.ListenAndServe(); err != nil && errors.Is(err, http.ErrServerClosed) {\n\t\tklog.ErrorS(err, \"failed to start health check server\")\n\t\tos.Exit(1)\n\t}\n}", "func (f *Frontend) Serve() {\n\t// Start the internal server on the internal port if requested.\n\tif f.flags.InternalPort != \"\" {\n\t\t// Add the profiling endpoints to the internal router.\n\t\tinternalRouter := chi.NewRouter()\n\n\t\t// Register pprof handlers\n\t\tinternalRouter.HandleFunc(\"/debug/pprof/\", pprof.Index)\n\t\tinternalRouter.HandleFunc(\"/debug/pprof/cmdline\", pprof.Cmdline)\n\t\tinternalRouter.HandleFunc(\"/debug/pprof/symbol\", pprof.Symbol)\n\t\tinternalRouter.HandleFunc(\"/debug/pprof/profile\", pprof.Profile)\n\t\tinternalRouter.HandleFunc(\"/debug/pprof/trace\", pprof.Trace)\n\t\tinternalRouter.HandleFunc(\"/debug/pprof/{profile}\", pprof.Index)\n\n\t\tgo func() {\n\t\t\tsklog.Infof(\"Internal server on %q\", f.flags.InternalPort)\n\t\t\tsklog.Info(http.ListenAndServe(f.flags.InternalPort, internalRouter))\n\t\t}()\n\t}\n\n\t// Resources are served directly.\n\trouter := chi.NewRouter()\n\n\tallowedHosts := []string{f.host}\n\tif len(config.Config.AllowedHosts) > 0 {\n\t\tallowedHosts = append(allowedHosts, config.Config.AllowedHosts...)\n\t}\n\n\trouter.Use(baseapp.SecurityMiddleware(allowedHosts, f.flags.Local, nil))\n\n\trouter.HandleFunc(\"/dist/*\", f.makeDistHandler())\n\n\t// Redirects for the old Perf URLs.\n\trouter.HandleFunc(\"/\", oldMainHandler)\n\trouter.HandleFunc(\"/clusters/\", oldClustersHandler)\n\trouter.HandleFunc(\"/alerts/\", oldAlertsHandler)\n\n\t// New endpoints that use ptracestore will go here.\n\trouter.HandleFunc(\"/e/\", f.templateHandler(\"newindex.html\"))\n\trouter.HandleFunc(\"/c/\", f.templateHandler(\"clusters2.html\"))\n\trouter.HandleFunc(\"/t/\", f.templateHandler(\"triage.html\"))\n\trouter.HandleFunc(\"/a/\", f.templateHandler(\"alerts.html\"))\n\trouter.HandleFunc(\"/d/\", f.templateHandler(\"dryrunalert.html\"))\n\trouter.HandleFunc(\"/r/\", f.templateHandler(\"trybot.html\"))\n\trouter.HandleFunc(\"/g/{dest:[ect]}/{hash:[a-zA-Z0-9]+}\", f.gotoHandler)\n\trouter.HandleFunc(\"/help/\", f.helpHandler)\n\n\t// JSON handlers.\n\n\t// Common endpoint for all long-running requests.\n\trouter.Get(\"/_/status/{id:[a-zA-Z0-9-]+}\", f.progressTracker.Handler)\n\n\trouter.Get(\"/_/alertgroup\", f.alertGroupQueryHandler)\n\trouter.HandleFunc(\"/_/initpage/\", f.initpageHandler)\n\trouter.Post(\"/_/cidRange/\", f.cidRangeHandler)\n\trouter.Post(\"/_/count/\", f.countHandler)\n\trouter.Post(\"/_/cid/\", f.cidHandler)\n\trouter.Post(\"/_/keys/\", f.keysHandler)\n\n\trouter.Post(\"/_/frame/start\", f.frameStartHandler)\n\trouter.Post(\"/_/cluster/start\", f.clusterStartHandler)\n\trouter.Post(\"/_/trybot/load/\", f.trybotLoadHandler)\n\trouter.Post(\"/_/dryrun/start\", 
f.dryrunRequests.StartHandler)\n\n\trouter.Post(\"/_/reg/\", f.regressionRangeHandler)\n\trouter.Get(\"/_/reg/count\", f.regressionCountHandler)\n\trouter.Get(\"/_/reg/current\", f.regressionCurrentHandler)\n\trouter.Post(\"/_/triage/\", f.triageHandler)\n\trouter.HandleFunc(\"/_/alerts/\", f.alertsHandler)\n\trouter.Post(\"/_/details/\", f.detailsHandler)\n\trouter.Post(\"/_/shift/\", f.shiftHandler)\n\trouter.Get(\"/_/alert/list/{show}\", f.alertListHandler)\n\trouter.Get(\"/_/alert/new\", alertNewHandler)\n\trouter.Post(\"/_/alert/update\", f.alertUpdateHandler)\n\trouter.Post(\"/_/alert/delete/{id:[0-9]+}\", f.alertDeleteHandler)\n\trouter.Post(\"/_/alert/bug/try\", f.alertBugTryHandler)\n\trouter.Post(\"/_/alert/notify/try\", f.alertNotifyTryHandler)\n\n\trouter.Get(\"/_/login/status\", f.loginStatus)\n\n\trouter.Post(\"/_/bisect/create\", f.createBisectHandler)\n\n\tvar h http.Handler = router\n\th = httputils.LoggingGzipRequestResponse(h)\n\tif !f.flags.Local {\n\t\th = httputils.HealthzAndHTTPS(h)\n\t}\n\thttp.Handle(\"/\", h)\n\n\tsklog.Info(\"Ready to serve.\")\n\n\t// We create our own server here instead of using http.ListenAndServe, so\n\t// that we don't expose the /debug/pprof endpoints to the open web.\n\tserver := &http.Server{\n\t\tAddr: f.flags.Port,\n\t\tHandler: h,\n\t}\n\tsklog.Fatal(server.ListenAndServe())\n}", "func (s *Service)ListenAndServe(port string, r http.Handler) {\n\t\tlog.Println(\"Starting Server!\")\n\t\ts.Server = &http.Server{Addr: port, Handler: r}\n\t\tif err := s.Server.ListenAndServe(); err != nil {\n\t\t\t\t// handle err\n\t\t}\n\n // Setting up signal capturing\n stop := make(chan os.Signal, 1)\n signal.Notify(stop, os.Interrupt)\n\n // Waiting for SIGINT (pkill -2)\n <-stop\n\t\tlog.Println(\"Received Server Stop!\")\n s.Stop()\n // Wait for ListenAndServe goroutine to close.\n}", "func (a *API) Serve() error {\n\thost := fmt.Sprintf(\"0.0.0.0:%d\", a.Config.ListenPort)\n\ta.logger.LogInfo(\"starting api\", \"host\", \"0.0.0.0\", \"port\", a.Config.ListenPort)\n\treturn http.ListenAndServe(host, a)\n}", "func Serve(port string) {\n\tmux := http.NewServeMux()\n\n\tmux.HandleFunc(\"/\", defaultHandler)\n\tmux.HandleFunc(\"/upload_image\", imageUploadHandler)\n\tmux.HandleFunc(\"/process_image\", imageProcessHandler)\n\tmux.HandleFunc(\"/effect_options\", effectOptionHandler)\n\n\tmux.Handle(\"/css/\", http.StripPrefix(\"/css/\", http.FileServer(http.Dir(\"web/css/\"))))\n\tmux.Handle(\"/js/\", http.StripPrefix(\"/js/\", http.FileServer(http.Dir(\"web/js/\"))))\n\tmux.Handle(\"/node_modules/\", http.StripPrefix(\"/node_modules/\", http.FileServer(http.Dir(\"node_modules/\"))))\n\tmux.Handle(\"/fonts/\", http.StripPrefix(\"/fonts/\", http.FileServer(http.Dir(\"web/resources/fonts/\"))))\n\tmux.Handle(\"/source_image/\", http.StripPrefix(\"/source_image/\", http.FileServer(http.Dir(\"storage/uploads/\"))))\n\tmux.Handle(\"/processed_image/\", http.StripPrefix(\"/processed_image/\", http.FileServer(http.Dir(\"storage/processed_images/\"))))\n\n\thandler := cors.Default().Handler(mux)\n\n\tfmt.Println(\"Server running on http://localhost\" + port)\n\tlog.Fatal(http.ListenAndServe(port, handler))\n}", "func (s *Server) Serve(port string) {\n\trouter := mux.NewRouter()\n\trouter.HandleFunc(\"/getBike/{bikeID}\", s.getBikeHandler)\n\trouter.HandleFunc(\"/getAllBikes\", s.getAllBikesHandler)\n\trouter.HandleFunc(\"/addBike\", s.addBikeHandler)\n\n\tcommon.Log(fmt.Sprintf(\"Listening on port %s\", port))\n\tlog.Fatal(http.ListenAndServe(\":\"+port, 
router))\n}", "func RunServer() {\n\tapp := applicationContext{\n\t\tconfig: config.LoadConfig(),\n\t\ttrackerLevel: RATIOLESS,\n\t}\n\n\tmux := http.NewServeMux()\n\n\tmux.HandleFunc(\"/announce\", app.requestHandler)\n\tmux.HandleFunc(\"/scrape\", scrapeHandler)\n\thttp.ListenAndServe(\":3000\", mux)\n}", "func Serve(l net.Listener) error {\n\tfor {\n\t\tconn, err := l.Accept()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tgo handleConn(conn)\n\t}\n}" ]
[ "0.7704274", "0.75560105", "0.7507622", "0.7234669", "0.7211", "0.71590436", "0.7141204", "0.71229917", "0.7110679", "0.7095706", "0.70953053", "0.7070322", "0.70478725", "0.70459193", "0.7036709", "0.70193183", "0.6962676", "0.69522464", "0.69483596", "0.69476694", "0.69378453", "0.6935497", "0.69319123", "0.6924034", "0.6892406", "0.6887252", "0.6881541", "0.6873102", "0.68654025", "0.68609315", "0.6860159", "0.6852574", "0.6832801", "0.6829077", "0.6827893", "0.68230873", "0.68198967", "0.6805004", "0.67924356", "0.6776169", "0.67752886", "0.6759216", "0.6757719", "0.6753938", "0.6752601", "0.67500633", "0.6744777", "0.6743904", "0.6738805", "0.6734313", "0.6734066", "0.6725793", "0.6723191", "0.67187726", "0.6718656", "0.67156094", "0.67143315", "0.67136717", "0.66908383", "0.66857713", "0.6682762", "0.66813475", "0.6680876", "0.66746485", "0.6665081", "0.66626906", "0.66612834", "0.665785", "0.664814", "0.6641672", "0.664074", "0.6637167", "0.66355175", "0.6633622", "0.66277534", "0.66233104", "0.66099995", "0.6603", "0.66025317", "0.65932924", "0.6591774", "0.658316", "0.6581504", "0.6573545", "0.65667564", "0.65664345", "0.65590763", "0.65561754", "0.6547973", "0.6528591", "0.65151596", "0.65027434", "0.65020657", "0.65005666", "0.6498696", "0.6497777", "0.6497534", "0.649664", "0.6492783", "0.6490239" ]
0.6517445
90
NewPolicyDefinitionsClient creates a new instance of PolicyDefinitionsClient with the specified values.
func NewPolicyDefinitionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) *PolicyDefinitionsClient { cp := arm.ClientOptions{} if options != nil { cp = *options } if len(cp.Host) == 0 { cp.Host = arm.AzurePublicCloud } return &PolicyDefinitionsClient{subscriptionID: subscriptionID, ep: string(cp.Host), pl: armruntime.NewPipeline(module, version, credential, &cp)} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewRegistrationDefinitionsClient(credential azcore.TokenCredential, options *arm.ClientOptions) (*RegistrationDefinitionsClient, error) {\n\tcl, err := arm.NewClient(moduleName+\".RegistrationDefinitionsClient\", moduleVersion, credential, options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient := &RegistrationDefinitionsClient{\n\t\tinternal: cl,\n\t}\n\treturn client, nil\n}", "func NewClient(confs ...ClientConfiguration) *Client {\n\tq := setupClient()\n\n\t// Loop through the configurations and apply them to the client.\n\tfor _, c := range confs {\n\t\tc(q)\n\t}\n\n\treturn q\n}", "func NewPoliciesClient(cc grpc.ClientConnInterface) PoliciesClient { return src.NewPoliciesClient(cc) }", "func NewClient(cfg watson.Config) (Client, error) {\n\tdialog := Client{version: \"/\" + defaultMajorVersion}\n\tif len(cfg.Version) > 0 {\n\t\tdialog.version = \"/\" + cfg.Version\n\t}\n\tif len(cfg.Credentials.ServiceName) == 0 {\n\t\tcfg.Credentials.ServiceName = \"dialog\"\n\t}\n\tif len(cfg.Credentials.Url) == 0 {\n\t\tcfg.Credentials.Url = defaultUrl\n\t}\n\tclient, err := watson.NewClient(cfg.Credentials)\n\tif err != nil {\n\t\treturn Client{}, err\n\t}\n\tdialog.watsonClient = client\n\treturn dialog, nil\n}", "func NewClient(kubeClient client.Client, secretName, namespace, project string) (*gcpClient, error) {\n\tctx := context.Background()\n\tsecret := &corev1.Secret{}\n\terr := kubeClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: secretName}, secret)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfig, err := utils.GetCredentialsJSON(kubeClient, types.NamespacedName{Namespace: namespace, Name: secretName})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tservice, err := dnsv1.NewService(context.Background(), option.WithCredentials(config))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &gcpClient{\n\t\tclient: *service,\n\t\tproject: project,\n\t}, nil\n}", "func NewClient(options ...func(c *Client)) *Client {\n\tc := &Client{}\n\tfor _, option := range options {\n\t\toption(c)\n\t}\n\n\t// Set default user-agent if not set\n\tif c.UserAgent == \"\" {\n\t\tc.UserAgent = \"wporg/1.0\"\n\t}\n\n\t// Set default client if not set\n\tif c.HTTPClient == nil {\n\t\tc.HTTPClient = getDefaultClient()\n\t}\n\n\treturn c\n}", "func New(client *client.Client, properties ClientProperties) *Client {\n\treturn &Client{\n\t\tclient: client,\n\n\t\tflowSid: properties.FlowSid,\n\t\tsid: properties.Sid,\n\n\t\tContext: func() *context.Client {\n\t\t\treturn context.New(client, context.ClientProperties{\n\t\t\t\tExecutionSid: properties.Sid,\n\t\t\t\tFlowSid: properties.FlowSid,\n\t\t\t})\n\t\t},\n\t\tStep: func(stepSid string) *step.Client {\n\t\t\treturn step.New(client, step.ClientProperties{\n\t\t\t\tExecutionSid: properties.Sid,\n\t\t\t\tFlowSid: properties.FlowSid,\n\t\t\t\tSid: stepSid,\n\t\t\t})\n\t\t},\n\t\tSteps: steps.New(client, steps.ClientProperties{\n\t\t\tExecutionSid: properties.Sid,\n\t\t\tFlowSid: properties.FlowSid,\n\t\t}),\n\t}\n}", "func NewRoleDefinitionsClient(credential azcore.TokenCredential, options *arm.ClientOptions) *RoleDefinitionsClient {\n\tcp := arm.ClientOptions{}\n\tif options != nil {\n\t\tcp = *options\n\t}\n\tif len(cp.Host) == 0 {\n\t\tcp.Host = arm.AzurePublicCloud\n\t}\n\treturn &RoleDefinitionsClient{ep: string(cp.Host), pl: armruntime.NewPipeline(module, version, credential, &cp)}\n}", "func NewClient(query, list, versionsByID, byCatalogKindNameVersion, byCatalogKindNameVersionReadme, byCatalogKindNameVersionYaml, 
byVersionID, byCatalogKindName, byID, getRawYamlByCatalogKindNameVersion, getLatestRawYamlByCatalogKindName goa.Endpoint) *Client {\n\treturn &Client{\n\t\tQueryEndpoint: query,\n\t\tListEndpoint: list,\n\t\tVersionsByIDEndpoint: versionsByID,\n\t\tByCatalogKindNameVersionEndpoint: byCatalogKindNameVersion,\n\t\tByCatalogKindNameVersionReadmeEndpoint: byCatalogKindNameVersionReadme,\n\t\tByCatalogKindNameVersionYamlEndpoint: byCatalogKindNameVersionYaml,\n\t\tByVersionIDEndpoint: byVersionID,\n\t\tByCatalogKindNameEndpoint: byCatalogKindName,\n\t\tByIDEndpoint: byID,\n\t\tGetRawYamlByCatalogKindNameVersionEndpoint: getRawYamlByCatalogKindNameVersion,\n\t\tGetLatestRawYamlByCatalogKindNameEndpoint: getLatestRawYamlByCatalogKindName,\n\t}\n}", "func NewClient(cfg *Config) (*Client, error) {\r\n\tBaseURL := new(url.URL)\r\n\tvar err error\r\n\r\n\tviper.SetEnvPrefix(\"TS\")\r\n\tviper.BindEnv(\"LOG\")\r\n\r\n\tswitch l := viper.Get(\"LOG\"); l {\r\n\tcase \"trace\":\r\n\t\tlog.SetLevel(log.TraceLevel)\r\n\tcase \"debug\":\r\n\t\tlog.SetLevel(log.DebugLevel)\r\n\tcase \"info\":\r\n\t\tlog.SetLevel(log.InfoLevel)\r\n\tcase \"warn\":\r\n\t\tlog.SetLevel(log.WarnLevel)\r\n\tcase \"fatal\":\r\n\t\tlog.SetLevel(log.FatalLevel)\r\n\tcase \"panic\":\r\n\t\tlog.SetLevel(log.PanicLevel)\r\n\t}\r\n\r\n\tif cfg.BaseURL != \"\" {\r\n\t\tBaseURL, err = url.Parse(cfg.BaseURL)\r\n\t\tif err != nil {\r\n\t\t\treturn nil, err\r\n\t\t}\r\n\t} else {\r\n\t\tBaseURL, err = url.Parse(defaultBaseURL)\r\n\t\tif err != nil {\r\n\t\t\treturn nil, err\r\n\t\t}\r\n\t}\r\n\r\n\tnewClient := &Client{\r\n\t\tBaseURL: BaseURL,\r\n\t\tclient: http.DefaultClient,\r\n\t\tcreds: &Credentials{\r\n\t\t\tAPIKey: cfg.APIKey,\r\n\t\t\tOrganizationID: cfg.OrganizationID,\r\n\t\t\tUserID: cfg.UserID,\r\n\t\t},\r\n\t}\r\n\r\n\tnewClient.Rulesets = &RulesetService{newClient}\r\n\tnewClient.Rules = &RuleService{newClient}\r\n\r\n\treturn newClient, nil\r\n}", "func NewClient(apiDef *raml.APIDefinition, packageName, rootImportPath, targetDir string,\n\tlibsRootURLs []string) (Client, error) {\n\t// rootImportPath only needed if we use libraries\n\tif rootImportPath == \"\" && len(apiDef.Libraries) > 0 {\n\t\treturn Client{}, fmt.Errorf(\"--import-path can't be empty when we use libraries\")\n\t}\n\n\trootImportPath = setRootImportPath(rootImportPath, targetDir)\n\tglobRootImportPath = rootImportPath\n\tglobAPIDef = apiDef\n\tglobLibRootURLs = libsRootURLs\n\n\tservices := map[string]*ClientService{}\n\tfor k, v := range apiDef.Resources {\n\t\trd := resource.New(apiDef, commons.NormalizeURITitle(apiDef.Title), packageName)\n\t\trd.GenerateMethods(&v, langGo, newServerMethod, newGoClientMethod)\n\t\tservices[k] = &ClientService{\n\t\t\trootEndpoint: k,\n\t\t\tPackageName: packageName,\n\t\t\tMethods: rd.Methods,\n\t\t}\n\t}\n\tclient := Client{\n\t\tapiDef: apiDef,\n\t\tName: escapeIdentifier(commons.NormalizeURI(apiDef.Title)),\n\t\tBaseURI: apiDef.BaseURI,\n\t\tlibraries: apiDef.Libraries,\n\t\tPackageName: packageName,\n\t\tRootImportPath: rootImportPath,\n\t\tServices: services,\n\t\tTargetDir: targetDir,\n\t\tlibsRootURLs: libsRootURLs,\n\t}\n\n\tif strings.Index(client.BaseURI, \"{version}\") > 0 {\n\t\tclient.BaseURI = strings.Replace(client.BaseURI, \"{version}\", apiDef.Version, -1)\n\t}\n\treturn client, nil\n}", "func New(client *client.Client, properties ClientProperties) *Client {\n\treturn &Client{\n\t\tclient: client,\n\n\t\tidentity: properties.Identity,\n\t\tserviceSid: properties.ServiceSid,\n\n\t\tChallenge: 
func(challengeSid string) *challenge.Client {\n\t\t\treturn challenge.New(client, challenge.ClientProperties{\n\t\t\t\tIdentity: properties.Identity,\n\t\t\t\tServiceSid: properties.ServiceSid,\n\t\t\t\tSid: challengeSid,\n\t\t\t})\n\t\t},\n\t\tChallenges: challenges.New(client, challenges.ClientProperties{\n\t\t\tIdentity: properties.Identity,\n\t\t\tServiceSid: properties.ServiceSid,\n\t\t}),\n\t\tFactor: func(factorSid string) *factor.Client {\n\t\t\treturn factor.New(client, factor.ClientProperties{\n\t\t\t\tIdentity: properties.Identity,\n\t\t\t\tServiceSid: properties.ServiceSid,\n\t\t\t\tSid: factorSid,\n\t\t\t})\n\t\t},\n\t\tFactors: factors.New(client, factors.ClientProperties{\n\t\t\tIdentity: properties.Identity,\n\t\t\tServiceSid: properties.ServiceSid,\n\t\t}),\n\t}\n}", "func NewClient(pluginsDir, configsDir string) (*client, error) {\n\treturn &client{\n\t\tpluginsDir: pluginsDir,\n\t\tconfigsDir: configsDir,\n\t}, nil\n}", "func NewPolicyClient(cfg aws.Config) PolicyClient {\n\treturn iam.NewFromConfig(cfg)\n}", "func (c *clientsFactory) PoliciesClient() (authz.PoliciesServiceClient, error) {\n\tconn, err := c.connectionByName(\"authz-service\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn authz.NewPoliciesServiceClient(conn), nil\n}", "func NewClient() (*Client, error) {\n\tgoprscClient, err := newGoprscClient()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to initialize Postfix REST Server API client: %s\", err)\n\t}\n\n\tc := &Client{client: goprscClient}\n\ts := service{client: goprscClient} // Reuse the same structure instead of allocating one for each service\n\tc.Auth = (*AuthService)(&s)\n\tc.Domains = (*DomainService)(&s)\n\tc.Accounts = (*AccountService)(&s)\n\tc.Aliases = (*AliasService)(&s)\n\t// Allocate separate structs for the BCC services as they have different state\n\tc.InputBccs = NewInputBccService(goprscClient)\n\tc.OutputBccs = NewOutputBccService(goprscClient)\n\treturn c, nil\n}", "func NewClient(cfg *restclient.Config) (Client, error) {\n\tresult := &client{}\n\tc, err := dynamic.NewForConfig(cfg)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Failed to create client, with error: %v\", err)\n\t\tlog.Printf(\"[Error] %s\", msg)\n\t\treturn nil, fmt.Errorf(msg)\n\t}\n\tresult.dynamicClient = c\n\treturn result, nil\n}", "func NewClient(secretBytes []byte, authorizeHandler func(string) (string, error)) (Client, error) {\n\tvar secret map[string]interface{}\n\tif err := json.Unmarshal(secretBytes, &secret); err != nil {\n\t\treturn nil, err\n\t}\n\tif authorizeHandler == nil {\n\t\tauthorizeHandler = defaultAuthorizeFlowHandler\n\t}\n\n\t// TODO: support \"web\" client secret by using a local web server.\n\t// According to the content in the json, decide whether to run three-legged\n\t// flow (for client secret) or two-legged flow (for service account).\n\tif installed, ok := secret[\"installed\"]; ok {\n\t\t// When the secret contains \"installed\" field, it is a client secret. 
We\n\t\t// will run a three-legged flow\n\t\tinstalledMap, ok := installed.(map[string]interface{})\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"Malformatted secret json, expected map for param \\\"installed\\\"\")\n\t\t}\n\t\treturn ThreeLeggedClient{installedMap, authorizeHandler}, nil\n\t} else if tokenType, ok := secret[\"type\"]; ok && \"service_account\" == tokenType {\n\t\t// If the token type is \"service_account\", we will run the two-legged flow\n\t\treturn TwoLeggedClient{secret}, nil\n\t} else {\n\t\treturn nil, fmt.Errorf(\"Unsupported token type.\")\n\t}\n}", "func New(client *client.Client, properties ClientProperties) *Client {\n\treturn &Client{\n\t\tclient: client,\n\n\t\tserviceSid: properties.ServiceSid,\n\t\tsid: properties.Sid,\n\n\t\tMessage: func(messageSid string) *message.Client {\n\t\t\treturn message.New(client, message.ClientProperties{\n\t\t\t\tConversationSid: properties.Sid,\n\t\t\t\tServiceSid: properties.ServiceSid,\n\t\t\t\tSid: messageSid,\n\t\t\t})\n\t\t},\n\t\tMessages: messages.New(client, messages.ClientProperties{\n\t\t\tConversationSid: properties.Sid,\n\t\t\tServiceSid: properties.ServiceSid,\n\t\t}),\n\t\tParticipant: func(participantSid string) *participant.Client {\n\t\t\treturn participant.New(client, participant.ClientProperties{\n\t\t\t\tConversationSid: properties.Sid,\n\t\t\t\tServiceSid: properties.ServiceSid,\n\t\t\t\tSid: participantSid,\n\t\t\t})\n\t\t},\n\t\tParticipants: participants.New(client, participants.ClientProperties{\n\t\t\tConversationSid: properties.Sid,\n\t\t\tServiceSid: properties.ServiceSid,\n\t\t}),\n\t\tWebhook: func(webhookSid string) *webhook.Client {\n\t\t\treturn webhook.New(client, webhook.ClientProperties{\n\t\t\t\tConversationSid: properties.Sid,\n\t\t\t\tServiceSid: properties.ServiceSid,\n\t\t\t\tSid: webhookSid,\n\t\t\t})\n\t\t},\n\t\tWebhooks: webhooks.New(client, webhooks.ClientProperties{\n\t\t\tConversationSid: properties.Sid,\n\t\t\tServiceSid: properties.ServiceSid,\n\t\t}),\n\t}\n}", "func NewClient(name string, initAddrs ...string) (*Client, error) {\n\tc := new(Client)\n\n\tlog.Debugf(\"pd-client: initial pds, pds=<%v>\",\n\t\tinitAddrs)\n\n\tc.name = name\n\tc.addrs = append(c.addrs, initAddrs...)\n\tc.seq = 0\n\n\terr := c.resetConn()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c, nil\n}", "func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {\n\tclientOpts := defaultGRPCClientOptions()\n\tif newClientHook != nil {\n\t\thookOpts, err := newClientHook(ctx, clientHookParams{})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tclientOpts = append(clientOpts, hookOpts...)\n\t}\n\n\tconnPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient := Client{CallOptions: defaultCallOptions()}\n\n\tc := &gRPCClient{\n\t\tconnPool: connPool,\n\t\tclient: osconfigpb.NewOsConfigServiceClient(connPool),\n\t\tCallOptions: &client.CallOptions,\n\t}\n\tc.setGoogleClientInfo()\n\n\tclient.internalClient = c\n\n\treturn &client, nil\n}", "func New(c rest.Interface) *Clientset {\n\tvar cs Clientset\n\tcs.policiesV1alpha2 = policiesv1alpha2.New(c)\n\n\tcs.DiscoveryClient = discovery.NewDiscoveryClient(c)\n\treturn &cs\n}", "func NewClient(cfg watson.Config) (Client, error) {\n\tci := Client{version: \"/\" + defaultMajorVersion}\n\tif len(cfg.Credentials.ServiceName) == 0 {\n\t\tcfg.Credentials.ServiceName = \"retrieve_and_rank\"\n\t}\n\tif len(cfg.Credentials.Url) == 0 {\n\t\tcfg.Credentials.Url = 
defaultUrl\n\t}\n\tclient, err := watson.NewClient(cfg.Credentials)\n\tif err != nil {\n\t\treturn Client{}, err\n\t}\n\tci.watsonClient = client\n\treturn ci, nil\n}", "func New(client *client.Client, properties ClientProperties) *Client {\n\treturn &Client{\n\t\tclient: client,\n\n\t\taccountSid: properties.AccountSid,\n\t\tdomainSid: properties.DomainSid,\n\t\tsid: properties.Sid,\n\t}\n}", "func New(client *client.Client, properties ClientProperties) *Client {\n\treturn &Client{\n\t\tclient: client,\n\n\t\tserviceSid: properties.ServiceSid,\n\t}\n}", "func NewPolicyStatesClient() PolicyStatesClient {\n\treturn NewPolicyStatesClientWithBaseURI(DefaultBaseURI)\n}", "func New(client *client.Client, properties ClientProperties) *Client {\n\treturn &Client{\n\t\tclient: client,\n\n\t\tsid: properties.Sid,\n\n\t\tAccessTokens: access_tokens.New(client, access_tokens.ClientProperties{\n\t\t\tServiceSid: properties.Sid,\n\t\t}),\n\t\tEntities: entities.New(client, entities.ClientProperties{\n\t\t\tServiceSid: properties.Sid,\n\t\t}),\n\t\tEntity: func(identity string) *entity.Client {\n\t\t\treturn entity.New(client, entity.ClientProperties{\n\t\t\t\tIdentity: identity,\n\t\t\t\tServiceSid: properties.Sid,\n\t\t\t})\n\t\t},\n\t\tMessagingConfiguration: func(countryCode string) *messaging_configuration.Client {\n\t\t\treturn messaging_configuration.New(client, messaging_configuration.ClientProperties{\n\t\t\t\tCountryCode: countryCode,\n\t\t\t\tServiceSid: properties.Sid,\n\t\t\t})\n\t\t},\n\t\tMessagingConfigurations: messaging_configurations.New(client, messaging_configurations.ClientProperties{\n\t\t\tServiceSid: properties.Sid,\n\t\t}),\n\t\tRateLimit: func(rateLimitSid string) *rate_limit.Client {\n\t\t\treturn rate_limit.New(client, rate_limit.ClientProperties{\n\t\t\t\tServiceSid: properties.Sid,\n\t\t\t\tSid: rateLimitSid,\n\t\t\t})\n\t\t},\n\t\tRateLimits: rate_limits.New(client, rate_limits.ClientProperties{\n\t\t\tServiceSid: properties.Sid,\n\t\t}),\n\t\tVerification: func(verificationSid string) *verification.Client {\n\t\t\treturn verification.New(client, verification.ClientProperties{\n\t\t\t\tServiceSid: properties.Sid,\n\t\t\t\tSid: verificationSid,\n\t\t\t})\n\t\t},\n\t\tVerificationCheck: verification_check.New(client, verification_check.ClientProperties{\n\t\t\tServiceSid: properties.Sid,\n\t\t}),\n\t\tVerifications: verifications.New(client, verifications.ClientProperties{\n\t\t\tServiceSid: properties.Sid,\n\t\t}),\n\t\tWebhook: func(webhookSid string) *webhook.Client {\n\t\t\treturn webhook.New(client, webhook.ClientProperties{\n\t\t\t\tServiceSid: properties.Sid,\n\t\t\t\tSid: webhookSid,\n\t\t\t})\n\t\t},\n\t\tWebhooks: webhooks.New(client, webhooks.ClientProperties{\n\t\t\tServiceSid: properties.Sid,\n\t\t}),\n\t}\n}", "func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {\n\tclientOpts := defaultGRPCClientOptions()\n\tif newClientHook != nil {\n\t\thookOpts, err := newClientHook(ctx, clientHookParams{})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tclientOpts = append(clientOpts, hookOpts...)\n\t}\n\n\tconnPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient := Client{CallOptions: defaultCallOptions()}\n\n\tc := &gRPCClient{\n\t\tconnPool: connPool,\n\t\tclient: firestorepb.NewFirestoreClient(connPool),\n\t\tCallOptions: &client.CallOptions,\n\t\toperationsClient: 
longrunningpb.NewOperationsClient(connPool),\n\t}\n\tc.setGoogleClientInfo()\n\n\tclient.internalClient = c\n\n\treturn &client, nil\n}", "func NewClient(api client.KeysAPI) storage.Client {\n\treturn &Client{\n\t\tAPI: api,\n\t}\n}", "func NewClient(config *clientcmdapi.Config, log log.Logger, commandPath string) Client {\n\treturn &client{\n\t\tconfig: config,\n\t\tlog: log,\n\t\thelmPath: commandPath,\n\t}\n}", "func NewClient(userList, getUser, createUser, updateUser, deleteUser goa.Endpoint) *Client {\n\treturn &Client{\n\t\tUserListEndpoint: userList,\n\t\tGetUserEndpoint: getUser,\n\t\tCreateUserEndpoint: createUser,\n\t\tUpdateUserEndpoint: updateUser,\n\t\tDeleteUserEndpoint: deleteUser,\n\t}\n}", "func NewClient(kclient k8s.Client) (*Client, error) {\n\tctx := context.Background()\n\tsecret := &corev1.Secret{}\n\terr := kclient.Get(\n\t\tctx,\n\t\ttypes.NamespacedName{\n\t\t\tName: config.GCPSecretName,\n\t\t\tNamespace: config.OperatorNamespace,\n\t\t},\n\t\tsecret)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"couldn't get Secret with credentials %w\", err)\n\t}\n\tserviceAccountJSON, ok := secret.Data[\"service_account.json\"]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"access credentials missing service account\")\n\t}\n\n\t// initialize actual client\n\tc, err := newClient(ctx, serviceAccountJSON)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"couldn't create GCP client %s\", err)\n\t}\n\n\t// enchant the client with params required\n\tregion, err := getClusterRegion(kclient)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.region = region\n\n\tmasterList, err := baseutils.GetMasterMachines(kclient)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.masterList = masterList\n\tinfrastructureName, err := baseutils.GetClusterName(kclient)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.clusterName = infrastructureName\n\tbaseDomain, err := baseutils.GetClusterBaseDomain(kclient)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.baseDomain = baseDomain\n\n\treturn c, nil\n}", "func NewGroupPolicyDefinition()(*GroupPolicyDefinition) {\n m := &GroupPolicyDefinition{\n Entity: *NewEntity(),\n }\n return m\n}", "func NewClient(opts ...func(c *Client) error) (*Client, error) {\n\tc := &Client{\n\t\tclient: http.DefaultClient,\n\t\tBasePath: basePath,\n\t}\n\tc.common.client = c\n\tfor _, opt := range opts {\n\t\tif err := opt(c); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tc.Run = (*RunService)(&c.common)\n\n\treturn c, nil\n}", "func NewClient() *Client {\n\tclient := &Client{\n\t\tapi: C.Create(),\n\t\tVariables: map[SettableVariable]string{},\n\t\tTrim: true,\n\t\tshouldInit: true,\n\t\tLanguages: []string{\"eng\"},\n\t}\n\treturn client\n}", "func NewGCPClient(keys, projectName string) (*GCPClient, error) {\n\tlog.Debugf(\"Connecting to GCP\")\n\tctx := context.Background()\n\tvar client *GCPClient\n\tif projectName == \"\" {\n\t\treturn nil, fmt.Errorf(\"the project name is not specified\")\n\t}\n\tif keys != \"\" {\n\t\tlog.Debugf(\"Using Keys %s\", keys)\n\t\tf, err := os.Open(keys)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tjsonKey, err := io.ReadAll(f)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tconfig, err := google.JWTConfigFromJSON(jsonKey,\n\t\t\tstorage.DevstorageReadWriteScope,\n\t\t\tcompute.ComputeScope,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tclient = &GCPClient{\n\t\t\tclient: config.Client(ctx),\n\t\t\tprojectName: projectName,\n\t\t}\n\t} else {\n\t\tlog.Debugf(\"Using Application Default 
credentials\")\n\t\tgc, err := google.DefaultClient(\n\t\t\tctx,\n\t\t\tstorage.DevstorageReadWriteScope,\n\t\t\tcompute.ComputeScope,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tclient = &GCPClient{\n\t\t\tclient: gc,\n\t\t\tprojectName: projectName,\n\t\t}\n\t}\n\n\tvar err error\n\tclient.compute, err = compute.NewService(ctx, option.WithHTTPClient(client.client))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient.storage, err = storage.NewService(ctx, option.WithHTTPClient(client.client))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog.Debugf(\"Generating SSH Keypair\")\n\tclient.privKey, err = rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn client, nil\n}", "func NewClient(projectID string) (*Client, error) {\n\tctx := context.Background()\n\toauthClient, err := google.DefaultClient(ctx, dataflow.CloudPlatformScope)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tservice, err := dataflow.New(oauthClient)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Client{\n\t\tservice: service,\n\t\tprojectID: projectID,\n\t}, nil\n}", "func (f *extendedPodFactory) CreateClient(cfg *rest.Config) (interface{}, error) {\n\treturn f.client, nil\n}", "func NewClient(cfg Config) (*Client, error) {\n\tvar (\n\t\tc Client\n\t\tv *validator.Validate\n\t\terr error\n\t)\n\n\tv = validator.New()\n\n\terr = v.Struct(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc.cfg = cfg\n\tc.clntCredCfg = clientcredentials.Config{\n\t\tClientID: cfg.ClientID,\n\t\tClientSecret: cfg.ClientSecret,\n\t}\n\n\tc.oauth = OAuth{\n\t\tClientID: cfg.ClientID,\n\t\tClientSecret: cfg.ClientSecret,\n\t}\n\n\terr = c.SetRegionParameters(cfg.Region, cfg.Locale)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &c, nil\n}", "func (o *ClientConfig) NewClient(options ...ClientOption) (Client, error) {\n\n\t// Run provided ClientOption configuration options.\n\tfor _, opt := range options {\n\t\terr := opt(o)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed applying functional option: %w\", err)\n\t\t}\n\t}\n\n\t// Check mandatory option is provided.\n\tif o.githubUserClient == nil {\n\t\treturn nil, fmt.Errorf(\"github client not provided\")\n\t}\n\n\ttokenGenerator := secret.GetTokenGenerator(o.tokenPath)\n\n\tgitFactory, err := o.GitClient(o.githubUserClient, tokenGenerator, secret.Censor, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgitClient := &client{}\n\t// Initialize map to enable writing to it in methods.\n\tgitClient.clonedRepos = make(map[string]string)\n\tgitClient.ClientFactory = gitFactory\n\treturn gitClient, err\n}", "func NewClient(cacheDir string, quiet bool, policyBundleRepo string, opts ...Option) (*Client, error) {\n\to := &options{\n\t\tclock: clock.RealClock{},\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(o)\n\t}\n\n\tif policyBundleRepo == \"\" {\n\t\tpolicyBundleRepo = fmt.Sprintf(\"%s:%d\", BundleRepository, BundleVersion)\n\t}\n\n\treturn &Client{\n\t\toptions: o,\n\t\tpolicyDir: filepath.Join(cacheDir, \"policy\"),\n\t\tpolicyBundleRepo: policyBundleRepo,\n\t\tquiet: quiet,\n\t}, nil\n}", "func NewClient(cfg *rest.Config) (versioned.Interface, error) {\n\tglog.Info(\"NewClient()\")\n\tscheme := runtime.NewScheme()\n\tif err := v1.AddToScheme(scheme); err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfig := *cfg\n\tconfig.GroupVersion = &v1.SchemeGroupVersion\n\tconfig.APIPath = \"/apis\"\n\tconfig.ContentType = runtime.ContentTypeJSON\n\tconfig.NegotiatedSerializer = 
serializer.WithoutConversionCodecFactory{CodecFactory: serializer.NewCodecFactory(scheme)}\n\n\tcs, err := versioned.NewForConfig(&config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn cs, nil\n}", "func New(client *client.Client, properties ClientProperties) *Client {\n\treturn &Client{\n\t\tclient: client,\n\n\t\taccountSid: properties.AccountSid,\n\t\tcallSid: properties.CallSid,\n\t}\n}", "func NewClient(kubeConfig string, opts ...ClientOption) (*Client, error) {\n\toptions := clientOptions{ // Default options\n\t\tNamespace: \"default\",\n\t\tDriver: \"secrets\",\n\t\tDebugLog: func(format string, v ...interface{}) {},\n\t}\n\tfor _, opt := range opts {\n\t\topt.apply(&options)\n\t}\n\t// Create actionConfig, which will be used for all actions of this helm client.\n\tactionConfig := new(action.Configuration)\n\tclientGetter := &restClientGetter{\n\t\tNamespace: options.Namespace,\n\t\tKubeConfig: kubeConfig,\n\t}\n\tif err := actionConfig.Init(clientGetter, options.Namespace, options.Driver, options.DebugLog); err != nil {\n\t\treturn nil, err\n\t}\n\tc := &Client{\n\t\tkubeConfig: kubeConfig,\n\t\toptions: options,\n\t\tactionConfig: actionConfig,\n\t\trepoFile: repo.NewFile(),\n\t}\n\tif err := c.setupDirs(); err != nil {\n\t\tc.Close()\n\t\treturn nil, err\n\t}\n\treturn c, nil\n}", "func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {\n\tclientOpts := defaultGRPCClientOptions()\n\tif newClientHook != nil {\n\t\thookOpts, err := newClientHook(ctx, clientHookParams{})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tclientOpts = append(clientOpts, hookOpts...)\n\t}\n\n\tconnPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient := Client{CallOptions: defaultCallOptions()}\n\n\tc := &gRPCClient{\n\t\tconnPool: connPool,\n\t\tclient: cloudtaskspb.NewCloudTasksClient(connPool),\n\t\tCallOptions: &client.CallOptions,\n\t\tlocationsClient: locationpb.NewLocationsClient(connPool),\n\t}\n\tc.setGoogleClientInfo()\n\n\tclient.internalClient = c\n\n\treturn &client, nil\n}", "func (client *PolicyDefinitionsClient) listCreateRequest(ctx context.Context, options *PolicyDefinitionsListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tunencodedParams := []string{req.Raw().URL.RawQuery}\n\tif options != nil && options.Filter != nil {\n\t\tunencodedParams = append(unencodedParams, \"$filter=\"+*options.Filter)\n\t}\n\treq.Raw().URL.RawQuery = strings.Join(unencodedParams, \"&\")\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func NewClient(conn *rpcc.Conn) *domainClient {\n\treturn &domainClient{conn: conn}\n}", "func NewClient(conn *rpcc.Conn) *domainClient {\n\treturn &domainClient{conn: conn}\n}", "func NewClient(conn *rpcc.Conn) *domainClient {\n\treturn 
&domainClient{conn: conn}\n}", "func newClient(configuration *Configuration, options ...ClientOption) (Client, error) {\n\tclientCfg, err := newTLSClientConfig(configuration)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error reading and/or parsing the certification files. Cause: %w\", err)\n\t}\n\n\tnetClient := http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tTLSClientConfig: clientCfg,\n\t\t},\n\t}\n\n\tinstance := &client{client: &netClient, configuration: configuration, encoder: newJSONEncoder(), decoder: newJSONDecoder()}\n\n\t// Apply options if there are any, can overwrite default\n\tfor _, option := range options {\n\t\toption(instance)\n\t}\n\n\treturn instance, nil\n}", "func New(client *client.Client, properties ClientProperties) *Client {\n\treturn &Client{\n\t\tclient: client,\n\n\t\taccountSid: properties.AccountSid,\n\t\tcountryCode: properties.CountryCode,\n\t}\n}", "func NewAccountQuotaPolicyClient(subscriptionID string) AccountQuotaPolicyClient {\n return NewAccountQuotaPolicyClientWithBaseURI(DefaultBaseURI, subscriptionID)\n}", "func NewClient(graphql GraphQLClient, opts ...ClientOption) *Client {\n\tc := &Client{graphQLAPI: graphql}\n\tfor _, opt := range opts {\n\t\topt(c)\n\t}\n\treturn c\n}", "func New() (*Client, error) {\n\tvar client Client\n\tvar err error\n\n\t// initialize client-go clients\n\tloadingRules := clientcmd.NewDefaultClientConfigLoadingRules()\n\tconfigOverrides := &clientcmd.ConfigOverrides{}\n\tclient.KubeConfig = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)\n\n\tclient.KubeClientConfig, err = client.KubeConfig.ClientConfig()\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, errorMsg)\n\t}\n\n\tclient.KubeClient, err = kubernetes.NewForConfig(client.KubeClientConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient.Namespace, _, err = client.KubeConfig.Namespace()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient.OperatorClient, err = operatorsclientset.NewForConfig(client.KubeClientConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient.DynamicClient, err = dynamic.NewForConfig(client.KubeClientConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &client, nil\n}", "func NewClient() (*Client, error) {\n\tpipePath := client.PipePath(GroupName, Version)\n\treturn NewClientWithPipePath(pipePath)\n}", "func NewClient(c credentials) Client {\n\treturn Client{\n\t\tcredentials: c,\n\t\tAPIVersion: defaultAPIVersion,\n\t\tEndpoint: defaultEndpoint,\n\t\tDebug: false,\n\t\thttpClient: &http.Client{},\n\t\tLogger: defaultLogger,\n\t}\n}", "func NewClient(httpClient *http.Client) (*Client, error) {\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\n\tc := &Client{}\n\n\tbaseURL, _ := url.Parse(DefaultBaseURL)\n\tvsspsBaseURL, _ := url.Parse(DefaultVsspsBaseURL)\n\tvsaexBaseURL, _ := url.Parse(DefaultVsaexBaseURL)\n\n\tc.client = httpClient\n\tc.BaseURL = *baseURL\n\tc.VsspsBaseURL = *vsspsBaseURL\n\tc.VsaexBaseURL = *vsaexBaseURL\n\tc.UserAgent = userAgent\n\n\tc.Boards = &BoardsService{client: c}\n\tc.BuildDefinitions = &BuildDefinitionsService{client: c}\n\tc.Builds = &BuildsService{client: c}\n\tc.DeliveryPlans = &DeliveryPlansService{client: c}\n\tc.Favourites = &FavouritesService{client: c}\n\tc.Git = &GitService{client: c}\n\tc.Iterations = &IterationsService{client: c}\n\tc.PolicyEvaluations = &PolicyEvaluationsService{client: c}\n\tc.PullRequests = &PullRequestsService{client: c}\n\tc.Teams = &TeamsService{client: c}\n\tc.Tests = 
&TestsService{client: c}\n\tc.Users = &UsersService{client: c}\n\tc.UserEntitlements = &UserEntitlementsService{client: c}\n\tc.WorkItems = &WorkItemsService{client: c}\n\n\treturn c, nil\n}", "func NewHCRPAssignmentsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*HCRPAssignmentsClient, error) {\n\tcl, err := arm.NewClient(moduleName+\".HCRPAssignmentsClient\", moduleVersion, credential, options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient := &HCRPAssignmentsClient{\n\t\tsubscriptionID: subscriptionID,\n\t\tinternal: cl,\n\t}\n\treturn client, nil\n}", "func NewCreatePolicyParamsWithHTTPClient(client *http.Client) *CreatePolicyParams {\n\tvar ()\n\treturn &CreatePolicyParams{\n\t\tHTTPClient: client,\n\t}\n}", "func New(ctx context.Context, credentials, token string, opts ...ClientOption) (*Client, error) {\r\n\r\n\tclient := &Client{\r\n\t\tscope: \"https://www.googleapis.com/auth/spreadsheets.readonly\",\r\n\t}\r\n\r\n\tfor _, opt := range opts {\r\n\t\tclient = opt(client)\r\n\t}\r\n\r\n\treturn new(ctx, credentials, token, client)\r\n}", "func NewClient(clientType int, credentials Credentials) (*Client, error) {\n\tvar client Client\n\n\tswitch clientType {\n\tcase ClientTypeATT:\n\t\tclient = ATTClient{\n\t\t\tcredentials: credentials,\n\t\t}\n\tcase ClientTypeVerizon:\n\t\tclient = VerizonClient{\n\t\t\tcredentials: credentials,\n\t\t}\n\t}\n\n\treturn &client, nil\n}", "func NewClient(cfg *rest.Config) (versioned.Interface, error) {\n\tscheme := runtime.NewScheme()\n\tif err := api.AddToScheme(scheme); err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfig := *cfg\n\tconfig.GroupVersion = &api.SchemeGroupVersion\n\tconfig.APIPath = \"/apis\"\n\tconfig.ContentType = runtime.ContentTypeJSON\n\tconfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: serializer.NewCodecFactory(scheme)}\n\n\tcs, err := versioned.NewForConfig(&config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn cs, nil\n}", "func NewClient(registry *syncbase.Registry, dispatcher orchestrator.Dispatcher) ConfigClient {\n\treturn &client{\n\t\tregistry: registry,\n\t\tdispatcher: dispatcher,\n\t}\n}", "func NewClient(list, create, show, update, delete_ goa.Endpoint) *Client {\n\treturn &Client{\n\t\tListEndpoint: list,\n\t\tCreateEndpoint: create,\n\t\tShowEndpoint: show,\n\t\tUpdateEndpoint: update,\n\t\tDeleteEndpoint: delete_,\n\t}\n}", "func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {\n\tclientOpts := defaultGRPCClientOptions()\n\tif newClientHook != nil {\n\t\thookOpts, err := newClientHook(ctx, clientHookParams{})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tclientOpts = append(clientOpts, hookOpts...)\n\t}\n\n\tconnPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient := Client{CallOptions: defaultCallOptions()}\n\n\tc := &gRPCClient{\n\t\tconnPool: connPool,\n\t\tclient: serviceusagepb.NewServiceUsageClient(connPool),\n\t\tCallOptions: &client.CallOptions,\n\t}\n\tc.setGoogleClientInfo()\n\n\tclient.internalClient = c\n\n\tclient.LROClient, err = lroauto.NewOperationsClient(ctx, gtransport.WithConnPool(connPool))\n\tif err != nil {\n\t\t// This error \"should not happen\", since we are just reusing old connection pool\n\t\t// and never actually need to dial.\n\t\t// If this does happen, we could leak connp. 
However, we cannot close conn:\n\t\t// If the user invoked the constructor with option.WithGRPCConn,\n\t\t// we would close a connection that's still in use.\n\t\t// TODO: investigate error conditions.\n\t\treturn nil, err\n\t}\n\tc.LROClient = &client.LROClient\n\treturn &client, nil\n}", "func NewClient(conf ClientConfig) (*Client, error) {\n\tkubeClient, kubeConfig, err := getKubeClient(conf.DNSAddress)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\ttunnel, err := portforwarder.New(\"kube-system\", kubeClient, kubeConfig)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\toptions := []helm.Option{\n\t\thelm.Host(fmt.Sprintf(\"127.0.0.1:%d\", tunnel.Local)),\n\t}\n\treturn &Client{\n\t\tclient: helm.NewClient(options...),\n\t\ttunnel: tunnel,\n\t}, nil\n}", "func NewPipelinesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*PipelinesClient, error) {\n\tcl, err := arm.NewClient(moduleName+\".PipelinesClient\", moduleVersion, credential, options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient := &PipelinesClient{\n\t\tsubscriptionID: subscriptionID,\n\t\tinternal: cl,\n\t}\n\treturn client, nil\n}", "func newClientFromFlags(fs tbnflag.FlagSet) client {\n\treturn &clientImpl{}\n}", "func NewPermissionsClient(credential azcore.TokenCredential, options *arm.ClientOptions) *PermissionsClient {\n\tcp := arm.ClientOptions{}\n\tif options != nil {\n\t\tcp = *options\n\t}\n\tif len(cp.Endpoint) == 0 {\n\t\tcp.Endpoint = arm.AzurePublicCloud\n\t}\n\tclient := &PermissionsClient{\n\t\thost: string(cp.Endpoint),\n\t\tpl: armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, &cp),\n\t}\n\treturn client\n}", "func New(clientID string, options ...Option) (Client, error) {\n\topts := clientOptions{\n\t\tauthority: base.AuthorityPublicCloud,\n\t\thttpClient: shared.DefaultClient,\n\t}\n\n\tfor _, o := range options {\n\t\to(&opts)\n\t}\n\tif err := opts.validate(); err != nil {\n\t\treturn Client{}, err\n\t}\n\n\tbase, err := base.New(clientID, opts.authority, oauth.New(opts.httpClient), base.WithCacheAccessor(opts.accessor), base.WithClientCapabilities(opts.capabilities), base.WithInstanceDiscovery(!opts.disableInstanceDiscovery))\n\tif err != nil {\n\t\treturn Client{}, err\n\t}\n\treturn Client{base}, nil\n}", "func NewClient(t string) *gophercloud.ServiceClient {\n\tvar err error\n\tao, region, err := authMethod()\n\tif err != nil {\n\t\tfmt.Printf(\"Error retrieving authentication credentials: %s\\n\", err)\n\t}\n\tif ao.IdentityEndpoint == \"\" {\n\t\tao.IdentityEndpoint = rackspace.RackspaceUSIdentity\n\t}\n\tpc, err := rackspace.AuthenticatedClient(ao)\n\tif err != nil {\n\t\tfmt.Printf(\"Error creating ProviderClient: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tvar sc *gophercloud.ServiceClient\n\tswitch t {\n\tcase \"compute\":\n\t\tsc, err = rackspace.NewComputeV2(pc, gophercloud.EndpointOpts{\n\t\t\tRegion: region,\n\t\t})\n\t\tbreak\n\tcase \"blockstorage\":\n\t\tsc, err = rackspace.NewBlockStorageV1(pc, gophercloud.EndpointOpts{\n\t\t\tRegion: region,\n\t\t})\n\t\tbreak\n\tcase \"networking\":\n\t\tsc, err = rackspace.NewNetworkV2(pc, gophercloud.EndpointOpts{\n\t\t\tRegion: region,\n\t\t})\n\t\tbreak\n\t}\n\tif err != nil {\n\t\tfmt.Printf(\"Error creating ServiceClient (%s): %s\\n\", err, t)\n\t\tos.Exit(1)\n\t}\n\t// sc.UserAgent.Prepend(\"rack/\" + util.Version)\n\treturn sc\n}", "func NewClient(config *Settings) (*PlatformClient, error) {\n\tif err := config.Validate(); err != nil 
{\n\t\treturn nil, err\n\t}\n\tcfClient, err := config.CF.CFClientProvider(&config.CF.Config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &PlatformClient{\n\t\tclient: cfClient,\n\t\tsettings: config,\n\t\tplanResolver: NewPlanResolver(),\n\t}, nil\n}", "func NewPeeringPoliciesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*PeeringPoliciesClient, error) {\n\tcl, err := arm.NewClient(moduleName+\".PeeringPoliciesClient\", moduleVersion, credential, options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient := &PeeringPoliciesClient{\n\t\tsubscriptionID: subscriptionID,\n\t\tinternal: cl,\n\t}\n\treturn client, nil\n}", "func NewClient(config *Configuration) (*Client, error) {\n\t// Check that authorization values are defined at all\n\tif config.AuthorizationHeaderToken == \"\" {\n\t\treturn nil, fmt.Errorf(\"No authorization is defined. You need AuthorizationHeaderToken\")\n\t}\n\n\tif config.ApplicationID == \"\" {\n\t\treturn nil, fmt.Errorf(\"ApplicationID is required - this is the only way to identify your requests in highwinds logs\")\n\t}\n\n\t// Configure the client from final configuration\n\tc := &Client{\n\t\tc: http.DefaultClient,\n\t\tDebug: config.Debug,\n\t\tApplicationID: config.ApplicationID,\n\t\tIdentity: &identity.Identification{\n\t\t\tAuthorizationHeaderToken: config.AuthorizationHeaderToken,\n\t\t},\n\t}\n\n\t// TODO eventually instantiate a custom client but not ready for that yet\n\n\t// Configure timeout on default client\n\tif config.Timeout == 0 {\n\t\tc.c.Timeout = time.Second * 10\n\t} else {\n\t\tc.c.Timeout = time.Second * time.Duration(config.Timeout)\n\t}\n\n\t// Set default headers\n\tc.Headers = c.GetHeaders()\n\treturn c, nil\n}", "func NewClient(config *Config) (c *Client, err error) {\n\tif config == nil {\n\t\treturn nil, errClientConfigNil\n\t}\n\n\tc = &Client{\n\t\trevocationTransport: http.DefaultTransport,\n\t}\n\n\tif c.transport, err = ghinstallation.NewAppsTransport(\n\t\thttp.DefaultTransport,\n\t\tint64(config.AppID),\n\t\t[]byte(config.PrvKey),\n\t); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif c.url, err = url.ParseRequestURI(fmt.Sprintf(\n\t\t\"%s/app/installations/%v/access_tokens\",\n\t\tstrings.TrimSuffix(fmt.Sprint(config.BaseURL), \"/\"),\n\t\tconfig.InsID,\n\t)); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif c.revocationURL, err = url.ParseRequestURI(fmt.Sprintf(\n\t\t\"%s/installation/token\",\n\t\tstrings.TrimSuffix(fmt.Sprint(config.BaseURL), \"/\"),\n\t)); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c, nil\n}", "func NewClient(opts *cli.Options) (pulsar.Client, error) {\n\tclientOpts := pulsar.ClientOptions{\n\t\tURL: opts.Pulsar.Address,\n\t\tOperationTimeout: 30 * time.Second,\n\t\tConnectionTimeout: opts.Pulsar.ConnectTimeout,\n\t\tTLSAllowInsecureConnection: opts.Pulsar.InsecureTLS,\n\t}\n\n\tif opts.Pulsar.AuthCertificateFile != \"\" && opts.Pulsar.AuthKeyFile != \"\" {\n\t\tclientOpts.Authentication = pulsar.NewAuthenticationTLS(opts.Pulsar.AuthCertificateFile, opts.Pulsar.AuthKeyFile)\n\t}\n\n\tclient, err := pulsar.NewClient(clientOpts)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Could not instantiate Pulsar client\")\n\t}\n\n\treturn client, nil\n}", "func NewClient() (*Client, error) {\n\tpath, err := exec.LookPath(\"helm\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"helm must be installed and available in path: %s\", err.Error())\n\t}\n\tklog.V(3).Infof(\"found helm at %s\", path)\n\treturn &Client{path}, nil\n}", "func 
NewClient(healthcheck, listDevices, createDevice, updateCharge, getChargeHistory, updateDevice goa.Endpoint) *Client {\n\treturn &Client{\n\t\tHealthcheckEndpoint: healthcheck,\n\t\tListDevicesEndpoint: listDevices,\n\t\tCreateDeviceEndpoint: createDevice,\n\t\tUpdateChargeEndpoint: updateCharge,\n\t\tGetChargeHistoryEndpoint: getChargeHistory,\n\t\tUpdateDeviceEndpoint: updateDevice,\n\t}\n}", "func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {\n\tclientOpts := defaultGRPCClientOptions()\n\tif newClientHook != nil {\n\t\thookOpts, err := newClientHook(ctx, clientHookParams{})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tclientOpts = append(clientOpts, hookOpts...)\n\t}\n\n\tconnPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient := Client{CallOptions: defaultCallOptions()}\n\n\tc := &gRPCClient{\n\t\tconnPool: connPool,\n\t\tclient: netapppb.NewNetAppClient(connPool),\n\t\tCallOptions: &client.CallOptions,\n\t\toperationsClient: longrunningpb.NewOperationsClient(connPool),\n\t\tlocationsClient: locationpb.NewLocationsClient(connPool),\n\t}\n\tc.setGoogleClientInfo()\n\n\tclient.internalClient = c\n\n\tclient.LROClient, err = lroauto.NewOperationsClient(ctx, gtransport.WithConnPool(connPool))\n\tif err != nil {\n\t\t// This error \"should not happen\", since we are just reusing old connection pool\n\t\t// and never actually need to dial.\n\t\t// If this does happen, we could leak connp. However, we cannot close conn:\n\t\t// If the user invoked the constructor with option.WithGRPCConn,\n\t\t// we would close a connection that's still in use.\n\t\t// TODO: investigate error conditions.\n\t\treturn nil, err\n\t}\n\tc.LROClient = &client.LROClient\n\treturn &client, nil\n}", "func New(client *client.Client, properties ClientProperties) *Client {\n\treturn &Client{\n\t\tclient: client,\n\n\t\taccountSid: properties.AccountSid,\n\t\tsid: properties.Sid,\n\t}\n}", "func NewClient(options ...DNSOption) networkservice.NetworkServiceClient {\n\tvar c = &dnsContextClient{\n\t\tchainContext: context.Background(),\n\t\tdefaultNameServerIP: \"127.0.0.1\",\n\t\tresolveConfigPath: \"/etc/resolv.conf\",\n\t}\n\tfor _, o := range options {\n\t\to.apply(c)\n\t}\n\n\tc.initialize()\n\n\treturn c\n}", "func NewClient(id string, wsClient ws.WsClient, dispatcher ClientDispatcher, stateHandler ClientState, profiles ...*ocpp.Profile) *Client {\n\tendpoint := Endpoint{}\n\tif wsClient == nil {\n\t\tpanic(\"wsClient parameter cannot be nil\")\n\t}\n\tfor _, profile := range profiles {\n\t\tendpoint.AddProfile(profile)\n\t}\n\tif dispatcher == nil {\n\t\tdispatcher = NewDefaultClientDispatcher(NewFIFOClientQueue(10))\n\t}\n\tif stateHandler == nil {\n\t\tstateHandler = NewClientState()\n\t}\n\tdispatcher.SetNetworkClient(wsClient)\n\tdispatcher.SetPendingRequestState(stateHandler)\n\treturn &Client{Endpoint: endpoint, client: wsClient, Id: id, dispatcher: dispatcher, RequestState: stateHandler}\n}", "func NewClient(health, deposit, withdraw, transfer, balance, adminWallets goa.Endpoint) *Client {\n\treturn &Client{\n\t\tHealthEndpoint: health,\n\t\tDepositEndpoint: deposit,\n\t\tWithdrawEndpoint: withdraw,\n\t\tTransferEndpoint: transfer,\n\t\tBalanceEndpoint: balance,\n\t\tAdminWalletsEndpoint: adminWallets,\n\t}\n}", "func NewClient(batchAvailabilityLookup, checkAvailability, createBooking, updateBooking, getBookingStatus, listBookings goa.Endpoint) *Client {\n\treturn 
&Client{\n\t\tBatchAvailabilityLookupEndpoint: batchAvailabilityLookup,\n\t\tCheckAvailabilityEndpoint: checkAvailability,\n\t\tCreateBookingEndpoint: createBooking,\n\t\tUpdateBookingEndpoint: updateBooking,\n\t\tGetBookingStatusEndpoint: getBookingStatus,\n\t\tListBookingsEndpoint: listBookings,\n\t}\n}", "func NewClient(token string) *Client {\n\tc := Client{\n\t\ttoken: token,\n\t\thclient: retryablehttp.NewClient(),\n\t}\n\n\t// set up http client\n\tc.hclient.Logger = nil\n\tc.hclient.ErrorHandler = c.errorHandler\n\tc.hclient.RetryMax = retryLimit\n\tc.hclient.RetryWaitMin = maxRateLimit / 3\n\tc.hclient.RetryWaitMax = maxRateLimit\n\n\t// add services\n\tc.Account = &AccountService{&c}\n\tc.Server = &ServerService{&c}\n\tc.Transaction = &TransactionService{&c}\n\tc.CreateOptions = &CreateOptionsService{&c}\n\tc.SSHKey = &SSHKeyService{&c}\n\n\treturn &c\n}", "func NewClient(ctx *pulumi.Context,\n\tname string, args *ClientArgs, opts ...pulumi.ResourceOption) (*Client, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.Brand == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Brand'\")\n\t}\n\tif args.DisplayName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'DisplayName'\")\n\t}\n\tsecrets := pulumi.AdditionalSecretOutputs([]string{\n\t\t\"secret\",\n\t})\n\topts = append(opts, secrets)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Client\n\terr := ctx.RegisterResource(\"gcp:iap/client:Client\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewClient(modifiers ...RequestModifier) *Client {\n\treturn &Client{modifiers}\n}", "func New(clientSet apimachinery.ClientSetInterface) Factory {\n\treturn &factory{\n\t\tclientSet: clientSet,\n\t}\n}", "func NewClient(opts ClientOptions, client *http.Client) (Client, error) {\n\tif err := opts.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif opts.ResourceType == \"\" {\n\t\topts.ResourceType = DefaultResourceType\n\t}\n\tif opts.ResourceID == \"\" {\n\t\tvar err error\n\t\thostname, err := os.Hostname()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\topts.ResourceID = hostname\n\t}\n\tif opts.LogID == \"\" {\n\t\treturn nil, errors.New(\"cloudlogging: no LogID is provided\")\n\t}\n\n\tservice, err := cloudlog.New(client)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif opts.UserAgent != \"\" {\n\t\tservice.UserAgent = opts.UserAgent\n\t}\n\n\tc := clientImpl{\n\t\tClientOptions: &opts,\n\t\tservice: cloudlog.NewProjectsLogsEntriesService(service),\n\t\tcommonLabels: make(map[string]string, len(opts.CommonLabels)),\n\t}\n\tfor k, v := range opts.CommonLabels {\n\t\tc.commonLabels[k] = v\n\t}\n\tif c.ResourceType != \"\" {\n\t\tc.commonLabels[\"compute.googleapis.com/resource_type\"] = c.ResourceType\n\t}\n\tif c.ResourceID != \"\" {\n\t\tc.commonLabels[\"compute.googleapis.com/resource_id\"] = c.ResourceID\n\t}\n\treturn &c, nil\n}", "func NewClient(options ...ClientOption) (*Client, error) {\n\tc := &Client{\n\t\thttpClient: http.DefaultClient,\n\t\tbaseURL: DefaultBaseURL,\n\t\tlangs: append([]Lang{DefaultLang}, Langs...),\n\t}\n\tfor _, o := range options {\n\t\to(c)\n\t}\n\ttags := make([]language.Tag, len(c.langs))\n\tfor i, lang := range c.langs {\n\t\ttag, err := language.Parse(string(lang))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttags[i] = tag\n\t}\n\tc.matcher = language.NewMatcher(tags)\n\treturn 
c, nil\n}", "func (auth ClientCredentialsAuth) NewClient(ctx context.Context) Client {\n\tclient := auth.config.Client(ctx)\n\treturn Client{\n\t\tclient: client,\n\t\tbaseURL: BaseURL,\n\t}\n}", "func NewClient(batchGetWaitEstimates, createWaitlistEntry, getWaitlistEntry, deleteWaitlistEntry goa.Endpoint) *Client {\n\treturn &Client{\n\t\tBatchGetWaitEstimatesEndpoint: batchGetWaitEstimates,\n\t\tCreateWaitlistEntryEndpoint: createWaitlistEntry,\n\t\tGetWaitlistEntryEndpoint: getWaitlistEntry,\n\t\tDeleteWaitlistEntryEndpoint: deleteWaitlistEntry,\n\t}\n}", "func NewDomainsClient(tenantId string) *DomainsClient {\n\treturn &DomainsClient{\n\t\tBaseClient: NewClient(Version10, tenantId),\n\t}\n}", "func NewProcessesClient(subscriptionID string) ProcessesClient {\n\treturn NewProcessesClientWithBaseURI(DefaultBaseURI, subscriptionID)\n}", "func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {\n\tclientOpts := defaultGRPCClientOptions()\n\tif newClientHook != nil {\n\t\thookOpts, err := newClientHook(ctx, clientHookParams{})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tclientOpts = append(clientOpts, hookOpts...)\n\t}\n\n\tdisableDeadlines, err := checkDisableDeadlines()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconnPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient := Client{CallOptions: defaultCallOptions()}\n\n\tc := &gRPCClient{\n\t\tconnPool: connPool,\n\t\tdisableDeadlines: disableDeadlines,\n\t\tclient: bigtablepb.NewBigtableClient(connPool),\n\t\tCallOptions: &client.CallOptions,\n\t}\n\tc.setGoogleClientInfo()\n\n\tclient.internalClient = c\n\n\treturn &client, nil\n}", "func NewClient(protocol Protocol, pool Pool) (Client, error) {\n\tfactory, ok := clients[protocol]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"client for protocol '%v' does not exist\", protocol)\n\t}\n\n\treturn factory(pool)\n}", "func newClient() (*storage.Client, error) {\n\tctx := context.Background()\n\n\tbyteKey, err := gcloud.GetDecodedKey()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get gcp key, err: %w\", err)\n\t}\n\tclient, err := storage.NewClient(ctx, option.WithCredentialsJSON(byteKey))\n\tif err != nil {\n\t\tlog.Println(\"failed to login with GCP_KEY, trying with default application credentials...\")\n\t\tclient, err = storage.NewClient(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to open Google Cloud Storage client: %w\", err)\n\t\t}\n\t}\n\n\treturn client, nil\n}", "func (g *Gitlab) NewClient(ctx context.Context, store esv1alpha1.GenericStore, kube kclient.Client, namespace string) (provider.SecretsClient, error) {\n\tstoreSpec := store.GetSpec()\n\tif storeSpec == nil || storeSpec.Provider == nil || storeSpec.Provider.Gitlab == nil {\n\t\treturn nil, fmt.Errorf(\"no store type or wrong store type\")\n\t}\n\tstoreSpecGitlab := storeSpec.Provider.Gitlab\n\n\tcliStore := gClient{\n\t\tkube: kube,\n\t\tstore: storeSpecGitlab,\n\t\tnamespace: namespace,\n\t\tstoreKind: store.GetObjectKind().GroupVersionKind().Kind,\n\t}\n\n\tif err := cliStore.setAuth(ctx); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar err error\n\n\t// Create client options\n\tvar opts []gitlab.ClientOptionFunc\n\tif cliStore.store.URL != \"\" {\n\t\topts = append(opts, gitlab.WithBaseURL(cliStore.store.URL))\n\t}\n\t// ClientOptionFunc from the gitlab package can be mapped with the CRD\n\t// in a similar way to extend functionality of the provider\n\n\t// Create a new Gitlab 
client using credentials and options\n\tgitlabClient, err := gitlab.NewClient(string(cliStore.credentials), opts...)\n\tif err != nil {\n\t\tlog.Logf(\"Failed to create client: %v\", err)\n\t}\n\n\tg.client = gitlabClient.ProjectVariables\n\tg.projectID = cliStore.store.ProjectID\n\n\treturn g, nil\n}", "func NewClient(ctx context.Context, cfg ClientConfig) (*Client, error) {\n\tconfig := &tfe.Config{\n\t\tToken: cfg.Token,\n\t}\n\ttfeClient, err := tfe.NewClient(config)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not create a new TFE tfeClient: %w\", err)\n\t}\n\n\tw, err := tfeClient.Workspaces.Read(ctx, cfg.Organization, cfg.Workspace)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not retrieve workspace '%v/%v': %w\", cfg.Organization, cfg.Workspace, err)\n\t}\n\n\tc := Client{\n\t\tclient: tfeClient,\n\t\tworkspace: w,\n\t}\n\treturn &c, nil\n}", "func NewClient(config *sdk.Config, credential *auth.Credential) *Client {\n\tvar handler sdk.RequestHandler = func(c *sdk.Client, req request.Common) (request.Common, error) {\n\t\terr := req.SetProjectId(PickResourceID(req.GetProjectId()))\n\t\treturn req, err\n\t}\n\tvar (\n\t\tuaccountClient = *uaccount.NewClient(config, credential)\n\t\tuhostClient = *uhost.NewClient(config, credential)\n\t\tunetClient = *unet.NewClient(config, credential)\n\t\tvpcClient = *vpc.NewClient(config, credential)\n\t\tudpnClient = *udpn.NewClient(config, credential)\n\t\tpathxClient = *pathx.NewClient(config, credential)\n\t\tudiskClient = *udisk.NewClient(config, credential)\n\t\tulbClient = *ulb.NewClient(config, credential)\n\t\tudbClient = *udb.NewClient(config, credential)\n\t\tumemClient = *umem.NewClient(config, credential)\n\t\tuphostClient = *uphost.NewClient(config, credential)\n\t\tpuhostClient = *puhost.NewClient(config, credential)\n\t\tpudbClient = *pudb.NewClient(config, credential)\n\t\tpumemClient = *pumem.NewClient(config, credential)\n\t\tppathxClient = *ppathx.NewClient(config, credential)\n\t)\n\n\tuaccountClient.Client.AddRequestHandler(handler)\n\tuhostClient.Client.AddRequestHandler(handler)\n\tunetClient.Client.AddRequestHandler(handler)\n\tvpcClient.Client.AddRequestHandler(handler)\n\tudpnClient.Client.AddRequestHandler(handler)\n\tpathxClient.Client.AddRequestHandler(handler)\n\tudiskClient.Client.AddRequestHandler(handler)\n\tulbClient.Client.AddRequestHandler(handler)\n\tudbClient.Client.AddRequestHandler(handler)\n\tumemClient.Client.AddRequestHandler(handler)\n\tuphostClient.Client.AddRequestHandler(handler)\n\tpuhostClient.Client.AddRequestHandler(handler)\n\tpudbClient.Client.AddRequestHandler(handler)\n\tpumemClient.Client.AddRequestHandler(handler)\n\tppathxClient.Client.AddRequestHandler(handler)\n\n\treturn &Client{\n\t\tuaccountClient,\n\t\tuhostClient,\n\t\tunetClient,\n\t\tvpcClient,\n\t\tudpnClient,\n\t\tpathxClient,\n\t\tudiskClient,\n\t\tulbClient,\n\t\tudbClient,\n\t\tumemClient,\n\t\tuphostClient,\n\t\tpuhostClient,\n\t\tpudbClient,\n\t\tpumemClient,\n\t\tppathxClient,\n\t}\n}" ]
[ "0.5942576", "0.573209", "0.57141393", "0.54691076", "0.53663653", "0.5332414", "0.5331938", "0.53184956", "0.53092986", "0.52872276", "0.5274721", "0.52648884", "0.52600336", "0.5255136", "0.5240479", "0.52287835", "0.51977056", "0.5182397", "0.51636803", "0.5156131", "0.5116386", "0.51113915", "0.5105239", "0.50993115", "0.50757015", "0.5075413", "0.507471", "0.5074036", "0.50378627", "0.5020879", "0.50100976", "0.49984142", "0.4997607", "0.49972013", "0.49884763", "0.4982421", "0.49777135", "0.49741006", "0.49663725", "0.4962417", "0.49534974", "0.4947993", "0.49471518", "0.49378332", "0.49305797", "0.49293706", "0.49263668", "0.49263668", "0.49263668", "0.49243793", "0.492412", "0.49188536", "0.49185416", "0.49184158", "0.49164593", "0.49143887", "0.49129698", "0.49074313", "0.49062622", "0.4897761", "0.48948616", "0.4894256", "0.48908487", "0.48842347", "0.48840714", "0.48835513", "0.488216", "0.48792246", "0.48745388", "0.48727855", "0.48701784", "0.48679662", "0.48658457", "0.4863561", "0.48624408", "0.48524603", "0.4849794", "0.48478934", "0.48435977", "0.48368266", "0.48366392", "0.48352078", "0.48331523", "0.48309857", "0.48286328", "0.48226783", "0.4819294", "0.48162434", "0.48145646", "0.4814161", "0.48112723", "0.48107338", "0.48077616", "0.48054874", "0.48044482", "0.48041838", "0.48023784", "0.47993535", "0.47984073", "0.4794498" ]
0.80434936
0
createOrUpdateCreateRequest creates the CreateOrUpdate request.
func (client *PolicyDefinitionsClient) createOrUpdateCreateRequest(ctx context.Context, policyDefinitionName string, parameters PolicyDefinition, options *PolicyDefinitionsCreateOrUpdateOptions) (*policy.Request, error) { urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}" if policyDefinitionName == "" { return nil, errors.New("parameter policyDefinitionName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{policyDefinitionName}", url.PathEscape(policyDefinitionName)) if client.subscriptionID == "" { return nil, errors.New("parameter client.subscriptionID cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath)) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2021-06-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header.Set("Accept", "application/json") return req, runtime.MarshalAsJSON(req, parameters) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (client *RoleDefinitionsClient) createOrUpdateCreateRequest(ctx context.Context, scope string, roleDefinitionID string, roleDefinition RoleDefinition, options *RoleDefinitionsCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.Authorization/roleDefinitions/{roleDefinitionId}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\tif roleDefinitionID == \"\" {\n\t\treturn nil, errors.New(\"parameter roleDefinitionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{roleDefinitionId}\", url.PathEscape(roleDefinitionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, roleDefinition)\n}", "func (client *ApplyUpdatesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, providerName string, resourceType string, resourceName string, options *ApplyUpdatesClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{providerName}/{resourceType}/{resourceName}/providers/Microsoft.Maintenance/applyUpdates/default\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif providerName == \"\" {\n\t\treturn nil, errors.New(\"parameter providerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{providerName}\", url.PathEscape(providerName))\n\tif resourceType == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceType}\", url.PathEscape(resourceType))\n\tif resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(resourceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *RegistrationDefinitionsClient) createOrUpdateCreateRequest(ctx context.Context, registrationDefinitionID string, scope string, requestBody RegistrationDefinition, options *RegistrationDefinitionsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.ManagedServices/registrationDefinitions/{registrationDefinitionId}\"\n\tif registrationDefinitionID == \"\" {\n\t\treturn nil, errors.New(\"parameter registrationDefinitionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registrationDefinitionId}\", url.PathEscape(registrationDefinitionID))\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\treq, err := 
runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, requestBody)\n}", "func (client *WorkspacesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, parameters Workspace, options *WorkspacesBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *ServersClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, serverName string, parameters Server, options *ServersClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serverName == \"\" {\n\t\treturn nil, errors.New(\"parameter serverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serverName}\", url.PathEscape(serverName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *DefenderSettingsClient) createOrUpdateCreateRequest(ctx context.Context, defenderSettingsModel DefenderSettingsModel, options *DefenderSettingsClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/providers/Microsoft.IoTSecurity/defenderSettings/default\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, defenderSettingsModel)\n}", "func (client *FactoriesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, factoryName string, factory Factory, options *FactoriesClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif factoryName == \"\" {\n\t\treturn nil, errors.New(\"parameter factoryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{factoryName}\", url.PathEscape(factoryName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Raw().Header[\"If-Match\"] = []string{*options.IfMatch}\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, factory)\n}", "func (client *AssociationsClient) createOrUpdateCreateRequest(ctx context.Context, scope string, associationName string, association Association, options *AssociationsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.CustomProviders/associations/{associationName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\tif associationName == \"\" {\n\t\treturn nil, errors.New(\"parameter associationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{associationName}\", url.PathEscape(associationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-09-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, association)\n}", "func (client *CloudServicesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, cloudServiceName string, parameters CloudService, options *CloudServicesClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif cloudServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter cloudServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{cloudServiceName}\", url.PathEscape(cloudServiceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-04\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *IotSecuritySolutionClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, solutionName string, iotSecuritySolutionData IoTSecuritySolutionModel, options *IotSecuritySolutionClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/iotSecuritySolutions/{solutionName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif solutionName == \"\" {\n\t\treturn nil, errors.New(\"parameter solutionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{solutionName}\", url.PathEscape(solutionName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, iotSecuritySolutionData)\n}", "func (client *WebAppsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, name string, siteEnvelope Site, options *WebAppsBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", 
url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, siteEnvelope)\n}", "func (client *APIClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, apiID string, parameters APICreateOrUpdateParameter, options *APIClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif apiID == \"\" {\n\t\treturn nil, errors.New(\"parameter apiID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{apiId}\", url.PathEscape(apiID))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Raw().Header[\"If-Match\"] = []string{*options.IfMatch}\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *VirtualApplianceSitesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, networkVirtualApplianceName string, siteName string, parameters VirtualApplianceSite, options *VirtualApplianceSitesBeginCreateOrUpdateOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}/virtualApplianceSites/{siteName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{networkVirtualApplianceName}\", url.PathEscape(networkVirtualApplianceName))\n\turlPath = strings.ReplaceAll(urlPath, \"{siteName}\", url.PathEscape(siteName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodPut, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-07-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, req.MarshalAsJSON(parameters)\n}", "func (client *UserMetricsKeysClient) 
createOrUpdateCreateRequest(ctx context.Context, options *UserMetricsKeysClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/trafficManagerUserMetricsKeys/default\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *LocalRulestacksClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, resource LocalRulestackResource, options *LocalRulestacksClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif localRulestackName == \"\" {\n\t\treturn nil, errors.New(\"parameter localRulestackName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{localRulestackName}\", url.PathEscape(localRulestackName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-29\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, resource)\n}", "func (client *ClustersClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, parameters Cluster, options *ClustersBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/clusters/{clusterName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", 
\"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *SQLVirtualMachinesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, sqlVirtualMachineName string, parameters SQLVirtualMachine, options *SQLVirtualMachinesClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/{sqlVirtualMachineName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif sqlVirtualMachineName == \"\" {\n\t\treturn nil, errors.New(\"parameter sqlVirtualMachineName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sqlVirtualMachineName}\", url.PathEscape(sqlVirtualMachineName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-03-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *AvailabilitySetsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, availabilitySetName string, parameters AvailabilitySet, options *AvailabilitySetsCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif availabilitySetName == \"\" {\n\t\treturn nil, errors.New(\"parameter availabilitySetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{availabilitySetName}\", url.PathEscape(availabilitySetName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *IncidentsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, incidentID string, incident Incident, options *IncidentsClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/incidents/{incidentId}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\tif incidentID == \"\" {\n\t\treturn nil, errors.New(\"parameter incidentID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{incidentId}\", url.PathEscape(incidentID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-05-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, incident)\n}", "func (client *DevicesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, deviceName string, parameters Device, options *DevicesClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HybridNetwork/devices/{deviceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif deviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter deviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{deviceName}\", url.PathEscape(deviceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *ServerVulnerabilityAssessmentClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, resourceNamespace string, resourceType string, resourceName string, options *ServerVulnerabilityAssessmentClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceNamespace}/{resourceType}/{resourceName}/providers/Microsoft.Security/serverVulnerabilityAssessments/{serverVulnerabilityAssessment}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", 
url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif resourceNamespace == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceNamespace cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceNamespace}\", url.PathEscape(resourceNamespace))\n\tif resourceType == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceType}\", url.PathEscape(resourceType))\n\tif resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(resourceName))\n\turlPath = strings.ReplaceAll(urlPath, \"{serverVulnerabilityAssessment}\", url.PathEscape(\"default\"))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-01-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ConnectedEnvironmentsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, connectedEnvironmentName string, environmentEnvelope ConnectedEnvironment, options *ConnectedEnvironmentsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.App/connectedEnvironments/{connectedEnvironmentName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif connectedEnvironmentName == \"\" {\n\t\treturn nil, errors.New(\"parameter connectedEnvironmentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{connectedEnvironmentName}\", url.PathEscape(connectedEnvironmentName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, environmentEnvelope)\n}", "func (client *InteractionsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, hubName string, interactionName string, parameters InteractionResourceFormat, options *InteractionsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CustomerInsights/hubs/{hubName}/interactions/{interactionName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif hubName == \"\" 
{\n\t\treturn nil, errors.New(\"parameter hubName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{hubName}\", url.PathEscape(hubName))\n\tif interactionName == \"\" {\n\t\treturn nil, errors.New(\"parameter interactionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{interactionName}\", url.PathEscape(interactionName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-04-26\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *RouteTablesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, routeTableName string, parameters RouteTable, options *RouteTablesBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif routeTableName == \"\" {\n\t\treturn nil, errors.New(\"parameter routeTableName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{routeTableName}\", url.PathEscape(routeTableName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *PortalConfigClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, portalConfigID string, ifMatch string, parameters PortalConfigContract, options *PortalConfigClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/portalconfigs/{portalConfigId}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif portalConfigID == \"\" {\n\t\treturn nil, errors.New(\"parameter 
portalConfigID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{portalConfigId}\", url.PathEscape(portalConfigID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"If-Match\"] = []string{ifMatch}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *JobExecutionsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, serverName string, jobAgentName string, jobName string, jobExecutionID string, options *JobExecutionsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/jobAgents/{jobAgentName}/jobs/{jobName}/executions/{jobExecutionId}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serverName == \"\" {\n\t\treturn nil, errors.New(\"parameter serverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serverName}\", url.PathEscape(serverName))\n\tif jobAgentName == \"\" {\n\t\treturn nil, errors.New(\"parameter jobAgentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{jobAgentName}\", url.PathEscape(jobAgentName))\n\tif jobName == \"\" {\n\t\treturn nil, errors.New(\"parameter jobName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{jobName}\", url.PathEscape(jobName))\n\turlPath = strings.ReplaceAll(urlPath, \"{jobExecutionId}\", url.PathEscape(jobExecutionID))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *IPAllocationsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, ipAllocationName string, parameters IPAllocation, options *IPAllocationsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations/{ipAllocationName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif ipAllocationName == \"\" {\n\t\treturn nil, errors.New(\"parameter ipAllocationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ipAllocationName}\", url.PathEscape(ipAllocationName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = 
strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *NotebookWorkspacesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, accountName string, notebookWorkspaceName NotebookWorkspaceName, notebookCreateUpdateParameters NotebookWorkspaceCreateUpdateParameters, options *NotebookWorkspacesBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/notebookWorkspaces/{notebookWorkspaceName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\tif notebookWorkspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter notebookWorkspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{notebookWorkspaceName}\", url.PathEscape(string(notebookWorkspaceName)))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-10-15\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, notebookCreateUpdateParameters)\n}", "func (client *KpiClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, hubName string, kpiName string, parameters KpiResourceFormat, options *KpiClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CustomerInsights/hubs/{hubName}/kpi/{kpiName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif hubName == \"\" {\n\t\treturn nil, errors.New(\"parameter hubName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{hubName}\", url.PathEscape(hubName))\n\tif kpiName == \"\" {\n\t\treturn nil, errors.New(\"parameter kpiName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{kpiName}\", url.PathEscape(kpiName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := 
runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-04-26\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *ManagedInstancesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, managedInstanceName string, parameters ManagedInstance, options *ManagedInstancesClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif managedInstanceName == \"\" {\n\t\treturn nil, errors.New(\"parameter managedInstanceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedInstanceName}\", url.PathEscape(managedInstanceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *DatasetsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, factoryName string, datasetName string, dataset DatasetResource, options *DatasetsClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/datasets/{datasetName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif factoryName == \"\" {\n\t\treturn nil, errors.New(\"parameter factoryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{factoryName}\", url.PathEscape(factoryName))\n\tif datasetName == \"\" {\n\t\treturn nil, errors.New(\"parameter datasetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{datasetName}\", url.PathEscape(datasetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Raw().Header[\"If-Match\"] = 
[]string{*options.IfMatch}\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, dataset)\n}", "func (client *VirtualMachineImageTemplatesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, imageTemplateName string, parameters ImageTemplate, options *VirtualMachineImageTemplatesClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.VirtualMachineImages/imageTemplates/{imageTemplateName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif imageTemplateName == \"\" {\n\t\treturn nil, errors.New(\"parameter imageTemplateName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{imageTemplateName}\", url.PathEscape(imageTemplateName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-10-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *PipelinesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, factoryName string, pipelineName string, pipeline PipelineResource, options *PipelinesClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/pipelines/{pipelineName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif factoryName == \"\" {\n\t\treturn nil, errors.New(\"parameter factoryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{factoryName}\", url.PathEscape(factoryName))\n\tif pipelineName == \"\" {\n\t\treturn nil, errors.New(\"parameter pipelineName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{pipelineName}\", url.PathEscape(pipelineName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Raw().Header[\"If-Match\"] = []string{*options.IfMatch}\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, pipeline)\n}", "func (client *ManagedClustersClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, resourceName string, parameters 
ManagedCluster, options *ManagedClustersClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(resourceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *TagRulesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, monitorName string, ruleSetName string, resource TagRule, options *TagRulesClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Dynatrace.Observability/monitors/{monitorName}/tagRules/{ruleSetName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif monitorName == \"\" {\n\t\treturn nil, errors.New(\"parameter monitorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{monitorName}\", url.PathEscape(monitorName))\n\tif ruleSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter ruleSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ruleSetName}\", url.PathEscape(ruleSetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-27\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, resource); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *GroupClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, groupID string, parameters GroupCreateParameters, options *GroupCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups/{groupId}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, 
\"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif groupID == \"\" {\n\t\treturn nil, errors.New(\"parameter groupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{groupId}\", url.PathEscape(groupID))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Raw().Header.Set(\"If-Match\", *options.IfMatch)\n\t}\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *ManagementAssociationsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, managementAssociationName string, parameters ManagementAssociation, options *ManagementAssociationsCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{providerName}/{resourceType}/{resourceName}/providers/Microsoft.OperationsManagement/ManagementAssociations/{managementAssociationName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.providerName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.providerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{providerName}\", url.PathEscape(client.providerName))\n\tif client.resourceType == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceType}\", url.PathEscape(client.resourceType))\n\tif client.resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(client.resourceName))\n\tif managementAssociationName == \"\" {\n\t\treturn nil, errors.New(\"parameter managementAssociationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementAssociationName}\", url.PathEscape(managementAssociationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2015-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *SparkJobDefinitionClient) createOrUpdateSparkJobDefinitionCreateRequest(ctx context.Context, sparkJobDefinitionName string, sparkJobDefinition 
SparkJobDefinitionResource, options *SparkJobDefinitionBeginCreateOrUpdateSparkJobDefinitionOptions) (*azcore.Request, error) {\n\turlPath := \"/sparkJobDefinitions/{sparkJobDefinitionName}\"\n\tif sparkJobDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter sparkJobDefinitionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sparkJobDefinitionName}\", url.PathEscape(sparkJobDefinitionName))\n\treq, err := azcore.NewRequest(ctx, http.MethodPut, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2019-06-01-preview\")\n\treq.URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Header.Set(\"If-Match\", *options.IfMatch)\n\t}\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, req.MarshalAsJSON(sparkJobDefinition)\n}", "func (client *AgentsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, storageMoverName string, agentName string, agent Agent, options *AgentsClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorageMover/storageMovers/{storageMoverName}/agents/{agentName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif storageMoverName == \"\" {\n\t\treturn nil, errors.New(\"parameter storageMoverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{storageMoverName}\", url.PathEscape(storageMoverName))\n\tif agentName == \"\" {\n\t\treturn nil, errors.New(\"parameter agentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{agentName}\", url.PathEscape(agentName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-07-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, agent)\n}", "func (client *ActionsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, ruleID string, actionID string, action ActionRequest, options *ActionsClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/alertRules/{ruleId}/actions/{actionId}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif workspaceName == \"\" 
{\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\tif ruleID == \"\" {\n\t\treturn nil, errors.New(\"parameter ruleID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ruleId}\", url.PathEscape(ruleID))\n\tif actionID == \"\" {\n\t\treturn nil, errors.New(\"parameter actionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{actionId}\", url.PathEscape(actionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-10-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, action)\n}", "func (client *ScriptExecutionsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, privateCloudName string, scriptExecutionName string, scriptExecution ScriptExecution, options *ScriptExecutionsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/scriptExecutions/{scriptExecutionName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif privateCloudName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateCloudName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateCloudName}\", url.PathEscape(privateCloudName))\n\tif scriptExecutionName == \"\" {\n\t\treturn nil, errors.New(\"parameter scriptExecutionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{scriptExecutionName}\", url.PathEscape(scriptExecutionName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, scriptExecution); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *WebAppsClient) createOrUpdateConfigurationCreateRequest(ctx context.Context, resourceGroupName string, name string, siteConfig SiteConfigResource, options *WebAppsCreateOrUpdateConfigurationOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/web\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, 
errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, siteConfig)\n}", "func (client *FirewallRulesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, serverName string, firewallRuleName string, parameters FirewallRule, options *FirewallRulesBeginCreateOrUpdateOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/servers/{serverName}/firewallRules/{firewallRuleName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serverName == \"\" {\n\t\treturn nil, errors.New(\"parameter serverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serverName}\", url.PathEscape(serverName))\n\tif firewallRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter firewallRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{firewallRuleName}\", url.PathEscape(firewallRuleName))\n\treq, err := azcore.NewRequest(ctx, http.MethodPut, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2017-12-01\")\n\treq.URL.RawQuery = reqQP.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, req.MarshalAsJSON(parameters)\n}", "func (client *SchemaRegistryClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, schemaGroupName string, parameters SchemaGroup, options *SchemaRegistryClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/schemagroups/{schemaGroupName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif schemaGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter schemaGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{schemaGroupName}\", url.PathEscape(schemaGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, 
http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-10-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *AlertProcessingRulesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, alertProcessingRuleName string, alertProcessingRule AlertProcessingRule, options *AlertProcessingRulesClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AlertsManagement/actionRules/{alertProcessingRuleName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif alertProcessingRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter alertProcessingRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{alertProcessingRuleName}\", url.PathEscape(alertProcessingRuleName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-08\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, alertProcessingRule)\n}", "func (client *AgentPoolsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, kubernetesClusterName string, agentPoolName string, agentPoolParameters AgentPool, options *AgentPoolsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetworkCloud/kubernetesClusters/{kubernetesClusterName}/agentPools/{agentPoolName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif kubernetesClusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter kubernetesClusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{kubernetesClusterName}\", url.PathEscape(kubernetesClusterName))\n\tif agentPoolName == \"\" {\n\t\treturn nil, errors.New(\"parameter agentPoolName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{agentPoolName}\", url.PathEscape(agentPoolName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, agentPoolParameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, 
nil\n}", "func (client *SourceControlConfigurationsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, clusterRp string, clusterResourceName string, clusterName string, sourceControlConfigurationName string, sourceControlConfiguration SourceControlConfiguration, options *SourceControlConfigurationsClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/sourceControlConfigurations/{sourceControlConfigurationName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterRp == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterRp cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterRp}\", url.PathEscape(clusterRp))\n\tif clusterResourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterResourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterResourceName}\", url.PathEscape(clusterResourceName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\tif sourceControlConfigurationName == \"\" {\n\t\treturn nil, errors.New(\"parameter sourceControlConfigurationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sourceControlConfigurationName}\", url.PathEscape(sourceControlConfigurationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, sourceControlConfiguration)\n}", "func (client *ServersClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroup string, fluidRelayServerName string, resource Server, options *ServersClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.FluidRelay/fluidRelayServers/{fluidRelayServerName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroup == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroup cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroup}\", url.PathEscape(resourceGroup))\n\tif fluidRelayServerName == \"\" {\n\t\treturn nil, errors.New(\"parameter fluidRelayServerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{fluidRelayServerName}\", url.PathEscape(fluidRelayServerName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, resource)\n}", "func (client *SubscriptionClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, sid string, parameters SubscriptionCreateParameters, options *SubscriptionClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/subscriptions/{sid}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif sid == \"\" {\n\t\treturn nil, errors.New(\"parameter sid cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sid}\", url.PathEscape(sid))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Notify != nil {\n\t\treqQP.Set(\"notify\", strconv.FormatBool(*options.Notify))\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\tif options != nil && options.AppType != nil {\n\t\treqQP.Set(\"appType\", string(*options.AppType))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Raw().Header[\"If-Match\"] = []string{*options.IfMatch}\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *VirtualNetworkTapsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, tapName string, parameters VirtualNetworkTap, options *VirtualNetworkTapsBeginCreateOrUpdateOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{tapName}\", url.PathEscape(tapName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodPut, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-07-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, req.MarshalAsJSON(parameters)\n}", "func (client *ProductPolicyClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, productID string, policyID PolicyIDName, parameters PolicyContract, options 
*ProductPolicyClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/products/{productId}/policies/{policyId}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif productID == \"\" {\n\t\treturn nil, errors.New(\"parameter productID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{productId}\", url.PathEscape(productID))\n\tif policyID == \"\" {\n\t\treturn nil, errors.New(\"parameter policyID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{policyId}\", url.PathEscape(string(policyID)))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Raw().Header[\"If-Match\"] = []string{*options.IfMatch}\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *DedicatedHostsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, parameters DedicatedHost, options *DedicatedHostsBeginCreateOrUpdateOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{hostGroupName}\", url.PathEscape(hostGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{hostName}\", url.PathEscape(hostName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodPut, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, req.MarshalAsJSON(parameters)\n}", "func (client *VideosClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, accountName string, videoName string, parameters VideoEntity, options *VideosClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/videos/{videoName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be 
empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\tif videoName == \"\" {\n\t\treturn nil, errors.New(\"parameter videoName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{videoName}\", url.PathEscape(videoName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *VendorSKUPreviewClient) createOrUpdateCreateRequest(ctx context.Context, vendorName string, skuName string, previewSubscription string, parameters PreviewSubscription, options *VendorSKUPreviewClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.HybridNetwork/vendors/{vendorName}/vendorSkus/{skuName}/previewSubscriptions/{previewSubscription}\"\n\tif vendorName == \"\" {\n\t\treturn nil, errors.New(\"parameter vendorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vendorName}\", url.PathEscape(vendorName))\n\tif skuName == \"\" {\n\t\treturn nil, errors.New(\"parameter skuName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{skuName}\", url.PathEscape(skuName))\n\tif previewSubscription == \"\" {\n\t\treturn nil, errors.New(\"parameter previewSubscription cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{previewSubscription}\", url.PathEscape(previewSubscription))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *CapacityReservationsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, parameters CapacityReservation, options *CapacityReservationsBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}/capacityReservations/{capacityReservationName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif capacityReservationGroupName == \"\" {\n\t\treturn nil, 
errors.New(\"parameter capacityReservationGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{capacityReservationGroupName}\", url.PathEscape(capacityReservationGroupName))\n\tif capacityReservationName == \"\" {\n\t\treturn nil, errors.New(\"parameter capacityReservationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{capacityReservationName}\", url.PathEscape(capacityReservationName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *DedicatedHostsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, parameters DedicatedHost, options *DedicatedHostsBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif hostGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter hostGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{hostGroupName}\", url.PathEscape(hostGroupName))\n\tif hostName == \"\" {\n\t\treturn nil, errors.New(\"parameter hostName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{hostName}\", url.PathEscape(hostName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *HCRPAssignmentsClient) createOrUpdateCreateRequest(ctx context.Context, guestConfigurationAssignmentName string, resourceGroupName string, machineName string, parameters Assignment, options *HCRPAssignmentsClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HybridCompute/machines/{machineName}/providers/Microsoft.GuestConfiguration/guestConfigurationAssignments/{guestConfigurationAssignmentName}\"\n\tif guestConfigurationAssignmentName == \"\" {\n\t\treturn nil, errors.New(\"parameter guestConfigurationAssignmentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{guestConfigurationAssignmentName}\", url.PathEscape(guestConfigurationAssignmentName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter 
client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif machineName == \"\" {\n\t\treturn nil, errors.New(\"parameter machineName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{machineName}\", url.PathEscape(machineName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-25\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *DicomServicesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, dicomServiceName string, dicomservice DicomService, options *DicomServicesClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HealthcareApis/workspaces/{workspaceName}/dicomservices/{dicomServiceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\tif dicomServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter dicomServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{dicomServiceName}\", url.PathEscape(dicomServiceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, dicomservice)\n}", "func (client *WebAppsClient) createOrUpdateHostSecretCreateRequest(ctx context.Context, resourceGroupName string, name string, keyType string, keyName string, key KeyInfo, options *WebAppsCreateOrUpdateHostSecretOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/host/default/{keyType}/{keyName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif keyType == \"\" {\n\t\treturn nil, errors.New(\"parameter keyType cannot be empty\")\n\t}\n\turlPath = 
strings.ReplaceAll(urlPath, \"{keyType}\", url.PathEscape(keyType))\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{keyName}\", url.PathEscape(keyName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, key)\n}", "func (client *MetricAlertsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, ruleName string, parameters MetricAlertResource, options *MetricAlertsClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/metricAlerts/{ruleName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif ruleName == \"\" {\n\t\treturn nil, errors.New(\"parameter ruleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ruleName}\", url.PathEscape(ruleName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *StorageTargetsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, cacheName string, storageTargetName string, storagetarget StorageTarget, options *StorageTargetsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif cacheName == \"\" {\n\t\treturn nil, errors.New(\"parameter cacheName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{cacheName}\", url.PathEscape(cacheName))\n\tif storageTargetName == \"\" {\n\t\treturn nil, errors.New(\"parameter storageTargetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{storageTargetName}\", 
url.PathEscape(storageTargetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, storagetarget)\n}", "func (client *ManagedDatabasesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string, parameters ManagedDatabase, options *ManagedDatabasesClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif managedInstanceName == \"\" {\n\t\treturn nil, errors.New(\"parameter managedInstanceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedInstanceName}\", url.PathEscape(managedInstanceName))\n\tif databaseName == \"\" {\n\t\treturn nil, errors.New(\"parameter databaseName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{databaseName}\", url.PathEscape(databaseName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *NotebookClient) createOrUpdateNotebookCreateRequest(ctx context.Context, notebookName string, notebook NotebookResource, options *NotebookClientBeginCreateOrUpdateNotebookOptions) (*policy.Request, error) {\n\turlPath := \"/notebooks/{notebookName}\"\n\tif notebookName == \"\" {\n\t\treturn nil, errors.New(\"parameter notebookName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{notebookName}\", url.PathEscape(notebookName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.endpoint, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-12-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Raw().Header[\"If-Match\"] = []string{*options.IfMatch}\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, notebook); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *VirtualMachineScaleSetsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters VirtualMachineScaleSet, options *VirtualMachineScaleSetsBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif vmScaleSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter vmScaleSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vmScaleSetName}\", url.PathEscape(vmScaleSetName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *CertificateOrdersClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, certificateOrderName string, certificateDistinguishedName CertificateOrder, options *CertificateOrdersClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif certificateOrderName == \"\" {\n\t\treturn nil, errors.New(\"parameter certificateOrderName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{certificateOrderName}\", url.PathEscape(certificateOrderName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, certificateDistinguishedName)\n}", "func (client *SyncGroupsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, serverName string, databaseName string, syncGroupName string, parameters SyncGroup, options *SyncGroupsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/syncGroups/{syncGroupName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serverName == \"\" {\n\t\treturn nil, errors.New(\"parameter serverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serverName}\", 
url.PathEscape(serverName))\n\tif databaseName == \"\" {\n\t\treturn nil, errors.New(\"parameter databaseName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{databaseName}\", url.PathEscape(databaseName))\n\tif syncGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter syncGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{syncGroupName}\", url.PathEscape(syncGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *DiskEncryptionSetsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet DiskEncryptionSet, options *DiskEncryptionSetsBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif diskEncryptionSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter diskEncryptionSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{diskEncryptionSetName}\", url.PathEscape(diskEncryptionSetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, diskEncryptionSet)\n}", "func (client *ContainerGroupsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, containerGroupName string, containerGroup ContainerGroup, options *ContainerGroupsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerInstance/containerGroups/{containerGroupName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif containerGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter containerGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, 
\"{containerGroupName}\", url.PathEscape(containerGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, containerGroup)\n}", "func (client *ApplicationTypeVersionsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, version string, parameters ApplicationTypeVersionResource, options *ApplicationTypeVersionsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applicationTypes/{applicationTypeName}/versions/{version}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\tif applicationTypeName == \"\" {\n\t\treturn nil, errors.New(\"parameter applicationTypeName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{applicationTypeName}\", url.PathEscape(applicationTypeName))\n\tif version == \"\" {\n\t\treturn nil, errors.New(\"parameter version cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{version}\", url.PathEscape(version))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *IscsiTargetsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, diskPoolName string, iscsiTargetName string, iscsiTargetCreatePayload IscsiTargetCreate, options *IscsiTargetsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StoragePool/diskPools/{diskPoolName}/iscsiTargets/{iscsiTargetName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif diskPoolName == \"\" {\n\t\treturn nil, errors.New(\"parameter diskPoolName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{diskPoolName}\", url.PathEscape(diskPoolName))\n\tif iscsiTargetName == \"\" 
{\n\t\treturn nil, errors.New(\"parameter iscsiTargetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{iscsiTargetName}\", url.PathEscape(iscsiTargetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, iscsiTargetCreatePayload)\n}", "func (client *PublicIPAddressesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, publicIPAddressName string, parameters PublicIPAddress, options *PublicIPAddressesClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif publicIPAddressName == \"\" {\n\t\treturn nil, errors.New(\"parameter publicIPAddressName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{publicIpAddressName}\", url.PathEscape(publicIPAddressName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func CreateOrUpdate(ctx context.Context, client *k8s.Client, req k8s.Resource, options ...k8s.Option) error {\n\tif err := client.Create(ctx, req, options...); err == nil {\n\t\treturn nil\n\t} else if !IsK8sAlreadyExists(err) {\n\t\treturn maskAny(err)\n\t}\n\t// Exists, update it\n\tif err := client.Update(ctx, req, options...); err != nil {\n\t\treturn maskAny(err)\n\t}\n\treturn nil\n}", "func (client *ConfigurationProfilesVersionsClient) createOrUpdateCreateRequest(ctx context.Context, configurationProfileName string, versionName string, resourceGroupName string, parameters ConfigurationProfile, options *ConfigurationProfilesVersionsClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automanage/configurationProfiles/{configurationProfileName}/versions/{versionName}\"\n\tif configurationProfileName == \"\" {\n\t\treturn nil, errors.New(\"parameter configurationProfileName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{configurationProfileName}\", url.PathEscape(configurationProfileName))\n\tif versionName == \"\" {\n\t\treturn nil, errors.New(\"parameter versionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{versionName}\", url.PathEscape(versionName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, 
\"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-05-04\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *PeeringPoliciesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, managedNetworkName string, managedNetworkPeeringPolicyName string, managedNetworkPolicy PeeringPolicy, options *PeeringPoliciesClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetwork/managedNetworks/{managedNetworkName}/managedNetworkPeeringPolicies/{managedNetworkPeeringPolicyName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif managedNetworkName == \"\" {\n\t\treturn nil, errors.New(\"parameter managedNetworkName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedNetworkName}\", url.PathEscape(managedNetworkName))\n\tif managedNetworkPeeringPolicyName == \"\" {\n\t\treturn nil, errors.New(\"parameter managedNetworkPeeringPolicyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedNetworkPeeringPolicyName}\", url.PathEscape(managedNetworkPeeringPolicyName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, managedNetworkPolicy)\n}", "func CreateOrUpdate(ctx context.Context, c client.Client, obj client.Object, f controllerutil.MutateFn) (controllerutil.OperationResult, error) {\n\n\t// check if the name key has to be generated\n\taccessor, err := meta.Accessor(obj)\n\tif err != nil {\n\t\treturn controllerutil.OperationResultNone, err\n\t}\n\tkey := client.ObjectKey{Namespace: accessor.GetNamespace(), Name: accessor.GetName()}\n\n\tif accessor.GetName() == \"\" && accessor.GetGenerateName() != \"\" {\n\t\tif err := Mutate(f, key, obj); err != nil {\n\t\t\treturn controllerutil.OperationResultNone, err\n\t\t}\n\t\tif err := c.Create(ctx, obj); err != nil {\n\t\t\treturn controllerutil.OperationResultNone, err\n\t\t}\n\t\treturn controllerutil.OperationResultCreated, nil\n\t}\n\n\treturn controllerutil.CreateOrUpdate(ctx, c, obj, f)\n}", "func (ro *ResourceOperations) CreateOrUpdate(resourceGroupName string, identity *ResourceIdentity) (*ResourceCreateOrUpdateResult, *AzureOperationResponse, error) {\n\treturn 
nil, nil, nil\n}", "func (client *VirtualRoutersClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, virtualRouterName string, parameters VirtualRouter, options *VirtualRoutersClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif virtualRouterName == \"\" {\n\t\treturn nil, errors.New(\"parameter virtualRouterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{virtualRouterName}\", url.PathEscape(virtualRouterName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func CreateOrUpdate(ctx context.Context, client resource.Interface, obj runtime.Object, mutate MutateFn) (OperationResult, error) {\n\treturn maybeCreateOrUpdate(ctx, client, obj, mutate, opCreate)\n}", "func (client *ApplyUpdatesClient) createOrUpdateParentCreateRequest(ctx context.Context, resourceGroupName string, providerName string, resourceParentType string, resourceParentName string, resourceType string, resourceName string, options *ApplyUpdatesClientCreateOrUpdateParentOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{providerName}/{resourceParentType}/{resourceParentName}/{resourceType}/{resourceName}/providers/Microsoft.Maintenance/applyUpdates/default\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif providerName == \"\" {\n\t\treturn nil, errors.New(\"parameter providerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{providerName}\", url.PathEscape(providerName))\n\tif resourceParentType == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceParentType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceParentType}\", url.PathEscape(resourceParentType))\n\tif resourceParentName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceParentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceParentName}\", url.PathEscape(resourceParentName))\n\tif resourceType == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceType}\", url.PathEscape(resourceType))\n\tif resourceName == \"\" {\n\t\treturn nil, 
errors.New(\"parameter resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(resourceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *FirewallRulesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, cacheName string, ruleName string, parameters FirewallRule, options *FirewallRulesClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redis/{cacheName}/firewallRules/{ruleName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif cacheName == \"\" {\n\t\treturn nil, errors.New(\"parameter cacheName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{cacheName}\", url.PathEscape(cacheName))\n\tif ruleName == \"\" {\n\t\treturn nil, errors.New(\"parameter ruleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ruleName}\", url.PathEscape(ruleName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *RecordSetsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, relativeRecordSetName string, parameters RecordSet, options *RecordSetsCreateOrUpdateOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/{recordType}/{relativeRecordSetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif privateZoneName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateZoneName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateZoneName}\", url.PathEscape(privateZoneName))\n\tif recordType == \"\" {\n\t\treturn nil, errors.New(\"parameter recordType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{recordType}\", url.PathEscape(string(recordType)))\n\tif relativeRecordSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter relativeRecordSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{relativeRecordSetName}\", relativeRecordSetName)\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = 
strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodPut, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Header.Set(\"If-Match\", *options.IfMatch)\n\t}\n\tif options != nil && options.IfNoneMatch != nil {\n\t\treq.Header.Set(\"If-None-Match\", *options.IfNoneMatch)\n\t}\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, req.MarshalAsJSON(parameters)\n}", "func (client *DatasetClient) createOrUpdateDatasetCreateRequest(ctx context.Context, datasetName string, dataset DatasetResource, options *DatasetBeginCreateOrUpdateDatasetOptions) (*azcore.Request, error) {\n\turlPath := \"/datasets/{datasetName}\"\n\tif datasetName == \"\" {\n\t\treturn nil, errors.New(\"parameter datasetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{datasetName}\", url.PathEscape(datasetName))\n\treq, err := azcore.NewRequest(ctx, http.MethodPut, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2019-06-01-preview\")\n\treq.URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Header.Set(\"If-Match\", *options.IfMatch)\n\t}\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, req.MarshalAsJSON(dataset)\n}", "func (client *ManagedDatabaseSecurityAlertPoliciesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string, securityAlertPolicyName SecurityAlertPolicyName, parameters ManagedDatabaseSecurityAlertPolicy, options *ManagedDatabaseSecurityAlertPoliciesClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/securityAlertPolicies/{securityAlertPolicyName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif managedInstanceName == \"\" {\n\t\treturn nil, errors.New(\"parameter managedInstanceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedInstanceName}\", url.PathEscape(managedInstanceName))\n\tif databaseName == \"\" {\n\t\treturn nil, errors.New(\"parameter databaseName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{databaseName}\", url.PathEscape(databaseName))\n\tif securityAlertPolicyName == \"\" {\n\t\treturn nil, errors.New(\"parameter securityAlertPolicyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{securityAlertPolicyName}\", url.PathEscape(string(securityAlertPolicyName)))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := 
req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *AvailabilityGroupListenersClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, sqlVirtualMachineGroupName string, availabilityGroupListenerName string, parameters AvailabilityGroupListener, options *AvailabilityGroupListenersClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachineGroups/{sqlVirtualMachineGroupName}/availabilityGroupListeners/{availabilityGroupListenerName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif sqlVirtualMachineGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter sqlVirtualMachineGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sqlVirtualMachineGroupName}\", url.PathEscape(sqlVirtualMachineGroupName))\n\tif availabilityGroupListenerName == \"\" {\n\t\treturn nil, errors.New(\"parameter availabilityGroupListenerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{availabilityGroupListenerName}\", url.PathEscape(availabilityGroupListenerName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *CustomDomainsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, appName string, domainName string, domainResource CustomDomainResource, options *CustomDomainsBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/domains/{domainName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif appName == \"\" {\n\t\treturn nil, errors.New(\"parameter appName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{appName}\", url.PathEscape(appName))\n\tif domainName == \"\" {\n\t\treturn nil, 
errors.New(\"parameter domainName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{domainName}\", url.PathEscape(domainName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-09-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, domainResource)\n}", "func (client *PermissionBindingsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, permissionBindingName string, permissionBindingInfo PermissionBinding, options *PermissionBindingsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/namespaces/{namespaceName}/permissionBindings/{permissionBindingName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif permissionBindingName == \"\" {\n\t\treturn nil, errors.New(\"parameter permissionBindingName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{permissionBindingName}\", url.PathEscape(permissionBindingName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, permissionBindingInfo)\n}", "func (client *MachineExtensionsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, name string, extensionName string, extensionParameters MachineExtension, options *MachineExtensionsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{name}/extensions/{extensionName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif extensionName == \"\" {\n\t\treturn nil, errors.New(\"parameter extensionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{extensionName}\", 
url.PathEscape(extensionName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-10-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, extensionParameters)\n}", "func (client *GalleryImagesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage GalleryImage, options *GalleryImagesClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif galleryName == \"\" {\n\t\treturn nil, errors.New(\"parameter galleryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{galleryName}\", url.PathEscape(galleryName))\n\tif galleryImageName == \"\" {\n\t\treturn nil, errors.New(\"parameter galleryImageName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{galleryImageName}\", url.PathEscape(galleryImageName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, galleryImage)\n}", "func (client *VirtualNetworkLinksClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string, parameters VirtualNetworkLink, options *VirtualNetworkLinksBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/virtualNetworkLinks/{virtualNetworkLinkName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif privateZoneName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateZoneName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateZoneName}\", url.PathEscape(privateZoneName))\n\tif virtualNetworkLinkName == \"\" {\n\t\treturn nil, errors.New(\"parameter virtualNetworkLinkName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{virtualNetworkLinkName}\", url.PathEscape(virtualNetworkLinkName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, 
runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Raw().Header.Set(\"If-Match\", *options.IfMatch)\n\t}\n\tif options != nil && options.IfNoneMatch != nil {\n\t\treq.Raw().Header.Set(\"If-None-Match\", *options.IfNoneMatch)\n\t}\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *VendorNetworkFunctionsClient) createOrUpdateCreateRequest(ctx context.Context, locationName string, vendorName string, serviceKey string, parameters VendorNetworkFunction, options *VendorNetworkFunctionsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.HybridNetwork/locations/{locationName}/vendors/{vendorName}/networkFunctions/{serviceKey}\"\n\tif locationName == \"\" {\n\t\treturn nil, errors.New(\"parameter locationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{locationName}\", url.PathEscape(locationName))\n\tif vendorName == \"\" {\n\t\treturn nil, errors.New(\"parameter vendorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vendorName}\", url.PathEscape(vendorName))\n\tif serviceKey == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceKey cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceKey}\", url.PathEscape(serviceKey))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *NotificationRecipientEmailClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, notificationName NotificationName, email string, options *NotificationRecipientEmailClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/notifications/{notificationName}/recipientEmails/{email}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif notificationName == \"\" {\n\t\treturn nil, errors.New(\"parameter notificationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{notificationName}\", url.PathEscape(string(notificationName)))\n\tif email == \"\" {\n\t\treturn nil, errors.New(\"parameter email cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{email}\", url.PathEscape(email))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, 
errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *CassandraClustersClient) createUpdateCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, body ClusterResource, options *CassandraClustersClientBeginCreateUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-03-15-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, body)\n}", "func (client *BuildServiceClient) createOrUpdateBuildCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, buildServiceName string, buildName string, buildParam Build, options *BuildServiceClientCreateOrUpdateBuildOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builds/{buildName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif buildServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildServiceName}\", url.PathEscape(buildServiceName))\n\tif buildName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildName}\", url.PathEscape(buildName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, 
runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, buildParam)\n}", "func (client *VirtualMachineScaleSetVMRunCommandsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, runCommandName string, runCommand VirtualMachineRunCommand, options *VirtualMachineScaleSetVMRunCommandsBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands/{runCommandName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif vmScaleSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter vmScaleSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vmScaleSetName}\", url.PathEscape(vmScaleSetName))\n\tif instanceID == \"\" {\n\t\treturn nil, errors.New(\"parameter instanceID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{instanceId}\", url.PathEscape(instanceID))\n\tif runCommandName == \"\" {\n\t\treturn nil, errors.New(\"parameter runCommandName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{runCommandName}\", url.PathEscape(runCommandName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json, text/json\")\n\treturn req, runtime.MarshalAsJSON(req, runCommand)\n}", "func (client *GalleryImageVersionsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion GalleryImageVersion, options *GalleryImageVersionsBeginCreateOrUpdateOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{galleryName}\", url.PathEscape(galleryName))\n\turlPath = strings.ReplaceAll(urlPath, \"{galleryImageName}\", url.PathEscape(galleryImageName))\n\turlPath = strings.ReplaceAll(urlPath, \"{galleryImageVersionName}\", url.PathEscape(galleryImageVersionName))\n\treq, err := azcore.NewRequest(ctx, http.MethodPut, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := 
req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-09-30\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, req.MarshalAsJSON(galleryImageVersion)\n}", "func CreateValuateTemplateRequest() (request *ValuateTemplateRequest) {\n\trequest = &ValuateTemplateRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"BPStudio\", \"2021-09-31\", \"ValuateTemplate\", \"bpstudio\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *PacketCoreDataPlanesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, packetCoreControlPlaneName string, packetCoreDataPlaneName string, parameters PacketCoreDataPlane, options *PacketCoreDataPlanesClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileNetwork/packetCoreControlPlanes/{packetCoreControlPlaneName}/packetCoreDataPlanes/{packetCoreDataPlaneName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif packetCoreControlPlaneName == \"\" {\n\t\treturn nil, errors.New(\"parameter packetCoreControlPlaneName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{packetCoreControlPlaneName}\", url.PathEscape(packetCoreControlPlaneName))\n\tif packetCoreDataPlaneName == \"\" {\n\t\treturn nil, errors.New(\"parameter packetCoreDataPlaneName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{packetCoreDataPlaneName}\", url.PathEscape(packetCoreDataPlaneName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *PrivateDNSZoneGroupsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, privateEndpointName string, privateDNSZoneGroupName string, parameters PrivateDNSZoneGroup, options *PrivateDNSZoneGroupsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateEndpoints/{privateEndpointName}/privateDnsZoneGroups/{privateDnsZoneGroupName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif privateEndpointName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateEndpointName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateEndpointName}\", url.PathEscape(privateEndpointName))\n\tif privateDNSZoneGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateDNSZoneGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateDnsZoneGroupName}\", url.PathEscape(privateDNSZoneGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn 
nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (c *MockRouteTablesClient) CreateOrUpdate(ctx context.Context, resourceGroupName, routeTableName string, parameters network.RouteTable) error {\n\t// Ignore resourceGroupName for simplicity.\n\tif _, ok := c.RTs[routeTableName]; ok {\n\t\treturn fmt.Errorf(\"update not supported\")\n\t}\n\tparameters.Name = &routeTableName\n\tc.RTs[routeTableName] = parameters\n\treturn nil\n}", "func (c *MockPublicIPAddressesClient) CreateOrUpdate(ctx context.Context, resourceGroupName, publicIPAddressName string, parameters network.PublicIPAddress) (*network.PublicIPAddress, error) {\n\tif _, ok := c.PubIPs[publicIPAddressName]; ok {\n\t\treturn nil, fmt.Errorf(\"update not supported\")\n\t}\n\tparameters.Name = &publicIPAddressName\n\tc.PubIPs[publicIPAddressName] = parameters\n\treturn &parameters, nil\n}" ]
[ "0.716981", "0.6974975", "0.69614655", "0.6951513", "0.69285613", "0.68497676", "0.6784493", "0.67738926", "0.6750997", "0.67280304", "0.6690952", "0.6675086", "0.6671258", "0.6670252", "0.664669", "0.6608871", "0.6569229", "0.65341884", "0.6533084", "0.647038", "0.6438516", "0.6425159", "0.6408161", "0.6404987", "0.64041424", "0.6401796", "0.63853526", "0.6384109", "0.63840634", "0.63785195", "0.63676035", "0.6366991", "0.6365389", "0.6337108", "0.63213724", "0.62981117", "0.629042", "0.6267041", "0.6252352", "0.6233127", "0.62270796", "0.62270266", "0.6208398", "0.620413", "0.61944807", "0.6182333", "0.6180793", "0.617838", "0.61696935", "0.6163759", "0.61583614", "0.61542094", "0.61495376", "0.61331797", "0.61249065", "0.6124418", "0.6124305", "0.6111921", "0.6110269", "0.61093163", "0.60906965", "0.60836834", "0.6081216", "0.60785604", "0.60762787", "0.60730594", "0.60691416", "0.60396636", "0.6020484", "0.6006138", "0.5992349", "0.5971212", "0.5949746", "0.59238976", "0.5923075", "0.58981013", "0.5875367", "0.5849852", "0.5845767", "0.5842828", "0.5824946", "0.57579225", "0.5739348", "0.57110316", "0.5697961", "0.5648799", "0.5619693", "0.5611493", "0.56029093", "0.55952024", "0.5584893", "0.55734396", "0.5560341", "0.55541956", "0.55520713", "0.5549516", "0.5504143", "0.55006266", "0.54762214", "0.5465384" ]
0.7014643
1
createOrUpdateHandleResponse handles the CreateOrUpdate response.
func (client *PolicyDefinitionsClient) createOrUpdateHandleResponse(resp *http.Response) (PolicyDefinitionsCreateOrUpdateResponse, error) { result := PolicyDefinitionsCreateOrUpdateResponse{RawResponse: resp} if err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinition); err != nil { return PolicyDefinitionsCreateOrUpdateResponse{}, runtime.NewResponseError(err, resp) } return result, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (client *ActionsClient) createOrUpdateHandleResponse(resp *http.Response) (ActionsClientCreateOrUpdateResponse, error) {\n\tresult := ActionsClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ActionResponse); err != nil {\n\t\treturn ActionsClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ServerVulnerabilityAssessmentClient) createOrUpdateHandleResponse(resp *http.Response) (ServerVulnerabilityAssessmentClientCreateOrUpdateResponse, error) {\n\tresult := ServerVulnerabilityAssessmentClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ServerVulnerabilityAssessment); err != nil {\n\t\treturn ServerVulnerabilityAssessmentClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ApplyUpdatesClient) createOrUpdateHandleResponse(resp *http.Response) (ApplyUpdatesClientCreateOrUpdateResponse, error) {\n\tresult := ApplyUpdatesClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ApplyUpdate); err != nil {\n\t\treturn ApplyUpdatesClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VideosClient) createOrUpdateHandleResponse(resp *http.Response) (VideosClientCreateOrUpdateResponse, error) {\n\tresult := VideosClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VideoEntity); err != nil {\n\t\treturn VideosClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *IotSecuritySolutionClient) createOrUpdateHandleResponse(resp *http.Response) (IotSecuritySolutionClientCreateOrUpdateResponse, error) {\n\tresult := IotSecuritySolutionClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.IoTSecuritySolutionModel); err != nil {\n\t\treturn IotSecuritySolutionClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *RoleDefinitionsClient) createOrUpdateHandleResponse(resp *http.Response) (RoleDefinitionsCreateOrUpdateResponse, error) {\n\tresult := RoleDefinitionsCreateOrUpdateResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RoleDefinition); err != nil {\n\t\treturn RoleDefinitionsCreateOrUpdateResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *PipelinesClient) createOrUpdateHandleResponse(resp *http.Response) (PipelinesClientCreateOrUpdateResponse, error) {\n\tresult := PipelinesClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PipelineResource); err != nil {\n\t\treturn PipelinesClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SchemaRegistryClient) createOrUpdateHandleResponse(resp *http.Response) (SchemaRegistryClientCreateOrUpdateResponse, error) {\n\tresult := SchemaRegistryClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SchemaGroup); err != nil {\n\t\treturn SchemaRegistryClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AvailabilitySetsClient) createOrUpdateHandleResponse(resp *http.Response) (AvailabilitySetsCreateOrUpdateResponse, error) {\n\tresult := AvailabilitySetsCreateOrUpdateResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AvailabilitySet); err != nil {\n\t\treturn AvailabilitySetsCreateOrUpdateResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ServersClient) createOrUpdateHandleResponse(resp *http.Response) 
(ServersClientCreateOrUpdateResponse, error) {\n\tresult := ServersClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Server); err != nil {\n\t\treturn ServersClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DatasetsClient) createOrUpdateHandleResponse(resp *http.Response) (DatasetsClientCreateOrUpdateResponse, error) {\n\tresult := DatasetsClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DatasetResource); err != nil {\n\t\treturn DatasetsClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *HCRPAssignmentsClient) createOrUpdateHandleResponse(resp *http.Response) (HCRPAssignmentsClientCreateOrUpdateResponse, error) {\n\tresult := HCRPAssignmentsClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Assignment); err != nil {\n\t\treturn HCRPAssignmentsClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *MetricAlertsClient) createOrUpdateHandleResponse(resp *http.Response) (MetricAlertsClientCreateOrUpdateResponse, error) {\n\tresult := MetricAlertsClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.MetricAlertResource); err != nil {\n\t\treturn MetricAlertsClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ManagementAssociationsClient) createOrUpdateHandleResponse(resp *http.Response) (ManagementAssociationsCreateOrUpdateResponse, error) {\n\tresult := ManagementAssociationsCreateOrUpdateResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagementAssociation); err != nil {\n\t\treturn ManagementAssociationsCreateOrUpdateResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *IncidentsClient) createOrUpdateHandleResponse(resp *http.Response) (IncidentsClientCreateOrUpdateResponse, error) {\n\tresult := IncidentsClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Incident); err != nil {\n\t\treturn IncidentsClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SourceControlConfigurationsClient) createOrUpdateHandleResponse(resp *http.Response) (SourceControlConfigurationsClientCreateOrUpdateResponse, error) {\n\tresult := SourceControlConfigurationsClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SourceControlConfiguration); err != nil {\n\t\treturn SourceControlConfigurationsClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *PortalConfigClient) createOrUpdateHandleResponse(resp *http.Response) (PortalConfigClientCreateOrUpdateResponse, error) {\n\tresult := PortalConfigClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PortalConfigContract); err != nil {\n\t\treturn PortalConfigClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualApplianceSitesClient) createOrUpdateHandleResponse(resp *azcore.Response) (VirtualApplianceSiteResponse, error) {\n\tvar val *VirtualApplianceSite\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn VirtualApplianceSiteResponse{}, err\n\t}\n\treturn VirtualApplianceSiteResponse{RawResponse: resp.Response, VirtualApplianceSite: val}, nil\n}", "func (client *GroupClient) createOrUpdateHandleResponse(resp *http.Response) (GroupCreateOrUpdateResponse, error) {\n\tresult := GroupCreateOrUpdateResponse{RawResponse: resp}\n\tif val := resp.Header.Get(\"ETag\"); 
val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GroupContract); err != nil {\n\t\treturn GroupCreateOrUpdateResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ManagedDatabaseSecurityAlertPoliciesClient) createOrUpdateHandleResponse(resp *http.Response) (ManagedDatabaseSecurityAlertPoliciesClientCreateOrUpdateResponse, error) {\n\tresult := ManagedDatabaseSecurityAlertPoliciesClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedDatabaseSecurityAlertPolicy); err != nil {\n\t\treturn ManagedDatabaseSecurityAlertPoliciesClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *UserMetricsKeysClient) createOrUpdateHandleResponse(resp *http.Response) (UserMetricsKeysClientCreateOrUpdateResponse, error) {\n\tresult := UserMetricsKeysClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.UserMetricsModel); err != nil {\n\t\treturn UserMetricsKeysClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DefenderSettingsClient) createOrUpdateHandleResponse(resp *http.Response) (DefenderSettingsClientCreateOrUpdateResponse, error) {\n\tresult := DefenderSettingsClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DefenderSettingsModel); err != nil {\n\t\treturn DefenderSettingsClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ConfigurationProfilesVersionsClient) createOrUpdateHandleResponse(resp *http.Response) (ConfigurationProfilesVersionsClientCreateOrUpdateResponse, error) {\n\tresult := ConfigurationProfilesVersionsClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ConfigurationProfile); err != nil {\n\t\treturn ConfigurationProfilesVersionsClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ProductPolicyClient) createOrUpdateHandleResponse(resp *http.Response) (ProductPolicyClientCreateOrUpdateResponse, error) {\n\tresult := ProductPolicyClientCreateOrUpdateResponse{}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PolicyContract); err != nil {\n\t\treturn ProductPolicyClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AgentsClient) createOrUpdateHandleResponse(resp *http.Response) (AgentsClientCreateOrUpdateResponse, error) {\n\tresult := AgentsClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Agent); err != nil {\n\t\treturn AgentsClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) createOrUpdateConfigurationHandleResponse(resp *http.Response) (WebAppsCreateOrUpdateConfigurationResponse, error) {\n\tresult := WebAppsCreateOrUpdateConfigurationResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SiteConfigResource); err != nil {\n\t\treturn WebAppsCreateOrUpdateConfigurationResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *FirewallRulesClient) createOrUpdateHandleResponse(resp *http.Response) (FirewallRulesClientCreateOrUpdateResponse, error) {\n\tresult := FirewallRulesClientCreateOrUpdateResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.FirewallRule); err != nil {\n\t\treturn FirewallRulesClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client 
*RecordSetsClient) createOrUpdateHandleResponse(resp *azcore.Response) (RecordSetResponse, error) {\n\tvar val *RecordSet\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn RecordSetResponse{}, err\n\t}\n\treturn RecordSetResponse{RawResponse: resp.Response, RecordSet: val}, nil\n}", "func (client *FactoriesClient) createOrUpdateHandleResponse(resp *http.Response) (FactoriesClientCreateOrUpdateResponse, error) {\n\tresult := FactoriesClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Factory); err != nil {\n\t\treturn FactoriesClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ApplyUpdatesClient) createOrUpdateParentHandleResponse(resp *http.Response) (ApplyUpdatesClientCreateOrUpdateParentResponse, error) {\n\tresult := ApplyUpdatesClientCreateOrUpdateParentResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ApplyUpdate); err != nil {\n\t\treturn ApplyUpdatesClientCreateOrUpdateParentResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SubscriptionClient) createOrUpdateHandleResponse(resp *http.Response) (SubscriptionClientCreateOrUpdateResponse, error) {\n\tresult := SubscriptionClientCreateOrUpdateResponse{}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SubscriptionContract); err != nil {\n\t\treturn SubscriptionClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AlertProcessingRulesClient) createOrUpdateHandleResponse(resp *http.Response) (AlertProcessingRulesClientCreateOrUpdateResponse, error) {\n\tresult := AlertProcessingRulesClientCreateOrUpdateResponse{RawResponse: resp}\n\tif val := resp.Header.Get(\"x-ms-request-id\"); val != \"\" {\n\t\tresult.XMSRequestID = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AlertProcessingRule); err != nil {\n\t\treturn AlertProcessingRulesClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *GalleryImageVersionsClient) createOrUpdateHandleResponse(resp *azcore.Response) (GalleryImageVersionResponse, error) {\n\tvar val *GalleryImageVersion\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn GalleryImageVersionResponse{}, err\n\t}\n\treturn GalleryImageVersionResponse{RawResponse: resp.Response, GalleryImageVersion: val}, nil\n}", "func (client *WebAppsClient) createOrUpdateConfigurationSlotHandleResponse(resp *http.Response) (WebAppsCreateOrUpdateConfigurationSlotResponse, error) {\n\tresult := WebAppsCreateOrUpdateConfigurationSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SiteConfigResource); err != nil {\n\t\treturn WebAppsCreateOrUpdateConfigurationSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client VersionsClient) CreateOrUpdateResponder(resp *http.Response) (result VersionTemplatespecs, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *NotificationRecipientEmailClient) createOrUpdateHandleResponse(resp *http.Response) (NotificationRecipientEmailClientCreateOrUpdateResponse, error) {\n\tresult := NotificationRecipientEmailClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RecipientEmailContract); err != nil {\n\t\treturn 
NotificationRecipientEmailClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *PolicyDefinitionsClient) createOrUpdateAtManagementGroupHandleResponse(resp *http.Response) (PolicyDefinitionsCreateOrUpdateAtManagementGroupResponse, error) {\n\tresult := PolicyDefinitionsCreateOrUpdateAtManagementGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinition); err != nil {\n\t\treturn PolicyDefinitionsCreateOrUpdateAtManagementGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *DedicatedHostsClient) createOrUpdateHandleResponse(resp *azcore.Response) (DedicatedHostResponse, error) {\n\tvar val *DedicatedHost\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn DedicatedHostResponse{}, err\n\t}\n\treturn DedicatedHostResponse{RawResponse: resp.Response, DedicatedHost: val}, nil\n}", "func (client *WebAppsClient) createOrUpdateHostSecretHandleResponse(resp *http.Response) (WebAppsCreateOrUpdateHostSecretResponse, error) {\n\tresult := WebAppsCreateOrUpdateHostSecretResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyInfo); err != nil {\n\t\treturn WebAppsCreateOrUpdateHostSecretResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WCFRelaysClient) createOrUpdateHandleResponse(resp *http.Response) (WCFRelaysClientCreateOrUpdateResponse, error) {\n\tresult := WCFRelaysClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.WcfRelay); err != nil {\n\t\treturn WCFRelaysClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) createOrUpdateHostSecretSlotHandleResponse(resp *http.Response) (WebAppsCreateOrUpdateHostSecretSlotResponse, error) {\n\tresult := WebAppsCreateOrUpdateHostSecretSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyInfo); err != nil {\n\t\treturn WebAppsCreateOrUpdateHostSecretSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *OutputsClient) createOrReplaceHandleResponse(resp *http.Response) (OutputsClientCreateOrReplaceResponse, error) {\n\tresult := OutputsClientCreateOrReplaceResponse{}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Output); err != nil {\n\t\treturn OutputsClientCreateOrReplaceResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client JobClient) CreateOrUpdateResponder(resp *http.Response) (result JobResourceDescription, err error) {\n err = autorest.Respond(\n resp,\n azure.WithErrorUnlessStatusCode(http.StatusOK,http.StatusCreated,http.StatusAccepted),\n autorest.ByUnmarshallingJSON(&result),\n autorest.ByClosing())\n result.Response = autorest.Response{Response: resp}\n return\n }", "func (client *WebAppsClient) createOrUpdateFunctionSecretHandleResponse(resp *http.Response) (WebAppsCreateOrUpdateFunctionSecretResponse, error) {\n\tresult := WebAppsCreateOrUpdateFunctionSecretResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyInfo); err != nil {\n\t\treturn WebAppsCreateOrUpdateFunctionSecretResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client AccountClient) CreateOrUpdateResponder(resp *http.Response) (result AccountResourceDescription, err error) {\n err = autorest.Respond(\n resp,\n 
azure.WithErrorUnlessStatusCode(http.StatusOK,http.StatusCreated,http.StatusAccepted),\n autorest.ByUnmarshallingJSON(&result),\n autorest.ByClosing())\n result.Response = autorest.Response{Response: resp}\n return\n }", "func (client *OutputsClient) createOrReplaceHandleResponse(resp *http.Response) (OutputsCreateOrReplaceResponse, error) {\n\tresult := OutputsCreateOrReplaceResponse{RawResponse: resp}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Output); err != nil {\n\t\treturn OutputsCreateOrReplaceResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) createOrUpdateFunctionSecretSlotHandleResponse(resp *http.Response) (WebAppsCreateOrUpdateFunctionSecretSlotResponse, error) {\n\tresult := WebAppsCreateOrUpdateFunctionSecretSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyInfo); err != nil {\n\t\treturn WebAppsCreateOrUpdateFunctionSecretSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *BuildServiceClient) createOrUpdateBuildHandleResponse(resp *http.Response) (BuildServiceClientCreateOrUpdateBuildResponse, error) {\n\tresult := BuildServiceClientCreateOrUpdateBuildResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Build); err != nil {\n\t\treturn BuildServiceClientCreateOrUpdateBuildResponse{}, err\n\t}\n\treturn result, nil\n}", "func (h Handlers[R, T]) CreateOrUpdateResource(r *http.Request) (HandlerResponse, error) {\n\tvar response HandlerResponse\n\tpayload, err := request.Resource[R](r)\n\tif err != nil {\n\t\treturn response, actions.NewError(actions.InvalidArgument, err)\n\t}\n\tmeta := payload.GetMetadata()\n\n\tif meta == nil {\n\t\treturn response, actions.NewError(actions.InvalidArgument, errors.New(\"nil metadata\"))\n\t}\n\n\tif err := checkMeta(*meta, mux.Vars(r), \"id\"); err != nil {\n\t\treturn response, actions.NewError(actions.InvalidArgument, err)\n\t}\n\n\tctx, err := matchHeaderContext(r)\n\tif err != nil {\n\t\treturn response, actions.NewErrorf(actions.InvalidArgument, err)\n\t}\n\tctx = storev2.ContextWithTxInfo(ctx, &response.TxInfo)\n\n\tif claims := jwt.GetClaimsFromContext(ctx); claims != nil {\n\t\tmeta.CreatedBy = claims.StandardClaims.Subject\n\t}\n\n\tgstore := storev2.Of[R](h.Store)\n\n\tif err := gstore.CreateOrUpdate(ctx, payload); err != nil {\n\t\tswitch err := err.(type) {\n\t\tcase *store.ErrPreconditionFailed:\n\t\t\treturn response, actions.NewError(actions.PreconditionFailed, err)\n\t\tcase *store.ErrNotValid:\n\t\t\treturn response, actions.NewError(actions.InvalidArgument, err)\n\t\tdefault:\n\t\t\treturn response, actions.NewError(actions.InternalErr, err)\n\t\t}\n\t}\n\n\treturn response, nil\n}", "func (client IotHubResourceClient) CreateOrUpdateResponder(resp *http.Response) (result IotHubDescription, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *VirtualNetworkTapsClient) createOrUpdateHandleResponse(resp *azcore.Response) (VirtualNetworkTapResponse, error) {\n\tvar val *VirtualNetworkTap\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn VirtualNetworkTapResponse{}, err\n\t}\n\treturn VirtualNetworkTapResponse{RawResponse: resp.Response, 
VirtualNetworkTap: val}, nil\n}", "func (client *WebAppsClient) createOrUpdateHostNameBindingHandleResponse(resp *http.Response) (WebAppsCreateOrUpdateHostNameBindingResponse, error) {\n\tresult := WebAppsCreateOrUpdateHostNameBindingResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.HostNameBinding); err != nil {\n\t\treturn WebAppsCreateOrUpdateHostNameBindingResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client DeploymentsClient) CreateOrUpdateResponder(resp *http.Response) (result DeploymentResource, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client Client) CreateOrUpdateResponder(resp *http.Response) (result ResourceWithAccessKey, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *WebAppsClient) createOrUpdateHybridConnectionHandleResponse(resp *http.Response) (WebAppsCreateOrUpdateHybridConnectionResponse, error) {\n\tresult := WebAppsCreateOrUpdateHybridConnectionResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.HybridConnection); err != nil {\n\t\treturn WebAppsCreateOrUpdateHybridConnectionResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) createOrUpdateDomainOwnershipIdentifierHandleResponse(resp *http.Response) (WebAppsCreateOrUpdateDomainOwnershipIdentifierResponse, error) {\n\tresult := WebAppsCreateOrUpdateDomainOwnershipIdentifierResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Identifier); err != nil {\n\t\treturn WebAppsCreateOrUpdateDomainOwnershipIdentifierResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client ServicesClient) CreateOrUpdateResponder(resp *http.Response) (result ServiceResource, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *WCFRelaysClient) createOrUpdateAuthorizationRuleHandleResponse(resp *http.Response) (WCFRelaysClientCreateOrUpdateAuthorizationRuleResponse, error) {\n\tresult := WCFRelaysClientCreateOrUpdateAuthorizationRuleResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AuthorizationRule); err != nil {\n\t\treturn WCFRelaysClientCreateOrUpdateAuthorizationRuleResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) createOrUpdateHostNameBindingSlotHandleResponse(resp *http.Response) (WebAppsCreateOrUpdateHostNameBindingSlotResponse, error) {\n\tresult := WebAppsCreateOrUpdateHostNameBindingSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.HostNameBinding); err != nil {\n\t\treturn WebAppsCreateOrUpdateHostNameBindingSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) createOrUpdateVnetConnectionHandleResponse(resp *http.Response) 
(WebAppsCreateOrUpdateVnetConnectionResponse, error) {\n\tresult := WebAppsCreateOrUpdateVnetConnectionResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VnetInfoResource); err != nil {\n\t\treturn WebAppsCreateOrUpdateVnetConnectionResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) createOrUpdateDomainOwnershipIdentifierSlotHandleResponse(resp *http.Response) (WebAppsCreateOrUpdateDomainOwnershipIdentifierSlotResponse, error) {\n\tresult := WebAppsCreateOrUpdateDomainOwnershipIdentifierSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Identifier); err != nil {\n\t\treturn WebAppsCreateOrUpdateDomainOwnershipIdentifierSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client ViewsClient) CreateOrUpdateResponder(resp *http.Response) (result View, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *WebAppsClient) createOrUpdateHybridConnectionSlotHandleResponse(resp *http.Response) (WebAppsCreateOrUpdateHybridConnectionSlotResponse, error) {\n\tresult := WebAppsCreateOrUpdateHybridConnectionSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.HybridConnection); err != nil {\n\t\treturn WebAppsCreateOrUpdateHybridConnectionSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client LabClient) CreateOrUpdateResourceResponder(resp *http.Response) (result Lab, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (h Handler) Create(res http.ResponseWriter, req *http.Request) {\n\tdefer req.Body.Close()\n\tvar body []byte\n\t_, readErr := req.Body.Read(body)\n\tif readErr != nil {\n\t\tres.WriteHeader(400)\n\t\tres.Write([]byte(\"400 Bad Request\"))\n\t\tlog.Println(readErr.Error())\n\t\treturn\n\t}\n\tvar bodyMap map[string]interface{}\n\tmarshErr := json.Unmarshal(body, bodyMap)\n\tif marshErr != nil {\n\t\tres.WriteHeader(400)\n\t\tres.Write([]byte(\"400 Bad Request\"))\n\t\tlog.Println(marshErr.Error())\n\t}\n\tvar key string\n\tif bodyMap[\"Name\"] != nil {\n\t\tkey = serviceStateKey(bodyMap)\n\t} else {\n\t\tkey = projectStateKey(bodyMap)\n\t}\n\terrChan := make(chan error)\n\th.Store.Save(key, bodyMap, func(err error) {\n\t\terrChan <- err\n\t})\n\tsaveErr := <-errChan\n\tif saveErr != nil {\n\t\tres.WriteHeader(500)\n\t\tres.Write([]byte(\"500 Internal Error\"))\n\t\treturn\n\t}\n\tres.WriteHeader(201)\n\tres.Write([]byte(\"201 Created\"))\n\th.Running.CheckIn(bodyMap[\"Project\"].(string), bodyMap[\"Branch\"].(string))\n}", "func (client OpenShiftManagedClustersClient) CreateOrUpdateResponder(resp *http.Response) (result v20180930preview.OpenShiftManagedCluster, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client 
RosettaNetProcessConfigurationsClient) CreateOrUpdateResponder(resp *http.Response) (result IntegrationAccountRosettaNetProcessConfiguration, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client DatabasesClient) CreateOrUpdateResponder(resp *http.Response) (result Database, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client MSIXPackagesClient) CreateOrUpdateResponder(resp *http.Response) (result MSIXPackage, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *WebAppsClient) createOrUpdateVnetConnectionSlotHandleResponse(resp *http.Response) (WebAppsCreateOrUpdateVnetConnectionSlotResponse, error) {\n\tresult := WebAppsCreateOrUpdateVnetConnectionSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VnetInfoResource); err != nil {\n\t\treturn WebAppsCreateOrUpdateVnetConnectionSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client ViewsClient) CreateOrUpdateByScopeResponder(resp *http.Response) (result View, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *KeyVaultClient) createKeyHandleResponse(resp *http.Response) (KeyVaultClientCreateKeyResponse, error) {\n\tresult := KeyVaultClientCreateKeyResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil {\n\t\treturn KeyVaultClientCreateKeyResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client ReferenceDataSetsClient) CreateOrUpdateResponder(resp *http.Response) (result ReferenceDataSetResource, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func CreateModifyWebLockCreateConfigResponse() (response *ModifyWebLockCreateConfigResponse) {\n\tresponse = &ModifyWebLockCreateConfigResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (client *WebAppsClient) createOrUpdateVnetConnectionGatewayHandleResponse(resp *http.Response) (WebAppsCreateOrUpdateVnetConnectionGatewayResponse, error) {\n\tresult := WebAppsCreateOrUpdateVnetConnectionGatewayResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VnetGateway); err != nil {\n\t\treturn WebAppsCreateOrUpdateVnetConnectionGatewayResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *RoleAssignmentsClient) createHandleResponse(resp *http.Response) (RoleAssignmentsCreateResponse, error) {\n\tresult := RoleAssignmentsCreateResponse{RawResponse: 
resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RoleAssignment); err != nil {\n\t\treturn RoleAssignmentsCreateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *KeyVaultClient) createKeyHandleResponse(resp *http.Response) (KeyVaultClientCreateKeyResponse, error) {\n\tresult := KeyVaultClientCreateKeyResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil {\n\t\treturn KeyVaultClientCreateKeyResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client StorageTargetsClient) CreateOrUpdateResponder(resp *http.Response) (result StorageTarget, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func CreateOrUpdate(ctx context.Context, client resource.Interface, obj runtime.Object, mutate MutateFn) (OperationResult, error) {\n\treturn maybeCreateOrUpdate(ctx, client, obj, mutate, opCreate)\n}", "func (client *WebAppsClient) createOrUpdatePublicCertificateSlotHandleResponse(resp *http.Response) (WebAppsCreateOrUpdatePublicCertificateSlotResponse, error) {\n\tresult := WebAppsCreateOrUpdatePublicCertificateSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PublicCertificate); err != nil {\n\t\treturn WebAppsCreateOrUpdatePublicCertificateSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client AccountQuotaPolicyClient) CreateOrUpdateResponder(resp *http.Response) (result AccountQuotaPolicyResourceDescription, err error) {\n err = autorest.Respond(\n resp,\n azure.WithErrorUnlessStatusCode(http.StatusOK,http.StatusCreated,http.StatusAccepted),\n autorest.ByUnmarshallingJSON(&result),\n autorest.ByClosing())\n result.Response = autorest.Response{Response: resp}\n return\n }", "func (client JobClient) UpdateResponder(resp *http.Response) (result JobResourceDescription, err error) {\n err = autorest.Respond(\n resp,\n azure.WithErrorUnlessStatusCode(http.StatusOK,http.StatusCreated,http.StatusAccepted),\n autorest.ByUnmarshallingJSON(&result),\n autorest.ByClosing())\n result.Response = autorest.Response{Response: resp}\n return\n }", "func (client FirewallPolicyRuleGroupsClient) CreateOrUpdateResponder(resp *http.Response) (result FirewallPolicyRuleGroup, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *WebAppsClient) createOrUpdateVnetConnectionGatewaySlotHandleResponse(resp *http.Response) (WebAppsCreateOrUpdateVnetConnectionGatewaySlotResponse, error) {\n\tresult := WebAppsCreateOrUpdateVnetConnectionGatewaySlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VnetGateway); err != nil {\n\t\treturn WebAppsCreateOrUpdateVnetConnectionGatewaySlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func CreateOrUpdate(ctx context.Context, client *k8s.Client, req k8s.Resource, options ...k8s.Option) error {\n\tif err := client.Create(ctx, req, options...); err == nil {\n\t\treturn nil\n\t} else if !IsK8sAlreadyExists(err) {\n\t\treturn maskAny(err)\n\t}\n\t// Exists, update it\n\tif err := client.Update(ctx, req, options...); err != nil 
{\n\t\treturn maskAny(err)\n\t}\n\treturn nil\n}", "func (ro *ResourceOperations) CreateOrUpdate(resourceGroupName string, identity *ResourceIdentity) (*ResourceCreateOrUpdateResult, *AzureOperationResponse, error) {\n\treturn nil, nil, nil\n}", "func (client *DataCollectionEndpointsClient) createHandleResponse(resp *http.Response) (DataCollectionEndpointsCreateResponse, error) {\n\tresult := DataCollectionEndpointsCreateResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DataCollectionEndpointResource); err != nil {\n\t\treturn DataCollectionEndpointsCreateResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client HTTPSuccessClient) Put201Responder(resp *http.Response) (result autorest.Response, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK,http.StatusCreated),\n autorest.ByClosing())\n result.Response = resp\n return\n}", "func (client *DataCollectionEndpointsClient) createHandleResponse(resp *http.Response) (DataCollectionEndpointsClientCreateResponse, error) {\n\tresult := DataCollectionEndpointsClientCreateResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DataCollectionEndpointResource); err != nil {\n\t\treturn DataCollectionEndpointsClientCreateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) createOrUpdatePublicCertificateHandleResponse(resp *http.Response) (WebAppsCreateOrUpdatePublicCertificateResponse, error) {\n\tresult := WebAppsCreateOrUpdatePublicCertificateResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PublicCertificate); err != nil {\n\t\treturn WebAppsCreateOrUpdatePublicCertificateResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func CreateResponse(w *gin.Context, payload interface{}) {\n\tw.JSON(200, payload)\n}", "func CreateUpdateHookConfigurationResponse() (response *UpdateHookConfigurationResponse) {\n\tresponse = &UpdateHookConfigurationResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (client DataFlowClient) CreateOrUpdateDataFlowResponder(resp *http.Response) (result DataFlowResource, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (h *EcrHandler) Create(obj interface{}) error {\n\treturn h.Upsert(obj)\n}", "func (client MeshNetworkClient) CreateOrUpdateResponder(resp *http.Response) (result NetworkResourceDescription, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func CreateCreateVmAndSaveStockResponse() (response *CreateVmAndSaveStockResponse) {\n\tresponse = &CreateVmAndSaveStockResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateOrUpdate(ctx context.Context, c client.Client, obj client.Object, f controllerutil.MutateFn) (controllerutil.OperationResult, error) {\n\n\t// check if the name key has to be generated\n\taccessor, err := meta.Accessor(obj)\n\tif err != nil {\n\t\treturn controllerutil.OperationResultNone, err\n\t}\n\tkey := 
client.ObjectKey{Namespace: accessor.GetNamespace(), Name: accessor.GetName()}\n\n\tif accessor.GetName() == \"\" && accessor.GetGenerateName() != \"\" {\n\t\tif err := Mutate(f, key, obj); err != nil {\n\t\t\treturn controllerutil.OperationResultNone, err\n\t\t}\n\t\tif err := c.Create(ctx, obj); err != nil {\n\t\t\treturn controllerutil.OperationResultNone, err\n\t\t}\n\t\treturn controllerutil.OperationResultCreated, nil\n\t}\n\n\treturn controllerutil.CreateOrUpdate(ctx, c, obj, f)\n}", "func (client *WebAppsClient) createOrUpdateRelayServiceConnectionHandleResponse(resp *http.Response) (WebAppsCreateOrUpdateRelayServiceConnectionResponse, error) {\n\tresult := WebAppsCreateOrUpdateRelayServiceConnectionResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RelayServiceConnectionEntity); err != nil {\n\t\treturn WebAppsCreateOrUpdateRelayServiceConnectionResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) createDeploymentHandleResponse(resp *http.Response) (WebAppsCreateDeploymentResponse, error) {\n\tresult := WebAppsCreateDeploymentResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Deployment); err != nil {\n\t\treturn WebAppsCreateDeploymentResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client DatasetClient) CreateResponder(resp *http.Response) (result LongRunningOperationResult, err error) {\n err = autorest.Respond(\n resp,\n azure.WithErrorUnlessStatusCode(http.StatusOK,http.StatusAccepted),\n autorest.ByUnmarshallingJSON(&result),\n autorest.ByClosing())\n result.Response = autorest.Response{Response: resp}\n return\n }" ]
[ "0.77074736", "0.76244164", "0.7601424", "0.75650877", "0.7548688", "0.7540541", "0.75193167", "0.73697567", "0.73658377", "0.730312", "0.72981757", "0.7290304", "0.7289147", "0.7269985", "0.7257409", "0.72332484", "0.7225668", "0.7180523", "0.71755373", "0.7170585", "0.7164116", "0.71528697", "0.7144813", "0.71379364", "0.7057408", "0.7053598", "0.7051868", "0.7044911", "0.70046777", "0.6861829", "0.6792189", "0.6787576", "0.6775036", "0.673027", "0.66908556", "0.6678232", "0.6637656", "0.6635138", "0.6608939", "0.65854335", "0.65078104", "0.64945763", "0.6424815", "0.64120936", "0.6369942", "0.63507026", "0.63275826", "0.6324052", "0.6289827", "0.6252482", "0.617543", "0.61704195", "0.6154954", "0.612679", "0.6099428", "0.60909563", "0.6085369", "0.60744244", "0.6053201", "0.6027354", "0.60106343", "0.5997734", "0.5961734", "0.5939962", "0.58722866", "0.5866562", "0.5859427", "0.58566344", "0.58231425", "0.5772985", "0.57705754", "0.57564515", "0.5734916", "0.57283205", "0.5728004", "0.57258636", "0.5724381", "0.5707758", "0.569332", "0.563812", "0.5634672", "0.5607102", "0.5604422", "0.5598565", "0.5596671", "0.55762845", "0.5560141", "0.55279386", "0.5499332", "0.5461264", "0.5457536", "0.54386693", "0.54368436", "0.5420585", "0.53934664", "0.53895193", "0.53845793", "0.5346732", "0.5314478", "0.5297656" ]
0.7500699
7
createOrUpdateAtManagementGroupCreateRequest creates the CreateOrUpdateAtManagementGroup request.
func (client *PolicyDefinitionsClient) createOrUpdateAtManagementGroupCreateRequest(ctx context.Context, policyDefinitionName string, managementGroupID string, parameters PolicyDefinition, options *PolicyDefinitionsCreateOrUpdateAtManagementGroupOptions) (*policy.Request, error) { urlPath := "/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}" if policyDefinitionName == "" { return nil, errors.New("parameter policyDefinitionName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{policyDefinitionName}", url.PathEscape(policyDefinitionName)) if managementGroupID == "" { return nil, errors.New("parameter managementGroupID cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{managementGroupId}", url.PathEscape(managementGroupID)) req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath)) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2021-06-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header.Set("Accept", "application/json") return req, runtime.MarshalAsJSON(req, parameters) }
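A minimal sketch of how a generated request builder like the one above is typically consumed by the exported method on the same client, assuming the surrounding generated armpolicy-style types, a pipeline field named pl, and the createOrUpdateAtManagementGroupHandleResponse helper that appears among the negatives below; the field name, expected status codes, and NewResponseError signature vary between SDK versions, so this is illustrative rather than the exact generated code.

// Sketch only: assumes the generated PolicyDefinitionsClient carries a runtime pipeline in client.pl
// and that the matching createOrUpdateAtManagementGroupHandleResponse helper exists; names are assumptions.
func (client *PolicyDefinitionsClient) CreateOrUpdateAtManagementGroup(ctx context.Context, policyDefinitionName string, managementGroupID string, parameters PolicyDefinition, options *PolicyDefinitionsCreateOrUpdateAtManagementGroupOptions) (PolicyDefinitionsCreateOrUpdateAtManagementGroupResponse, error) {
	// Build the HTTP request with the internal helper shown above.
	req, err := client.createOrUpdateAtManagementGroupCreateRequest(ctx, policyDefinitionName, managementGroupID, parameters, options)
	if err != nil {
		return PolicyDefinitionsCreateOrUpdateAtManagementGroupResponse{}, err
	}
	// Send the request through the client's pipeline (field name assumed to be pl).
	resp, err := client.pl.Do(req)
	if err != nil {
		return PolicyDefinitionsCreateOrUpdateAtManagementGroupResponse{}, err
	}
	// Success codes for this PUT are assumed to be 200/201.
	if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) {
		return PolicyDefinitionsCreateOrUpdateAtManagementGroupResponse{}, runtime.NewResponseError(resp)
	}
	// Unmarshal the returned PolicyDefinition payload.
	return client.createOrUpdateAtManagementGroupHandleResponse(resp)
}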
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (client *GroupClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, groupID string, parameters GroupCreateParameters, options *GroupCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups/{groupId}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif groupID == \"\" {\n\t\treturn nil, errors.New(\"parameter groupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{groupId}\", url.PathEscape(groupID))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Raw().Header.Set(\"If-Match\", *options.IfMatch)\n\t}\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *PolicyDefinitionsClient) getAtManagementGroupCreateRequest(ctx context.Context, policyDefinitionName string, managementGroupID string, options *PolicyDefinitionsGetAtManagementGroupOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}\"\n\tif policyDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter policyDefinitionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{policyDefinitionName}\", url.PathEscape(policyDefinitionName))\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ContainerGroupsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, containerGroupName string, containerGroup ContainerGroup, options *ContainerGroupsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerInstance/containerGroups/{containerGroupName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == 
\"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif containerGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter containerGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{containerGroupName}\", url.PathEscape(containerGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, containerGroup)\n}", "func (c *MockResourceGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, parameters resources.Group) error {\n\tparameters.Name = &resourceGroupName\n\tc.RGs[resourceGroupName] = parameters\n\treturn nil\n}", "func (client *SyncGroupsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, serverName string, databaseName string, syncGroupName string, parameters SyncGroup, options *SyncGroupsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/syncGroups/{syncGroupName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serverName == \"\" {\n\t\treturn nil, errors.New(\"parameter serverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serverName}\", url.PathEscape(serverName))\n\tif databaseName == \"\" {\n\t\treturn nil, errors.New(\"parameter databaseName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{databaseName}\", url.PathEscape(databaseName))\n\tif syncGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter syncGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{syncGroupName}\", url.PathEscape(syncGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (c GroupClient) CreateOrUpdate(ctx context.Context, resourceGroupName, region string) error {\n\tif _, err := c.client.CreateOrUpdate(ctx, resourceGroupName, resources.Group{\n\t\tLocation: &region,\n\t}); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (client *PolicyDefinitionsClient) deleteAtManagementGroupCreateRequest(ctx context.Context, policyDefinitionName string, managementGroupID string, options *PolicyDefinitionsDeleteAtManagementGroupOptions) (*policy.Request, error) {\n\turlPath := 
\"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}\"\n\tif policyDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter policyDefinitionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{policyDefinitionName}\", url.PathEscape(policyDefinitionName))\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ManagementAssociationsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, managementAssociationName string, parameters ManagementAssociation, options *ManagementAssociationsCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{providerName}/{resourceType}/{resourceName}/providers/Microsoft.OperationsManagement/ManagementAssociations/{managementAssociationName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.providerName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.providerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{providerName}\", url.PathEscape(client.providerName))\n\tif client.resourceType == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceType}\", url.PathEscape(client.resourceType))\n\tif client.resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(client.resourceName))\n\tif managementAssociationName == \"\" {\n\t\treturn nil, errors.New(\"parameter managementAssociationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementAssociationName}\", url.PathEscape(managementAssociationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2015-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (c *MockApplicationSecurityGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName, asgName string, parameters network.ApplicationSecurityGroup) (*network.ApplicationSecurityGroup, error) {\n\t// Ignore resourceGroupName for simplicity.\n\tif _, ok := c.ASGs[asgName]; ok {\n\t\treturn nil, fmt.Errorf(\"update not supported\")\n\t}\n\tparameters.Name = 
&asgName\n\tc.ASGs[asgName] = parameters\n\treturn &parameters, nil\n}", "func (client *PrivateDNSZoneGroupsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, privateEndpointName string, privateDNSZoneGroupName string, parameters PrivateDNSZoneGroup, options *PrivateDNSZoneGroupsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateEndpoints/{privateEndpointName}/privateDnsZoneGroups/{privateDnsZoneGroupName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif privateEndpointName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateEndpointName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateEndpointName}\", url.PathEscape(privateEndpointName))\n\tif privateDNSZoneGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateDNSZoneGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateDnsZoneGroupName}\", url.PathEscape(privateDNSZoneGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *AvailabilitySetsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, availabilitySetName string, parameters AvailabilitySet, options *AvailabilitySetsCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif availabilitySetName == \"\" {\n\t\treturn nil, errors.New(\"parameter availabilitySetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{availabilitySetName}\", url.PathEscape(availabilitySetName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func NewDeploymentAtManagementGroupScope(ctx *pulumi.Context,\n\tname string, args *DeploymentAtManagementGroupScopeArgs, opts ...pulumi.ResourceOption) 
(*DeploymentAtManagementGroupScope, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.GroupId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'GroupId'\")\n\t}\n\tif args.Properties == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Properties'\")\n\t}\n\taliases := pulumi.Aliases([]pulumi.Alias{\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20190501:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20190510:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20190510:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20190701:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20190701:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20190801:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20190801:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20191001:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20191001:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20200601:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20200601:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20200801:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20200801:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20201001:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20201001:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20210101:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20210101:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20210401:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20210401:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t})\n\topts = append(opts, aliases)\n\tvar resource DeploymentAtManagementGroupScope\n\terr := ctx.RegisterResource(\"azure-native:resources/v20190501:DeploymentAtManagementGroupScope\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func CreateCreateApplicationGroupRequest() (request *CreateApplicationGroupRequest) {\n\trequest = &CreateApplicationGroupRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"oos\", \"2019-06-01\", \"CreateApplicationGroup\", \"oos\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (c *MockNetworkSecurityGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName, nsgName string, parameters 
network.SecurityGroup) (*network.SecurityGroup, error) {\n\t// Ignore resourceGroupName for simplicity.\n\tif _, ok := c.NSGs[nsgName]; ok {\n\t\treturn nil, fmt.Errorf(\"update not supported\")\n\t}\n\tparameters.Name = &nsgName\n\tc.NSGs[nsgName] = parameters\n\treturn &parameters, nil\n}", "func (client *PolicyDefinitionsClient) listByManagementGroupCreateRequest(ctx context.Context, managementGroupID string, options *PolicyDefinitionsListByManagementGroupOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions\"\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tunencodedParams := []string{req.Raw().URL.RawQuery}\n\tif options != nil && options.Filter != nil {\n\t\tunencodedParams = append(unencodedParams, \"$filter=\"+*options.Filter)\n\t}\n\treq.Raw().URL.RawQuery = strings.Join(unencodedParams, \"&\")\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreatePutMetricAlarmRequest() (request *PutMetricAlarmRequest) {\n\trequest = &PutMetricAlarmRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Cms\", \"2018-03-08\", \"PutMetricAlarm\", \"cms\", \"openAPI\")\n\treturn\n}", "func (c *UMemClient) NewCreateUMemcacheGroupRequest() *CreateUMemcacheGroupRequest {\n\treq := &CreateUMemcacheGroupRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(false)\n\treturn req\n}", "func (client *ClustersClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, parameters Cluster, options *ClustersBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/clusters/{clusterName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", 
"func CreateUpdateEndpointGroupRequest() (request *UpdateEndpointGroupRequest) {\n\trequest = &UpdateEndpointGroupRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Ga\", \"2019-11-20\", \"UpdateEndpointGroup\", \"gaplus\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateModifyDesktopsPolicyGroupRequest() (request *ModifyDesktopsPolicyGroupRequest) {\n\trequest = &ModifyDesktopsPolicyGroupRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"ecd\", \"2020-09-30\", \"ModifyDesktopsPolicyGroup\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func NewPolicyDefinitionAtManagementGroup(ctx *pulumi.Context,\n\tname string, args *PolicyDefinitionAtManagementGroupArgs, opts ...pulumi.ResourceOption) (*PolicyDefinitionAtManagementGroup, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.ManagementGroupId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ManagementGroupId'\")\n\t}\n\taliases := pulumi.Aliases([]pulumi.Alias{\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20190101:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20161201:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20161201:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20180301:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20180301:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20180501:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20180501:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20190601:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20190601:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20190901:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20190901:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20200301:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20200301:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20200901:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20200901:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t})\n\topts = append(opts, aliases)\n\tvar resource PolicyDefinitionAtManagementGroup\n\terr := ctx.RegisterResource(\"azure-native:authorization/v20190101:PolicyDefinitionAtManagementGroup\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func CreateCreateMonitorGroupRequest() (request *CreateMonitorGroupRequest) {\n\trequest = 
&CreateMonitorGroupRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Cms\", \"2019-01-01\", \"CreateMonitorGroup\", \"cms\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *TagRulesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, monitorName string, ruleSetName string, resource TagRule, options *TagRulesClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Dynatrace.Observability/monitors/{monitorName}/tagRules/{ruleSetName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif monitorName == \"\" {\n\t\treturn nil, errors.New(\"parameter monitorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{monitorName}\", url.PathEscape(monitorName))\n\tif ruleSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter ruleSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ruleSetName}\", url.PathEscape(ruleSetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-27\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, resource); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func CreateCreateMonitorGroupByResourceGroupIdRequest() (request *CreateMonitorGroupByResourceGroupIdRequest) {\n\trequest = &CreateMonitorGroupByResourceGroupIdRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Cms\", \"2019-01-01\", \"CreateMonitorGroupByResourceGroupId\", \"cms\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (c *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, privateZoneName string, parameters privatedns.PrivateZone, waitForCompletion bool) error {\n\tcreateOrUpdateFuture, err := c.privateDNSClient.CreateOrUpdate(ctx, resourceGroupName, privateZoneName, parameters, \"\", \"*\")\n\n\tif err != nil {\n\t\tklog.V(5).Infof(\"Received error for %s, resourceGroup: %s, error: %s\", \"privatedns.put.request\", resourceGroupName, err)\n\t\treturn err\n\t}\n\n\tif waitForCompletion {\n\t\terr := createOrUpdateFuture.WaitForCompletionRef(ctx, c.privateDNSClient.Client)\n\t\tif err != nil {\n\t\t\tklog.V(5).Infof(\"Received error while waiting for completion for %s, resourceGroup: %s, error: %s\", \"privatedns.put.request\", resourceGroupName, err)\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (client *DiskEncryptionSetsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet DiskEncryptionSet, options *DiskEncryptionSetsBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}\"\n\tif 
client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif diskEncryptionSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter diskEncryptionSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{diskEncryptionSetName}\", url.PathEscape(diskEncryptionSetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, diskEncryptionSet)\n}", "func (client *VirtualMachineScaleSetsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters VirtualMachineScaleSet, options *VirtualMachineScaleSetsBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif vmScaleSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter vmScaleSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vmScaleSetName}\", url.PathEscape(vmScaleSetName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *LocalRulestacksClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, resource LocalRulestackResource, options *LocalRulestacksClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif localRulestackName == \"\" {\n\t\treturn nil, errors.New(\"parameter localRulestackName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{localRulestackName}\", 
url.PathEscape(localRulestackName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-29\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, resource)\n}", "func (client *KpiClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, hubName string, kpiName string, parameters KpiResourceFormat, options *KpiClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CustomerInsights/hubs/{hubName}/kpi/{kpiName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif hubName == \"\" {\n\t\treturn nil, errors.New(\"parameter hubName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{hubName}\", url.PathEscape(hubName))\n\tif kpiName == \"\" {\n\t\treturn nil, errors.New(\"parameter kpiName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{kpiName}\", url.PathEscape(kpiName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-04-26\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *SchemaRegistryClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, schemaGroupName string, parameters SchemaGroup, options *SchemaRegistryClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/schemagroups/{schemaGroupName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif schemaGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter schemaGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{schemaGroupName}\", url.PathEscape(schemaGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", 
\"2022-10-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *RegistrationDefinitionsClient) createOrUpdateCreateRequest(ctx context.Context, registrationDefinitionID string, scope string, requestBody RegistrationDefinition, options *RegistrationDefinitionsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.ManagedServices/registrationDefinitions/{registrationDefinitionId}\"\n\tif registrationDefinitionID == \"\" {\n\t\treturn nil, errors.New(\"parameter registrationDefinitionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registrationDefinitionId}\", url.PathEscape(registrationDefinitionID))\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, requestBody)\n}", "func CreateUpdateAppInstanceGroupImageRequest() (request *UpdateAppInstanceGroupImageRequest) {\n\trequest = &UpdateAppInstanceGroupImageRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"appstream-center\", \"2021-09-01\", \"UpdateAppInstanceGroupImage\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (c *UDBClient) NewCreateUDBParamGroupRequest() *CreateUDBParamGroupRequest {\n\treq := &CreateUDBParamGroupRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(false)\n\treturn req\n}", "func (c *MockNatGatewaysClient) CreateOrUpdate(ctx context.Context, resourceGroupName, ngwName string, parameters network.NatGateway) (*network.NatGateway, error) {\n\t// Ignore resourceGroupName for simplicity.\n\tif _, ok := c.NGWs[ngwName]; ok {\n\t\treturn nil, fmt.Errorf(\"update not supported\")\n\t}\n\tparameters.Name = &ngwName\n\tc.NGWs[ngwName] = parameters\n\treturn &parameters, nil\n}", "func CreateDescribeContainerGroupMetricRequest() (request *DescribeContainerGroupMetricRequest) {\n\trequest = &DescribeContainerGroupMetricRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Eci\", \"2018-08-08\", \"DescribeContainerGroupMetric\", \"eci\", \"openAPI\")\n\treturn\n}", "func (client *PolicyDefinitionsClient) createOrUpdateAtManagementGroupHandleResponse(resp *http.Response) (PolicyDefinitionsCreateOrUpdateAtManagementGroupResponse, error) {\n\tresult := PolicyDefinitionsCreateOrUpdateAtManagementGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinition); err != nil {\n\t\treturn PolicyDefinitionsCreateOrUpdateAtManagementGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func 
CreateDescribeScalingGroupsRequest() (request *DescribeScalingGroupsRequest) {\n\trequest = &DescribeScalingGroupsRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Ess\", \"2014-08-28\", \"DescribeScalingGroups\", \"ess\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (mg *Groups) Create(group *Group) error {\n\n\tif mg.group != nil && len(group.ID) > 0 {\n\n\t\tpath := fmt.Sprintf(\"%s%s\", marathon.APIGroups, utilities.DelInitialSlash(group.ID))\n\n\t\tif _, err := mg.client.Session.BodyAsJSON(group).Post(path, mg.deploy, mg.fail); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmg.group = group\n\t\treturn nil\n\t}\n\treturn errors.New(\"group cannot be null nor empty\")\n}", "func (client *Client) CreateOrUpdateAssetGroupWithOptions(request *CreateOrUpdateAssetGroupRequest, runtime *util.RuntimeOptions) (_result *CreateOrUpdateAssetGroupResponse, _err error) {\n\t_err = util.ValidateModel(request)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\tquery := map[string]interface{}{}\n\tif !tea.BoolValue(util.IsUnset(request.GroupId)) {\n\t\tquery[\"GroupId\"] = request.GroupId\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.GroupName)) {\n\t\tquery[\"GroupName\"] = request.GroupName\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.Uuids)) {\n\t\tquery[\"Uuids\"] = request.Uuids\n\t}\n\n\treq := &openapi.OpenApiRequest{\n\t\tQuery: openapiutil.Query(query),\n\t}\n\tparams := &openapi.Params{\n\t\tAction: tea.String(\"CreateOrUpdateAssetGroup\"),\n\t\tVersion: tea.String(\"2018-12-03\"),\n\t\tProtocol: tea.String(\"HTTPS\"),\n\t\tPathname: tea.String(\"/\"),\n\t\tMethod: tea.String(\"POST\"),\n\t\tAuthType: tea.String(\"AK\"),\n\t\tStyle: tea.String(\"RPC\"),\n\t\tReqBodyType: tea.String(\"formData\"),\n\t\tBodyType: tea.String(\"json\"),\n\t}\n\t_result = &CreateOrUpdateAssetGroupResponse{}\n\t_body, _err := client.CallApi(params, req, runtime)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\t_err = tea.Convert(_body, &_result)\n\treturn _result, _err\n}", "func (client *Client) CreateOrUpdateAssetGroup(request *CreateOrUpdateAssetGroupRequest) (_result *CreateOrUpdateAssetGroupResponse, _err error) {\n\truntime := &util.RuntimeOptions{}\n\t_result = &CreateOrUpdateAssetGroupResponse{}\n\t_body, _err := client.CreateOrUpdateAssetGroupWithOptions(request, runtime)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\t_result = _body\n\treturn _result, _err\n}", "func CreateModifySkillGroupExRequest() (request *ModifySkillGroupExRequest) {\n\trequest = &ModifySkillGroupExRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"CloudCallCenter\", \"2017-07-05\", \"ModifySkillGroupEx\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *ClustersClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ClustersClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/serverGroupsv2\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-11-08\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *RecordSetsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, relativeRecordSetName string, parameters RecordSet, options *RecordSetsCreateOrUpdateOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/{recordType}/{relativeRecordSetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif privateZoneName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateZoneName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateZoneName}\", url.PathEscape(privateZoneName))\n\tif recordType == \"\" {\n\t\treturn nil, errors.New(\"parameter recordType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{recordType}\", url.PathEscape(string(recordType)))\n\tif relativeRecordSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter relativeRecordSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{relativeRecordSetName}\", relativeRecordSetName)\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodPut, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Header.Set(\"If-Match\", *options.IfMatch)\n\t}\n\tif options != nil && options.IfNoneMatch != nil {\n\t\treq.Header.Set(\"If-None-Match\", *options.IfNoneMatch)\n\t}\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, req.MarshalAsJSON(parameters)\n}", "func (client *MetricAlertsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, ruleName string, parameters MetricAlertResource, options *MetricAlertsClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/metricAlerts/{ruleName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif ruleName == \"\" {\n\t\treturn nil, errors.New(\"parameter ruleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ruleName}\", url.PathEscape(ruleName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := 
req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func CreateDeleteCorpGroupRequest() (request *DeleteCorpGroupRequest) {\n\trequest = &DeleteCorpGroupRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Vcs\", \"2020-05-15\", \"DeleteCorpGroup\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *ManagedClustersClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedCluster, options *ManagedClustersClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(resourceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *AgentPoolsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, kubernetesClusterName string, agentPoolName string, agentPoolParameters AgentPool, options *AgentPoolsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetworkCloud/kubernetesClusters/{kubernetesClusterName}/agentPools/{agentPoolName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif kubernetesClusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter kubernetesClusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{kubernetesClusterName}\", url.PathEscape(kubernetesClusterName))\n\tif agentPoolName == \"\" {\n\t\treturn nil, errors.New(\"parameter agentPoolName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{agentPoolName}\", url.PathEscape(agentPoolName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = 
[]string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, agentPoolParameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *CapacityReservationsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, parameters CapacityReservation, options *CapacityReservationsBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}/capacityReservations/{capacityReservationName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif capacityReservationGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter capacityReservationGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{capacityReservationGroupName}\", url.PathEscape(capacityReservationGroupName))\n\tif capacityReservationName == \"\" {\n\t\treturn nil, errors.New(\"parameter capacityReservationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{capacityReservationName}\", url.PathEscape(capacityReservationName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *IPAllocationsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, ipAllocationName string, parameters IPAllocation, options *IPAllocationsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations/{ipAllocationName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif ipAllocationName == \"\" {\n\t\treturn nil, errors.New(\"parameter ipAllocationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ipAllocationName}\", url.PathEscape(ipAllocationName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (a *IAMApiService) CreateGroup(ctx 
context.Context, gid string, iamGroupCreate IamGroupCreate) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = http.MethodPut\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/acs/api/v1/groups/{gid}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"gid\"+\"}\", fmt.Sprintf(\"%v\", gid), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &iamGroupCreate\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 0 {\n\t\t\tvar v IamError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func NewAttachGroupPolicyRequestWithoutParam() *AttachGroupPolicyRequest {\n\n return &AttachGroupPolicyRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/group/{groupName}:attachGroupPolicy\",\n Method: \"POST\",\n Header: nil,\n Version: \"v1\",\n },\n }\n}", "func (gr *GroupResource) Create(owner string, name string) (g *GroupDetails, err error) {\n\townerOrCurrentUser(gr, &owner)\n\n\tpath := fmt.Sprintf(\"/groups/%s/\", owner)\n\tvalues := url.Values{}\n\tvalues.Set(\"name\", name)\n\terr = gr.client.do(\"POST\", path, nil, values, &g)\n\n\treturn\n}", "func (client *DedicatedHostsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, parameters DedicatedHost, options *DedicatedHostsBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter 
resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif hostGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter hostGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{hostGroupName}\", url.PathEscape(hostGroupName))\n\tif hostName == \"\" {\n\t\treturn nil, errors.New(\"parameter hostName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{hostName}\", url.PathEscape(hostName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *ManagedInstancesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, managedInstanceName string, parameters ManagedInstance, options *ManagedInstancesClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif managedInstanceName == \"\" {\n\t\treturn nil, errors.New(\"parameter managedInstanceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedInstanceName}\", url.PathEscape(managedInstanceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *MonitorsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *MonitorsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn 
nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateRemoveAppGroupRequest() (request *RemoveAppGroupRequest) {\n\trequest = &RemoveAppGroupRequest{\n\t\tRoaRequest: &requests.RoaRequest{},\n\t}\n\trequest.InitWithApiInfo(\"OpenSearch\", \"2017-12-25\", \"RemoveAppGroup\", \"/v4/openapi/app-groups/[appGroupIdentity]\", \"\", \"\")\n\trequest.Method = requests.DELETE\n\treturn\n}", "func (client *GroupClient) updateCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, groupID string, ifMatch string, parameters GroupUpdateParameters, options *GroupUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups/{groupId}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif groupID == \"\" {\n\t\treturn nil, errors.New(\"parameter groupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{groupId}\", url.PathEscape(groupID))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"If-Match\", ifMatch)\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func NewIgroupCreateRequest() *IgroupCreateRequest {\n\treturn &IgroupCreateRequest{}\n}", "func createGroup() (group resources.Group, err error) {\n\tgroupsClient := resources.NewGroupsClient(config.SubscriptionID)\n\tgroupsClient.Authorizer = autorest.NewBearerAuthorizer(token)\n\n\treturn groupsClient.CreateOrUpdate(\n\t\tctx,\n\t\tresourceGroupName,\n\t\tresources.Group{\n\t\t\tLocation: to.StringPtr(resourceGroupLocation)})\n}", "func (client *DeviceSettingsClient) createOrUpdateTimeSettingsCreateRequest(ctx context.Context, deviceName string, resourceGroupName string, managerName string, parameters TimeSettings, options *DeviceSettingsClientBeginCreateOrUpdateTimeSettingsOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorSimple/managers/{managerName}/devices/{deviceName}/timeSettings/default\"\n\turlPath = strings.ReplaceAll(urlPath, \"{deviceName}\", deviceName)\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", client.subscriptionID)\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", resourceGroupName)\n\turlPath = strings.ReplaceAll(urlPath, \"{managerName}\", managerName)\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (r *ProjectsGroupsService) Create(name string, group *Group) *ProjectsGroupsCreateCall {\n\tc := &ProjectsGroupsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\tc.group = group\n\treturn c\n}", "func (g *GroupsService) CreateGroup(group Group) (*Group, *Response, error) {\n\tif err := g.client.validate.Struct(group); err != nil {\n\t\treturn nil, nil, err\n\t}\n\treq, err := g.client.newRequest(IDM, \"POST\", \"authorize/identity/Group\", &group, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treq.Header.Set(\"api-version\", groupAPIVersion)\n\n\tvar createdGroup Group\n\n\tresp, err := g.client.do(req, &createdGroup)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\treturn &createdGroup, resp, err\n\n}", "func (client *CustomAssessmentAutomationsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *CustomAssessmentAutomationsListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Security/customAssessmentAutomations\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ClientImpl) CreateControlInGroup(ctx context.Context, args CreateControlInGroupArgs) (*Control, error) {\n\tif args.Control == nil {\n\t\treturn nil, &azuredevops.ArgumentNilError{ArgumentName: \"args.Control\"}\n\t}\n\trouteValues := make(map[string]string)\n\tif args.ProcessId == nil {\n\t\treturn nil, &azuredevops.ArgumentNilError{ArgumentName: \"args.ProcessId\"}\n\t}\n\trouteValues[\"processId\"] = (*args.ProcessId).String()\n\tif args.WitRefName == nil || *args.WitRefName == \"\" {\n\t\treturn nil, &azuredevops.ArgumentNilOrEmptyError{ArgumentName: \"args.WitRefName\"}\n\t}\n\trouteValues[\"witRefName\"] = *args.WitRefName\n\tif args.GroupId == nil || *args.GroupId == \"\" {\n\t\treturn nil, &azuredevops.ArgumentNilOrEmptyError{ArgumentName: \"args.GroupId\"}\n\t}\n\trouteValues[\"groupId\"] = *args.GroupId\n\n\tbody, marshalErr := json.Marshal(*args.Control)\n\tif marshalErr != nil {\n\t\treturn nil, marshalErr\n\t}\n\tlocationId, _ := uuid.Parse(\"1f59b363-a2d0-4b7e-9bc6-eb9f5f3f0e58\")\n\tresp, err := client.Client.Send(ctx, http.MethodPost, locationId, \"6.0-preview.1\", routeValues, nil, bytes.NewReader(body), \"application/json\", \"application/json\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar responseValue Control\n\terr = client.Client.UnmarshalBody(resp, &responseValue)\n\treturn 
&responseValue, err\n}", "func (s *AutograderService) CreateGroup(ctx context.Context, in *pb.Group) (*pb.Group, error) {\n\tusr, err := s.getCurrentUser(ctx)\n\tif err != nil {\n\t\ts.logger.Errorf(\"CreateGroup failed: authentication error: %w\", err)\n\t\treturn nil, ErrInvalidUserInfo\n\t}\n\tif !s.isEnrolled(usr.GetID(), in.GetCourseID()) {\n\t\ts.logger.Errorf(\"CreateGroup failed: user %s not enrolled in course %d\", usr.GetLogin(), in.GetCourseID())\n\t\treturn nil, status.Errorf(codes.PermissionDenied, \"user not enrolled in given course\")\n\t}\n\tif !(in.Contains(usr) || s.isTeacher(usr.GetID(), in.GetCourseID())) {\n\t\ts.logger.Error(\"CreateGroup failed: user is not group member or teacher\")\n\t\treturn nil, status.Errorf(codes.PermissionDenied, \"only group member or teacher can create group\")\n\t}\n\tgroup, err := s.createGroup(in)\n\tif err != nil {\n\t\ts.logger.Errorf(\"CreateGroup failed: %w\", err)\n\t\treturn nil, status.Error(codes.InvalidArgument, \"failed to create group\")\n\t}\n\treturn group, nil\n}", "func (client *DedicatedHostsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, parameters DedicatedHost, options *DedicatedHostsBeginCreateOrUpdateOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{hostGroupName}\", url.PathEscape(hostGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{hostName}\", url.PathEscape(hostName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodPut, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, req.MarshalAsJSON(parameters)\n}", "func NewCreateVMPlacementGroupOK() *CreateVMPlacementGroupOK {\n\treturn &CreateVMPlacementGroupOK{}\n}", "func (client *DeviceSettingsClient) createOrUpdateAlertSettingsCreateRequest(ctx context.Context, deviceName string, resourceGroupName string, managerName string, parameters AlertSettings, options *DeviceSettingsClientBeginCreateOrUpdateAlertSettingsOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorSimple/managers/{managerName}/devices/{deviceName}/alertSettings/default\"\n\turlPath = strings.ReplaceAll(urlPath, \"{deviceName}\", deviceName)\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", client.subscriptionID)\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", resourceGroupName)\n\turlPath = strings.ReplaceAll(urlPath, \"{managerName}\", managerName)\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client 
*ConnectedEnvironmentsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, connectedEnvironmentName string, environmentEnvelope ConnectedEnvironment, options *ConnectedEnvironmentsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.App/connectedEnvironments/{connectedEnvironmentName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif connectedEnvironmentName == \"\" {\n\t\treturn nil, errors.New(\"parameter connectedEnvironmentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{connectedEnvironmentName}\", url.PathEscape(connectedEnvironmentName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, environmentEnvelope)\n}", "func (c *MockVMScaleSetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName, vmScaleSetName string, parameters compute.VirtualMachineScaleSet) (*compute.VirtualMachineScaleSet, error) {\n\t// Ignore resourceGroupName for simplicity.\n\tif _, ok := c.VMSSes[vmScaleSetName]; ok {\n\t\treturn nil, fmt.Errorf(\"update not supported\")\n\t}\n\tparameters.Name = &vmScaleSetName\n\tparameters.Identity.PrincipalID = fi.PtrTo(uuid.New().String())\n\tc.VMSSes[vmScaleSetName] = parameters\n\treturn &parameters, nil\n}", "func (client *RoleDefinitionsClient) createOrUpdateCreateRequest(ctx context.Context, scope string, roleDefinitionID string, roleDefinition RoleDefinition, options *RoleDefinitionsCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.Authorization/roleDefinitions/{roleDefinitionId}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\tif roleDefinitionID == \"\" {\n\t\treturn nil, errors.New(\"parameter roleDefinitionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{roleDefinitionId}\", url.PathEscape(roleDefinitionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, roleDefinition)\n}", "func (client *InteractionsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, hubName string, interactionName string, parameters InteractionResourceFormat, options *InteractionsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CustomerInsights/hubs/{hubName}/interactions/{interactionName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter 
resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif hubName == \"\" {\n\t\treturn nil, errors.New(\"parameter hubName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{hubName}\", url.PathEscape(hubName))\n\tif interactionName == \"\" {\n\t\treturn nil, errors.New(\"parameter interactionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{interactionName}\", url.PathEscape(interactionName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-04-26\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (c *tiKVGroups) Create(tiKVGroup *v1alpha1.TiKVGroup) (result *v1alpha1.TiKVGroup, err error) {\n\tresult = &v1alpha1.TiKVGroup{}\n\terr = c.client.Post().\n\t\tNamespace(c.ns).\n\t\tResource(\"tikvgroups\").\n\t\tBody(tiKVGroup).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}", "func (client *AvailabilityGroupListenersClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, sqlVirtualMachineGroupName string, availabilityGroupListenerName string, parameters AvailabilityGroupListener, options *AvailabilityGroupListenersClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachineGroups/{sqlVirtualMachineGroupName}/availabilityGroupListeners/{availabilityGroupListenerName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif sqlVirtualMachineGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter sqlVirtualMachineGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sqlVirtualMachineGroupName}\", url.PathEscape(sqlVirtualMachineGroupName))\n\tif availabilityGroupListenerName == \"\" {\n\t\treturn nil, errors.New(\"parameter availabilityGroupListenerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{availabilityGroupListenerName}\", url.PathEscape(availabilityGroupListenerName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *UserMetricsKeysClient) createOrUpdateCreateRequest(ctx context.Context, options *UserMetricsKeysClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath 
:= \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/trafficManagerUserMetricsKeys/default\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateDescribeMultiContainerGroupMetricRequest() (request *DescribeMultiContainerGroupMetricRequest) {\n\trequest = &DescribeMultiContainerGroupMetricRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Eci\", \"2018-08-08\", \"DescribeMultiContainerGroupMetric\", \"eci\", \"openAPI\")\n\treturn\n}", "func (client *IotSecuritySolutionClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *IotSecuritySolutionClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/iotSecuritySolutions\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-08-01\")\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (c *UDBClient) NewUploadUDBParamGroupRequest() *UploadUDBParamGroupRequest {\n\treq := &UploadUDBParamGroupRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (client *DevicesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, deviceName string, parameters Device, options *DevicesClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HybridNetwork/devices/{deviceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif deviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter deviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{deviceName}\", url.PathEscape(deviceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = 
strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *ConnectedEnvironmentsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ConnectedEnvironmentsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/connectedEnvironments\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client IdentityClient) CreateGroup(ctx context.Context, request CreateGroupRequest) (response CreateGroupResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\n\tif !(request.OpcRetryToken != nil && *request.OpcRetryToken != \"\") {\n\t\trequest.OpcRetryToken = common.String(common.RetryToken())\n\t}\n\n\tociResponse, err = common.Retry(ctx, request, client.createGroup, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = CreateGroupResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = CreateGroupResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(CreateGroupResponse); ok {\n\t\tcommon.EcContext.UpdateEndOfWindow(time.Duration(240 * time.Second))\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into CreateGroupResponse\")\n\t}\n\treturn\n}", "func (_BaseContentSpace *BaseContentSpaceTransactor) CreateGroup(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _BaseContentSpace.contract.Transact(opts, \"createGroup\")\n}", "func (client *DiskEncryptionSetsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *DiskEncryptionSetsListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = 
strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ClustersClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ClustersListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/clusters\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (_RandomBeacon *RandomBeaconTransactor) UpdateGroupCreationParameters(opts *bind.TransactOpts, groupCreationFrequency *big.Int, groupLifetime *big.Int, dkgResultChallengePeriodLength *big.Int, dkgResultChallengeExtraGas *big.Int, dkgResultSubmissionTimeout *big.Int, dkgSubmitterPrecedencePeriodLength *big.Int) (*types.Transaction, error) {\n\treturn _RandomBeacon.contract.Transact(opts, \"updateGroupCreationParameters\", groupCreationFrequency, groupLifetime, dkgResultChallengePeriodLength, dkgResultChallengeExtraGas, dkgResultSubmissionTimeout, dkgSubmitterPrecedencePeriodLength)\n}", "func (c *UDBClient) NewUpdateUDBParamGroupRequest() *UpdateUDBParamGroupRequest {\n\treq := &UpdateUDBParamGroupRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (client *Client) CreateMonitorGroupByResourceGroupId(request *CreateMonitorGroupByResourceGroupIdRequest) (response *CreateMonitorGroupByResourceGroupIdResponse, err error) {\n\tresponse = CreateCreateMonitorGroupByResourceGroupIdResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func (sqlStore *SQLStore) CreateGroup(group *model.Group) error {\n\tgroup.ID = model.NewID()\n\tgroup.CreateAt = GetMillis()\n\n\t_, err := sqlStore.execBuilder(sqlStore.db, sq.\n\t\tInsert(`\"Group\"`).\n\t\tSetMap(map[string]interface{}{\n\t\t\t\"ID\": group.ID,\n\t\t\t\"Name\": group.Name,\n\t\t\t\"Description\": group.Description,\n\t\t\t\"Version\": group.Version,\n\t\t\t\"CreateAt\": group.CreateAt,\n\t\t\t\"DeleteAt\": 0,\n\t\t}),\n\t)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to create group\")\n\t}\n\n\treturn nil\n}", "func 
(client *ContainerGroupsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ContainerGroupsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerInstance/containerGroups\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (s *GroupsService) Create(\n\tctx context.Context,\n\tgroupName string,\n) error {\n\traw, err := json.Marshal(struct {\n\t\tGroupName string `json:\"group_name\"`\n\t}{\n\t\tgroupName,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treq, err := http.NewRequest(\n\t\thttp.MethodPost,\n\t\ts.client.url+\"2.0/groups/create\",\n\t\tbytes.NewBuffer(raw),\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq = req.WithContext(ctx)\n\tres, err := s.client.client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif res.StatusCode >= 300 || res.StatusCode <= 199 {\n\t\treturn fmt.Errorf(\n\t\t\t\"Failed to returns 2XX response: %d\", res.StatusCode)\n\t}\n\n\treturn nil\n}", "func (client *MetricAlertsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *MetricAlertsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/metricAlerts\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *AssociationsClient) createOrUpdateCreateRequest(ctx context.Context, scope string, associationName string, association Association, options *AssociationsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.CustomProviders/associations/{associationName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\tif associationName == \"\" {\n\t\treturn nil, errors.New(\"parameter associationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{associationName}\", 
url.PathEscape(associationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-09-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, association)\n}", "func (client *MonitoringSettingsClient) updatePutCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, monitoringSettingResource MonitoringSettingResource, options *MonitoringSettingsClientBeginUpdatePutOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, monitoringSettingResource)\n}", "func (client *DefenderSettingsClient) createOrUpdateCreateRequest(ctx context.Context, defenderSettingsModel DefenderSettingsModel, options *DefenderSettingsClientCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.IoTSecurity/defenderSettings/default\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, defenderSettingsModel)\n}", "func (client IdentityClient) createGroup(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/groups\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateGroupResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = 
common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func handleCreateGroup(c *Context, w http.ResponseWriter, r *http.Request) {\n\tcreateGroupRequest, err := model.NewCreateGroupRequestFromReader(r.Body)\n\tif err != nil {\n\t\tc.Logger.WithError(err).Error(\"failed to decode request\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tgroup := model.Group{\n\t\tName: createGroupRequest.Name,\n\t\tDescription: createGroupRequest.Description,\n\t\tVersion: createGroupRequest.Version,\n\t\tImage: createGroupRequest.Image,\n\t\tMaxRolling: createGroupRequest.MaxRolling,\n\t\tAPISecurityLock: createGroupRequest.APISecurityLock,\n\t\tMattermostEnv: createGroupRequest.MattermostEnv,\n\t}\n\n\tannotations, err := model.AnnotationsFromStringSlice(createGroupRequest.Annotations)\n\tif err != nil {\n\t\tc.Logger.WithError(err).Error(\"invalid annotations\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\terr = c.Store.CreateGroup(&group, annotations)\n\tif err != nil {\n\t\tc.Logger.WithError(err).Error(\"failed to create group\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tc.Supervisor.Do()\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\toutputJSON(c, w, group.ToDTO(annotations))\n}", "func (client *ServersClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, serverName string, parameters Server, options *ServersClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serverName == \"\" {\n\t\treturn nil, errors.New(\"parameter serverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serverName}\", url.PathEscape(serverName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, parameters); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (c *EtcdGroupService) GroupCreate(ctx context.Context, groupName string) error {\n\treturn c.createGroup(ctx, groupName, 0)\n}", "func (_RandomBeacon *RandomBeaconTransactorSession) UpdateGroupCreationParameters(groupCreationFrequency *big.Int, groupLifetime *big.Int, dkgResultChallengePeriodLength *big.Int, dkgResultChallengeExtraGas *big.Int, dkgResultSubmissionTimeout *big.Int, dkgSubmitterPrecedencePeriodLength *big.Int) (*types.Transaction, error) {\n\treturn _RandomBeacon.Contract.UpdateGroupCreationParameters(&_RandomBeacon.TransactOpts, groupCreationFrequency, groupLifetime, dkgResultChallengePeriodLength, dkgResultChallengeExtraGas, dkgResultSubmissionTimeout, dkgSubmitterPrecedencePeriodLength)\n}" ]
[ "0.5988593", "0.5798292", "0.5685915", "0.558678", "0.5551827", "0.5526636", "0.54961526", "0.5459959", "0.54404885", "0.5392508", "0.5279132", "0.5272559", "0.5239537", "0.5215954", "0.5215764", "0.51848537", "0.51664686", "0.5060169", "0.505902", "0.5002327", "0.49339387", "0.49327195", "0.48570153", "0.4852222", "0.48519197", "0.48420623", "0.48413515", "0.48401713", "0.4818292", "0.4815378", "0.4798992", "0.47798774", "0.47621012", "0.47621012", "0.47567642", "0.47543117", "0.47517595", "0.47428077", "0.47325537", "0.47321534", "0.4721099", "0.47121555", "0.47100124", "0.47089314", "0.47050878", "0.4699813", "0.46988317", "0.46870938", "0.46846768", "0.46581635", "0.46429998", "0.4635851", "0.46298972", "0.46210358", "0.46175376", "0.46136954", "0.46030098", "0.45921335", "0.45894906", "0.4583414", "0.4559696", "0.45583516", "0.45466834", "0.45463932", "0.4542331", "0.45413345", "0.4534764", "0.45320785", "0.4529007", "0.452327", "0.45142534", "0.45079163", "0.4502758", "0.4496058", "0.44864586", "0.44811934", "0.44788498", "0.4475255", "0.44750527", "0.44718936", "0.44607985", "0.44366774", "0.44364947", "0.4434716", "0.443427", "0.4423846", "0.44159448", "0.4415907", "0.44154227", "0.4415147", "0.44149703", "0.44135624", "0.4410747", "0.4410395", "0.44079345", "0.44074267", "0.43964246", "0.4379501", "0.4369535", "0.43645966" ]
0.7278163
0
createOrUpdateAtManagementGroupHandleResponse handles the CreateOrUpdateAtManagementGroup response.
func (client *PolicyDefinitionsClient) createOrUpdateAtManagementGroupHandleResponse(resp *http.Response) (PolicyDefinitionsCreateOrUpdateAtManagementGroupResponse, error) {
	result := PolicyDefinitionsCreateOrUpdateAtManagementGroupResponse{RawResponse: resp}
	if err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinition); err != nil {
		return PolicyDefinitionsCreateOrUpdateAtManagementGroupResponse{}, runtime.NewResponseError(err, resp)
	}
	return result, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (client *GroupClient) createOrUpdateHandleResponse(resp *http.Response) (GroupCreateOrUpdateResponse, error) {\n\tresult := GroupCreateOrUpdateResponse{RawResponse: resp}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GroupContract); err != nil {\n\t\treturn GroupCreateOrUpdateResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *SchemaRegistryClient) createOrUpdateHandleResponse(resp *http.Response) (SchemaRegistryClientCreateOrUpdateResponse, error) {\n\tresult := SchemaRegistryClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SchemaGroup); err != nil {\n\t\treturn SchemaRegistryClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func handleCreateGroup(c *Context, w http.ResponseWriter, r *http.Request) {\n\tcreateGroupRequest, err := model.NewCreateGroupRequestFromReader(r.Body)\n\tif err != nil {\n\t\tc.Logger.WithError(err).Error(\"failed to decode request\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tgroup := model.Group{\n\t\tName: createGroupRequest.Name,\n\t\tDescription: createGroupRequest.Description,\n\t\tVersion: createGroupRequest.Version,\n\t\tImage: createGroupRequest.Image,\n\t\tMaxRolling: createGroupRequest.MaxRolling,\n\t\tAPISecurityLock: createGroupRequest.APISecurityLock,\n\t\tMattermostEnv: createGroupRequest.MattermostEnv,\n\t}\n\n\tannotations, err := model.AnnotationsFromStringSlice(createGroupRequest.Annotations)\n\tif err != nil {\n\t\tc.Logger.WithError(err).Error(\"invalid annotations\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\terr = c.Store.CreateGroup(&group, annotations)\n\tif err != nil {\n\t\tc.Logger.WithError(err).Error(\"failed to create group\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tc.Supervisor.Do()\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\toutputJSON(c, w, group.ToDTO(annotations))\n}", "func (client *ManagementAssociationsClient) createOrUpdateHandleResponse(resp *http.Response) (ManagementAssociationsCreateOrUpdateResponse, error) {\n\tresult := ManagementAssociationsCreateOrUpdateResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagementAssociation); err != nil {\n\t\treturn ManagementAssociationsCreateOrUpdateResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *AvailabilitySetsClient) createOrUpdateHandleResponse(resp *http.Response) (AvailabilitySetsCreateOrUpdateResponse, error) {\n\tresult := AvailabilitySetsCreateOrUpdateResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AvailabilitySet); err != nil {\n\t\treturn AvailabilitySetsCreateOrUpdateResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *PolicyDefinitionsClient) getAtManagementGroupHandleResponse(resp *http.Response) (PolicyDefinitionsGetAtManagementGroupResponse, error) {\n\tresult := PolicyDefinitionsGetAtManagementGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinition); err != nil {\n\t\treturn PolicyDefinitionsGetAtManagementGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *PolicyDefinitionsClient) createOrUpdateAtManagementGroupCreateRequest(ctx context.Context, policyDefinitionName string, managementGroupID string, parameters 
PolicyDefinition, options *PolicyDefinitionsCreateOrUpdateAtManagementGroupOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}\"\n\tif policyDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter policyDefinitionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{policyDefinitionName}\", url.PathEscape(policyDefinitionName))\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client IotHubResourceClient) CreateEventHubConsumerGroupResponder(resp *http.Response) (result EventHubConsumerGroupInfo, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func CreateCreateMonitorGroupResponse() (response *CreateMonitorGroupResponse) {\n\tresponse = &CreateMonitorGroupResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateCreateMonitorGroupByResourceGroupIdResponse() (response *CreateMonitorGroupByResourceGroupIdResponse) {\n\tresponse = &CreateMonitorGroupByResourceGroupIdResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateModifyDesktopsPolicyGroupResponse() (response *ModifyDesktopsPolicyGroupResponse) {\n\tresponse = &ModifyDesktopsPolicyGroupResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (client FirewallPolicyRuleGroupsClient) CreateOrUpdateResponder(resp *http.Response) (result FirewallPolicyRuleGroup, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func CreateUpdateEndpointGroupResponse() (response *UpdateEndpointGroupResponse) {\n\tresponse = &UpdateEndpointGroupResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (client *MetricAlertsClient) createOrUpdateHandleResponse(resp *http.Response) (MetricAlertsClientCreateOrUpdateResponse, error) {\n\tresult := MetricAlertsClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.MetricAlertResource); err != nil {\n\t\treturn MetricAlertsClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func CreateUpdateAppInstanceGroupImageResponse() (response *UpdateAppInstanceGroupImageResponse) {\n\tresponse = &UpdateAppInstanceGroupImageResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (a *IAMApiService) CreateGroup(ctx context.Context, gid string, iamGroupCreate IamGroupCreate) (*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = http.MethodPut\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName 
string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/acs/api/v1/groups/{gid}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"gid\"+\"}\", fmt.Sprintf(\"%v\", gid), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &iamGroupCreate\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 0 {\n\t\t\tvar v IamError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarHttpResponse, newErr\n\t}\n\n\treturn localVarHttpResponse, nil\n}", "func (c GroupClient) CreateOrUpdate(ctx context.Context, resourceGroupName, region string) error {\n\tif _, err := c.client.CreateOrUpdate(ctx, resourceGroupName, resources.Group{\n\t\tLocation: &region,\n\t}); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (client *UserMetricsKeysClient) createOrUpdateHandleResponse(resp *http.Response) (UserMetricsKeysClientCreateOrUpdateResponse, error) {\n\tresult := UserMetricsKeysClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.UserMetricsModel); err != nil {\n\t\treturn UserMetricsKeysClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *HCRPAssignmentsClient) createOrUpdateHandleResponse(resp *http.Response) (HCRPAssignmentsClientCreateOrUpdateResponse, error) {\n\tresult := HCRPAssignmentsClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Assignment); err != nil {\n\t\treturn HCRPAssignmentsClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ServerVulnerabilityAssessmentClient) createOrUpdateHandleResponse(resp *http.Response) (ServerVulnerabilityAssessmentClientCreateOrUpdateResponse, error) {\n\tresult := ServerVulnerabilityAssessmentClientCreateOrUpdateResponse{}\n\tif err := 
runtime.UnmarshalAsJSON(resp, &result.ServerVulnerabilityAssessment); err != nil {\n\t\treturn ServerVulnerabilityAssessmentClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func CreateCreateApplicationGroupResponse() (response *CreateApplicationGroupResponse) {\n\tresponse = &CreateApplicationGroupResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (c *MockResourceGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, parameters resources.Group) error {\n\tparameters.Name = &resourceGroupName\n\tc.RGs[resourceGroupName] = parameters\n\treturn nil\n}", "func (client WorkloadNetworksClient) CreateVMGroupResponder(resp *http.Response) (result WorkloadNetworkVMGroup, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func CreatePutMetricAlarmResponse() (response *PutMetricAlarmResponse) {\n\tresponse = &PutMetricAlarmResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (client *RecordSetsClient) createOrUpdateHandleResponse(resp *azcore.Response) (RecordSetResponse, error) {\n\tvar val *RecordSet\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn RecordSetResponse{}, err\n\t}\n\treturn RecordSetResponse{RawResponse: resp.Response, RecordSet: val}, nil\n}", "func (client *RoleDefinitionsClient) createOrUpdateHandleResponse(resp *http.Response) (RoleDefinitionsCreateOrUpdateResponse, error) {\n\tresult := RoleDefinitionsCreateOrUpdateResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RoleDefinition); err != nil {\n\t\treturn RoleDefinitionsCreateOrUpdateResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *IotSecuritySolutionClient) createOrUpdateHandleResponse(resp *http.Response) (IotSecuritySolutionClientCreateOrUpdateResponse, error) {\n\tresult := IotSecuritySolutionClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.IoTSecuritySolutionModel); err != nil {\n\t\treturn IotSecuritySolutionClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *MonitorsClient) listByResourceGroupHandleResponse(resp *http.Response) (MonitorsClientListByResourceGroupResponse, error) {\n\tresult := MonitorsClientListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.MonitorResourceListResponse); err != nil {\n\t\treturn MonitorsClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ActionsClient) createOrUpdateHandleResponse(resp *http.Response) (ActionsClientCreateOrUpdateResponse, error) {\n\tresult := ActionsClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ActionResponse); err != nil {\n\t\treturn ActionsClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *PolicyDefinitionsClient) listByManagementGroupHandleResponse(resp *http.Response) (PolicyDefinitionsListByManagementGroupResponse, error) {\n\tresult := PolicyDefinitionsListByManagementGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinitionListResult); err != nil {\n\t\treturn PolicyDefinitionsListByManagementGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client 
*CustomAssessmentAutomationsClient) listByResourceGroupHandleResponse(resp *http.Response) (CustomAssessmentAutomationsListByResourceGroupResponse, error) {\n\tresult := CustomAssessmentAutomationsListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CustomAssessmentAutomationsListResult); err != nil {\n\t\treturn CustomAssessmentAutomationsListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (a *HyperflexApiService) UpdateHyperflexInitiatorGroupExecute(r ApiUpdateHyperflexInitiatorGroupRequest) (*HyperflexInitiatorGroup, *http.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = http.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tformFiles []formFile\n\t\tlocalVarReturnValue *HyperflexInitiatorGroup\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"HyperflexApiService.UpdateHyperflexInitiatorGroup\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/api/v1/hyperflex/InitiatorGroups/{Moid}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"Moid\"+\"}\", url.PathEscape(parameterToString(r.moid, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\tif r.hyperflexInitiatorGroup == nil {\n\t\treturn localVarReturnValue, nil, reportError(\"hyperflexInitiatorGroup is required and must be specified\")\n\t}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\", \"application/json-patch+json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.ifMatch != nil {\n\t\tlocalVarHeaderParams[\"If-Match\"] = parameterToString(*r.ifMatch, \"\")\n\t}\n\t// body params\n\tlocalVarPostBody = r.hyperflexInitiatorGroup\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := &GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 400 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = 
v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tvar v Error\n\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\tif err != nil {\n\t\t\tnewErr.error = err.Error()\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tnewErr.model = v\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := &GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (client *PolicyDefinitionsClient) createOrUpdateHandleResponse(resp *http.Response) (PolicyDefinitionsCreateOrUpdateResponse, error) {\n\tresult := PolicyDefinitionsCreateOrUpdateResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinition); err != nil {\n\t\treturn PolicyDefinitionsCreateOrUpdateResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *PipelinesClient) createOrUpdateHandleResponse(resp *http.Response) (PipelinesClientCreateOrUpdateResponse, error) {\n\tresult := PipelinesClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PipelineResource); err != nil {\n\t\treturn PipelinesClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func CreateDescribeScalingGroupsResponse() (response *DescribeScalingGroupsResponse) {\n\tresponse = &DescribeScalingGroupsResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (client *DefenderSettingsClient) createOrUpdateHandleResponse(resp *http.Response) (DefenderSettingsClientCreateOrUpdateResponse, error) {\n\tresult := DefenderSettingsClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DefenderSettingsModel); err != nil {\n\t\treturn DefenderSettingsClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *GalleryImageVersionsClient) createOrUpdateHandleResponse(resp *azcore.Response) (GalleryImageVersionResponse, error) {\n\tvar val *GalleryImageVersion\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn 
GalleryImageVersionResponse{}, err\n\t}\n\treturn GalleryImageVersionResponse{RawResponse: resp.Response, GalleryImageVersion: val}, nil\n}", "func (client *VirtualMachineImageTemplatesClient) listByResourceGroupHandleResponse(resp *http.Response) (VirtualMachineImageTemplatesClientListByResourceGroupResponse, error) {\n\tresult := VirtualMachineImageTemplatesClientListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ImageTemplateListResult); err != nil {\n\t\treturn VirtualMachineImageTemplatesClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client IdentityClient) createDynamicGroup(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/dynamicGroups\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateDynamicGroupResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (client *ClientImpl) CreateControlInGroup(ctx context.Context, args CreateControlInGroupArgs) (*Control, error) {\n\tif args.Control == nil {\n\t\treturn nil, &azuredevops.ArgumentNilError{ArgumentName: \"args.Control\"}\n\t}\n\trouteValues := make(map[string]string)\n\tif args.ProcessId == nil {\n\t\treturn nil, &azuredevops.ArgumentNilError{ArgumentName: \"args.ProcessId\"}\n\t}\n\trouteValues[\"processId\"] = (*args.ProcessId).String()\n\tif args.WitRefName == nil || *args.WitRefName == \"\" {\n\t\treturn nil, &azuredevops.ArgumentNilOrEmptyError{ArgumentName: \"args.WitRefName\"}\n\t}\n\trouteValues[\"witRefName\"] = *args.WitRefName\n\tif args.GroupId == nil || *args.GroupId == \"\" {\n\t\treturn nil, &azuredevops.ArgumentNilOrEmptyError{ArgumentName: \"args.GroupId\"}\n\t}\n\trouteValues[\"groupId\"] = *args.GroupId\n\n\tbody, marshalErr := json.Marshal(*args.Control)\n\tif marshalErr != nil {\n\t\treturn nil, marshalErr\n\t}\n\tlocationId, _ := uuid.Parse(\"1f59b363-a2d0-4b7e-9bc6-eb9f5f3f0e58\")\n\tresp, err := client.Client.Send(ctx, http.MethodPost, locationId, \"6.0-preview.1\", routeValues, nil, bytes.NewReader(body), \"application/json\", \"application/json\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar responseValue Control\n\terr = client.Client.UnmarshalBody(resp, &responseValue)\n\treturn &responseValue, err\n}", "func (client OpenShiftManagedClustersClient) CreateOrUpdateResponder(resp *http.Response) (result v20180930preview.OpenShiftManagedCluster, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *ContainerGroupsClient) listByResourceGroupHandleResponse(resp *http.Response) (ContainerGroupsClientListByResourceGroupResponse, error) {\n\tresult := ContainerGroupsClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ContainerGroupListResult); err != nil {\n\t\treturn ContainerGroupsClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func 
(client *ManagedInstancesClient) listByResourceGroupHandleResponse(resp *http.Response) (ManagedInstancesClientListByResourceGroupResponse, error) {\n\tresult := ManagedInstancesClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedInstanceListResult); err != nil {\n\t\treturn ManagedInstancesClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DatasetsClient) createOrUpdateHandleResponse(resp *http.Response) (DatasetsClientCreateOrUpdateResponse, error) {\n\tresult := DatasetsClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DatasetResource); err != nil {\n\t\treturn DatasetsClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DiskEncryptionSetsClient) listByResourceGroupHandleResponse(resp *http.Response) (DiskEncryptionSetsListByResourceGroupResponse, error) {\n\tresult := DiskEncryptionSetsListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DiskEncryptionSetList); err != nil {\n\t\treturn DiskEncryptionSetsListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ContainerGroupsClient) updateHandleResponse(resp *http.Response) (ContainerGroupsClientUpdateResponse, error) {\n\tresult := ContainerGroupsClientUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ContainerGroup); err != nil {\n\t\treturn ContainerGroupsClientUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (c *Client) CreateGroupResp(ctx context.Context, group *Group) (*http.Response, error) {\n\t// POST /Groups\n\tif group == nil {\n\t\treturn nil, fmt.Errorf(\"group is required\")\n\t}\n\treturn c.getResp(ctx, \"POST\", \"/Groups\", group, nil)\n}", "func (client *LongTermRetentionManagedInstanceBackupsClient) getByResourceGroupHandleResponse(resp *http.Response) (LongTermRetentionManagedInstanceBackupsGetByResourceGroupResponse, error) {\n\tresult := LongTermRetentionManagedInstanceBackupsGetByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedInstanceLongTermRetentionBackup); err != nil {\n\t\treturn LongTermRetentionManagedInstanceBackupsGetByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WorkspacesClient) listByResourceGroupHandleResponse(resp *http.Response) (WorkspacesListByResourceGroupResponse, error) {\n\tresult := WorkspacesListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.WorkspaceListResult); err != nil {\n\t\treturn WorkspacesListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *MetricAlertsClient) listByResourceGroupHandleResponse(resp *http.Response) (MetricAlertsClientListByResourceGroupResponse, error) {\n\tresult := MetricAlertsClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.MetricAlertResourceCollection); err != nil {\n\t\treturn MetricAlertsClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ManagedDatabaseSecurityAlertPoliciesClient) createOrUpdateHandleResponse(resp *http.Response) (ManagedDatabaseSecurityAlertPoliciesClientCreateOrUpdateResponse, error) {\n\tresult := ManagedDatabaseSecurityAlertPoliciesClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedDatabaseSecurityAlertPolicy); err != nil {\n\t\treturn 
ManagedDatabaseSecurityAlertPoliciesClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) createOrUpdateDomainOwnershipIdentifierSlotHandleResponse(resp *http.Response) (WebAppsCreateOrUpdateDomainOwnershipIdentifierSlotResponse, error) {\n\tresult := WebAppsCreateOrUpdateDomainOwnershipIdentifierSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Identifier); err != nil {\n\t\treturn WebAppsCreateOrUpdateDomainOwnershipIdentifierSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ManagedClustersClient) listByResourceGroupHandleResponse(resp *http.Response) (ManagedClustersClientListByResourceGroupResponse, error) {\n\tresult := ManagedClustersClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedClusterListResult); err != nil {\n\t\treturn ManagedClustersClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) createOrUpdateHostSecretSlotHandleResponse(resp *http.Response) (WebAppsCreateOrUpdateHostSecretSlotResponse, error) {\n\tresult := WebAppsCreateOrUpdateHostSecretSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyInfo); err != nil {\n\t\treturn WebAppsCreateOrUpdateHostSecretSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) createOrUpdateDomainOwnershipIdentifierHandleResponse(resp *http.Response) (WebAppsCreateOrUpdateDomainOwnershipIdentifierResponse, error) {\n\tresult := WebAppsCreateOrUpdateDomainOwnershipIdentifierResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Identifier); err != nil {\n\t\treturn WebAppsCreateOrUpdateDomainOwnershipIdentifierResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func CreateModifySkillGroupExResponse() (response *ModifySkillGroupExResponse) {\n\tresponse = &ModifySkillGroupExResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (client *WCFRelaysClient) createOrUpdateAuthorizationRuleHandleResponse(resp *http.Response) (WCFRelaysClientCreateOrUpdateAuthorizationRuleResponse, error) {\n\tresult := WCFRelaysClientCreateOrUpdateAuthorizationRuleResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AuthorizationRule); err != nil {\n\t\treturn WCFRelaysClientCreateOrUpdateAuthorizationRuleResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DedicatedHostsClient) createOrUpdateHandleResponse(resp *azcore.Response) (DedicatedHostResponse, error) {\n\tvar val *DedicatedHost\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn DedicatedHostResponse{}, err\n\t}\n\treturn DedicatedHostResponse{RawResponse: resp.Response, DedicatedHost: val}, nil\n}", "func (client IdentityClient) CreateDynamicGroup(ctx context.Context, request CreateDynamicGroupRequest) (response CreateDynamicGroupResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\n\tif !(request.OpcRetryToken != nil && *request.OpcRetryToken != \"\") {\n\t\trequest.OpcRetryToken = common.String(common.RetryToken())\n\t}\n\n\tociResponse, err = common.Retry(ctx, request, client.createDynamicGroup, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := 
ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = CreateDynamicGroupResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = CreateDynamicGroupResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(CreateDynamicGroupResponse); ok {\n\t\tcommon.EcContext.UpdateEndOfWindow(time.Duration(240 * time.Second))\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into CreateDynamicGroupResponse\")\n\t}\n\treturn\n}", "func (client *VideosClient) createOrUpdateHandleResponse(resp *http.Response) (VideosClientCreateOrUpdateResponse, error) {\n\tresult := VideosClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VideoEntity); err != nil {\n\t\treturn VideosClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ClustersClient) listByResourceGroupHandleResponse(resp *http.Response) (ClustersListByResourceGroupResponse, error) {\n\tresult := ClustersListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ClusterListResult); err != nil {\n\t\treturn ClustersListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *IPAllocationsClient) listByResourceGroupHandleResponse(resp *http.Response) (IPAllocationsClientListByResourceGroupResponse, error) {\n\tresult := IPAllocationsClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.IPAllocationListResult); err != nil {\n\t\treturn IPAllocationsClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ApplyUpdatesClient) createOrUpdateHandleResponse(resp *http.Response) (ApplyUpdatesClientCreateOrUpdateResponse, error) {\n\tresult := ApplyUpdatesClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ApplyUpdate); err != nil {\n\t\treturn ApplyUpdatesClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SourceControlConfigurationsClient) createOrUpdateHandleResponse(resp *http.Response) (SourceControlConfigurationsClientCreateOrUpdateResponse, error) {\n\tresult := SourceControlConfigurationsClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SourceControlConfiguration); err != nil {\n\t\treturn SourceControlConfigurationsClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client IdentityClient) CreateGroup(ctx context.Context, request CreateGroupRequest) (response CreateGroupResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\n\tif !(request.OpcRetryToken != nil && *request.OpcRetryToken != \"\") {\n\t\trequest.OpcRetryToken = common.String(common.RetryToken())\n\t}\n\n\tociResponse, err = common.Retry(ctx, request, client.createGroup, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = CreateGroupResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = CreateGroupResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif 
convertedResponse, ok := ociResponse.(CreateGroupResponse); ok {\n\t\tcommon.EcContext.UpdateEndOfWindow(time.Duration(240 * time.Second))\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into CreateGroupResponse\")\n\t}\n\treturn\n}", "func (client *RedisClient) listByResourceGroupHandleResponse(resp *http.Response) (RedisListByResourceGroupResponse, error) {\n\tresult := RedisListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RedisListResult); err != nil {\n\t\treturn RedisListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func NewPolicyDefinitionAtManagementGroup(ctx *pulumi.Context,\n\tname string, args *PolicyDefinitionAtManagementGroupArgs, opts ...pulumi.ResourceOption) (*PolicyDefinitionAtManagementGroup, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.ManagementGroupId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ManagementGroupId'\")\n\t}\n\taliases := pulumi.Aliases([]pulumi.Alias{\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20190101:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20161201:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20161201:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20180301:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20180301:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20180501:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20180501:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20190601:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20190601:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20190901:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20190901:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20200301:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20200301:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20200901:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20200901:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t})\n\topts = append(opts, aliases)\n\tvar resource PolicyDefinitionAtManagementGroup\n\terr := ctx.RegisterResource(\"azure-native:authorization/v20190101:PolicyDefinitionAtManagementGroup\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (client *GroupClient) createOrUpdateCreateRequest(ctx context.Context, 
resourceGroupName string, serviceName string, groupID string, parameters GroupCreateParameters, options *GroupCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups/{groupId}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif groupID == \"\" {\n\t\treturn nil, errors.New(\"parameter groupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{groupId}\", url.PathEscape(groupID))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Raw().Header.Set(\"If-Match\", *options.IfMatch)\n\t}\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *IotSecuritySolutionClient) listByResourceGroupHandleResponse(resp *http.Response) (IotSecuritySolutionClientListByResourceGroupResponse, error) {\n\tresult := IotSecuritySolutionClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.IoTSecuritySolutionsList); err != nil {\n\t\treturn IotSecuritySolutionClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ConnectedEnvironmentsClient) listByResourceGroupHandleResponse(resp *http.Response) (ConnectedEnvironmentsClientListByResourceGroupResponse, error) {\n\tresult := ConnectedEnvironmentsClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ConnectedEnvironmentCollection); err != nil {\n\t\treturn ConnectedEnvironmentsClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AgentsClient) createOrUpdateHandleResponse(resp *http.Response) (AgentsClientCreateOrUpdateResponse, error) {\n\tresult := AgentsClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Agent); err != nil {\n\t\treturn AgentsClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func CreateRemoveAppGroupResponse() (response *RemoveAppGroupResponse) {\n\tresponse = &RemoveAppGroupResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (client IdentityClient) UpdateDynamicGroup(ctx context.Context, request UpdateDynamicGroupRequest) (response UpdateDynamicGroupResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.updateDynamicGroup, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := 
ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = UpdateDynamicGroupResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = UpdateDynamicGroupResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(UpdateDynamicGroupResponse); ok {\n\t\tcommon.EcContext.UpdateEndOfWindow(time.Duration(240 * time.Second))\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into UpdateDynamicGroupResponse\")\n\t}\n\treturn\n}", "func (client *AlertProcessingRulesClient) createOrUpdateHandleResponse(resp *http.Response) (AlertProcessingRulesClientCreateOrUpdateResponse, error) {\n\tresult := AlertProcessingRulesClientCreateOrUpdateResponse{RawResponse: resp}\n\tif val := resp.Header.Get(\"x-ms-request-id\"); val != \"\" {\n\t\tresult.XMSRequestID = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AlertProcessingRule); err != nil {\n\t\treturn AlertProcessingRulesClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *CassandraClustersClient) listByResourceGroupHandleResponse(resp *http.Response) (CassandraClustersClientListByResourceGroupResponse, error) {\n\tresult := CassandraClustersClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ListClusters); err != nil {\n\t\treturn CassandraClustersClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *LocalRulestacksClient) listByResourceGroupHandleResponse(resp *http.Response) (LocalRulestacksClientListByResourceGroupResponse, error) {\n\tresult := LocalRulestacksClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.LocalRulestackResourceListResult); err != nil {\n\t\treturn LocalRulestacksClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) createOrUpdateHostSecretHandleResponse(resp *http.Response) (WebAppsCreateOrUpdateHostSecretResponse, error) {\n\tresult := WebAppsCreateOrUpdateHostSecretResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyInfo); err != nil {\n\t\treturn WebAppsCreateOrUpdateHostSecretResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func CreateUserGroup(w http.ResponseWriter, r *http.Request) {\n\tfLog := userMgmtLogger.WithField(\"func\", \"CreateUserGroup\").WithField(\"RequestID\", r.Context().Value(constants.RequestID)).WithField(\"path\", r.URL.Path).WithField(\"method\", r.Method)\n\n\tiauthctx := r.Context().Value(constants.HansipAuthentication)\n\tif iauthctx == nil {\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusUnauthorized, \"You are not authorized to access this resource\", nil, nil)\n\t\treturn\n\t}\n\n\tparams, err := helper.ParsePathParams(fmt.Sprintf(\"%s/management/user/{userRecId}/group/{groupRecId}\", apiPrefix), r.URL.Path)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tgroup, err := GroupRepo.GetGroupByRecID(r.Context(), params[\"groupRecId\"])\n\tif err != nil {\n\t\tfLog.Errorf(\"GroupRepo.GetGroupByRecID got %s\", err.Error())\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusInternalServerError, err.Error(), nil, nil)\n\t\treturn\n\t}\n\tif group == nil {\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusNotFound, fmt.Sprintf(\"Group recid %s not found\", params[\"groupRecId\"]), nil, nil)\n\t\treturn\n\t}\n\n\tauthCtx := 
iauthctx.(*hansipcontext.AuthenticationContext)\n\tif !authCtx.IsAdminOfDomain(group.GroupDomain) {\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusForbidden, \"You don't have the right to access group with the specified domain\", nil, nil)\n\t\treturn\n\t}\n\n\tuser, err := UserRepo.GetUserByRecID(r.Context(), params[\"userRecId\"])\n\tif err != nil {\n\t\tfLog.Errorf(\"UserRepo.GetUserByRecID got %s\", err.Error())\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusInternalServerError, err.Error(), nil, nil)\n\t\treturn\n\t}\n\tif user == nil {\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusNotFound, fmt.Sprintf(\"User recid %s not found\", params[\"userRecId\"]), nil, nil)\n\t\treturn\n\t}\n\n\t_, err = UserGroupRepo.CreateUserGroup(r.Context(), user, group)\n\tif err != nil {\n\t\tfLog.Errorf(\"UserGroupRepo.CreateUserGroup got %s\", err.Error())\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusBadRequest, err.Error(), nil, nil)\n\t\treturn\n\t}\n\tRevocationRepo.Revoke(r.Context(), user.Email)\n\thelper.WriteHTTPResponse(r.Context(), w, http.StatusOK, \"User-Group created\", nil, nil)\n}", "func (client *ClustersClient) listByResourceGroupHandleResponse(resp *http.Response) (ClustersClientListByResourceGroupResponse, error) {\n\tresult := ClustersClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ClusterListResult); err != nil {\n\t\treturn ClustersClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) listByResourceGroupHandleResponse(resp *http.Response) (WebAppsListByResourceGroupResponse, error) {\n\tresult := WebAppsListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.WebAppCollection); err != nil {\n\t\treturn WebAppsListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func CreateModifyHostAvailabilityResponse() (response *ModifyHostAvailabilityResponse) {\n\tresponse = &ModifyHostAvailabilityResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func GroupCreateAppHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\n\tlog.Infoln(\"CreateAppHandler invoked\", r.Method, r.URL.Path, r.RemoteAddr)\n\n\tuserid := vars[\"userid\"]\n\tgroupid := r.URL.Query().Get(\"groupid\")\n\ttaskid := r.URL.Query().Get(\"taskid\")\n\tif taskid == \"\" {\n\t\ttaskid = string(GenrandInt(32))\n\t}\n\n\tif len(groupid) < 1 {\n\t\trespondError(w, http.StatusBadRequest, \"groupid is requried\")\n\t\treturn\n\t}\n\n\tdefer r.Body.Close()\n\tb, _ := ioutil.ReadAll(r.Body)\n\n\tlog.Debugln(string(b))\n\tvar js map[string]interface{}\n\n\terr := CheckPodcfg(b, false)\n\tif err != nil {\n\t\trespondError(w, http.StatusBadRequest, \"Input Error: \"+err.Error())\n\t\treturn\n\t}\n\tb, err = jsonparser.Set(b, []byte(`\"IfNotPresent\"`), \"spec\", \"containers\", \"[0]\", \"imagePullPolicy\")\n\tif err != nil {\n\t\trespondError(w, http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\n\tcontainerimage, err := jsonparser.GetString(b, \"spec\", \"containers\", \"[0]\", \"image\")\n\tregistryL := strings.Split(strings.Split(containerimage, `:`)[0], `/`)\n\t//basename := registryL[len(registryL)-1] + fmt.Sprintf(\"-%d\", rand.Int(20))\n\tbasename := registryL[len(registryL)-1]\n\n\tb, err = jsonparser.Set(b, []byte(fmt.Sprintf(`\"%s\"`, basename)), \"metadata\", \"name\")\n\tif err != nil {\n\t\trespondError(w, http.StatusBadRequest, 
err.Error())\n\t\treturn\n\t}\n\n\tb, err = jsonparser.Set(b, []byte(fmt.Sprintf(`\"%s\"`, basename)), \"spec\", \"containers\", \"[0]\", \"name\")\n\tif err != nil {\n\t\trespondError(w, http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\n\tif err = json.Unmarshal(b, &js); err != nil {\n\t\trespondError(w, http.StatusInternalServerError, \"Unexpect Error: \"+string(b))\n\t\treturn\n\t}\n\n\tb = jsonparser.Delete(b, \"spec\", \"nodeSelector\")\n\tcfg := jsonparser.Delete(b, \"spec\", \"imagePullSecrets\")\n\tlog.Debugln(string(cfg))\n\n\tT := TaskInfo{\n\t\tID: taskid,\n\t\tCurrent: 0,\n\t\tUserid: userid,\n\t\tGroupid: groupid,\n\t\tAppcfg: string(cfg),\n\t}\n\n\terr = DBGroupInQueue(T)\n\tif err != nil {\n\t\trespondError(w, http.StatusInternalServerError, \"Unexpect Error: \"+err.Error())\n\t}\n\trespondJSON(w, http.StatusOK,\n\t\tmap[string]interface{}{\"code\": 0, \"id\": T.ID})\n}", "func (client *ServersClient) listByResourceGroupHandleResponse(resp *http.Response) (ServersClientListByResourceGroupResponse, error) {\n\tresult := ServersClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ServerListResult); err != nil {\n\t\treturn ServersClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ApplyUpdatesClient) createOrUpdateParentHandleResponse(resp *http.Response) (ApplyUpdatesClientCreateOrUpdateParentResponse, error) {\n\tresult := ApplyUpdatesClientCreateOrUpdateParentResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ApplyUpdate); err != nil {\n\t\treturn ApplyUpdatesClientCreateOrUpdateParentResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AccountsClient) listByResourceGroupHandleResponse(resp *http.Response) (AccountsClientListByResourceGroupResponse, error) {\n\tresult := AccountsClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AccountListResult); err != nil {\n\t\treturn AccountsClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ServersClient) listByResourceGroupHandleResponse(resp *http.Response) (ServersClientListByResourceGroupResponse, error) {\n\tresult := ServersClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ServerList); err != nil {\n\t\treturn ServersClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SpatialAnchorsAccountsClient) listByResourceGroupHandleResponse(resp *http.Response) (SpatialAnchorsAccountsClientListByResourceGroupResponse, error) {\n\tresult := SpatialAnchorsAccountsClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SpatialAnchorsAccountPage); err != nil {\n\t\treturn SpatialAnchorsAccountsClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *GroupClient) updateHandleResponse(resp *http.Response) (GroupUpdateResponse, error) {\n\tresult := GroupUpdateResponse{RawResponse: resp}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GroupContract); err != nil {\n\t\treturn GroupUpdateResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *VirtualMachinesClient) listByResourceGroupHandleResponse(resp *http.Response) (VirtualMachinesClientListByResourceGroupResponse, error) {\n\tresult := VirtualMachinesClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachinesList); err != nil 
{\n\t\treturn VirtualMachinesClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *Client) CreateOrUpdateAssetGroupWithOptions(request *CreateOrUpdateAssetGroupRequest, runtime *util.RuntimeOptions) (_result *CreateOrUpdateAssetGroupResponse, _err error) {\n\t_err = util.ValidateModel(request)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\tquery := map[string]interface{}{}\n\tif !tea.BoolValue(util.IsUnset(request.GroupId)) {\n\t\tquery[\"GroupId\"] = request.GroupId\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.GroupName)) {\n\t\tquery[\"GroupName\"] = request.GroupName\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.Uuids)) {\n\t\tquery[\"Uuids\"] = request.Uuids\n\t}\n\n\treq := &openapi.OpenApiRequest{\n\t\tQuery: openapiutil.Query(query),\n\t}\n\tparams := &openapi.Params{\n\t\tAction: tea.String(\"CreateOrUpdateAssetGroup\"),\n\t\tVersion: tea.String(\"2018-12-03\"),\n\t\tProtocol: tea.String(\"HTTPS\"),\n\t\tPathname: tea.String(\"/\"),\n\t\tMethod: tea.String(\"POST\"),\n\t\tAuthType: tea.String(\"AK\"),\n\t\tStyle: tea.String(\"RPC\"),\n\t\tReqBodyType: tea.String(\"formData\"),\n\t\tBodyType: tea.String(\"json\"),\n\t}\n\t_result = &CreateOrUpdateAssetGroupResponse{}\n\t_body, _err := client.CallApi(params, req, runtime)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\t_err = tea.Convert(_body, &_result)\n\treturn _result, _err\n}", "func (client *DataCollectionEndpointsClient) listByResourceGroupHandleResponse(resp *http.Response) (DataCollectionEndpointsListByResourceGroupResponse, error) {\n\tresult := DataCollectionEndpointsListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DataCollectionEndpointResourceListResult); err != nil {\n\t\treturn DataCollectionEndpointsListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ServersClient) createOrUpdateHandleResponse(resp *http.Response) (ServersClientCreateOrUpdateResponse, error) {\n\tresult := ServersClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Server); err != nil {\n\t\treturn ServersClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DataCollectionEndpointsClient) listByResourceGroupHandleResponse(resp *http.Response) (DataCollectionEndpointsClientListByResourceGroupResponse, error) {\n\tresult := DataCollectionEndpointsClientListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DataCollectionEndpointResourceListResult); err != nil {\n\t\treturn DataCollectionEndpointsClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client IdentityClient) createGroup(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/groups\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateGroupResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (client *CapacitiesClient) listByResourceGroupHandleResponse(resp *http.Response) (CapacitiesClientListByResourceGroupResponse, error) 
{\n\tresult := CapacitiesClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DedicatedCapacities); err != nil {\n\t\treturn CapacitiesClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (mg *Groups) Create(group *Group) error {\n\n\tif mg.group != nil && len(group.ID) > 0 {\n\n\t\tpath := fmt.Sprintf(\"%s%s\", marathon.APIGroups, utilities.DelInitialSlash(group.ID))\n\n\t\tif _, err := mg.client.Session.BodyAsJSON(group).Post(path, mg.deploy, mg.fail); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmg.group = group\n\t\treturn nil\n\t}\n\treturn errors.New(\"group cannot be null nor empty\")\n}", "func (client *SQLVirtualMachinesClient) listByResourceGroupHandleResponse(resp *http.Response) (SQLVirtualMachinesClientListByResourceGroupResponse, error) {\n\tresult := SQLVirtualMachinesClientListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ListResult); err != nil {\n\t\treturn SQLVirtualMachinesClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ConfigurationProfilesVersionsClient) createOrUpdateHandleResponse(resp *http.Response) (ConfigurationProfilesVersionsClientCreateOrUpdateResponse, error) {\n\tresult := ConfigurationProfilesVersionsClientCreateOrUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ConfigurationProfile); err != nil {\n\t\treturn ConfigurationProfilesVersionsClientCreateOrUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client WorkloadNetworksClient) UpdateVMGroupResponder(resp *http.Response) (result WorkloadNetworkVMGroup, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func CreateDescribeContainerGroupMetricResponse() (response *DescribeContainerGroupMetricResponse) {\n\tresponse = &DescribeContainerGroupMetricResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}" ]
[ "0.64995533", "0.6087047", "0.59565717", "0.577954", "0.5716804", "0.56633455", "0.5627714", "0.5607213", "0.55736977", "0.55376047", "0.5450639", "0.5443918", "0.532295", "0.52404857", "0.5232235", "0.52103275", "0.5207438", "0.5142607", "0.51312053", "0.5121249", "0.5118213", "0.5115343", "0.51078653", "0.5090818", "0.50841004", "0.50643474", "0.5059899", "0.5058732", "0.50565803", "0.5049206", "0.5016078", "0.501564", "0.5000634", "0.49947518", "0.49925056", "0.4988056", "0.4973801", "0.49694273", "0.49693176", "0.4960507", "0.495183", "0.49511233", "0.49451002", "0.49436527", "0.49384502", "0.4937776", "0.49281156", "0.492194", "0.4916605", "0.49024308", "0.48989365", "0.48889005", "0.48829654", "0.48756486", "0.48692057", "0.48423585", "0.4833977", "0.4833636", "0.48335296", "0.48248693", "0.4820857", "0.4815654", "0.481383", "0.4801977", "0.47981524", "0.47958082", "0.47881398", "0.47864708", "0.47858536", "0.47797567", "0.47783545", "0.47780067", "0.47736576", "0.47677547", "0.4764846", "0.4759437", "0.47556394", "0.47544822", "0.47538254", "0.47428885", "0.4741987", "0.47409332", "0.47322997", "0.47197413", "0.47164053", "0.47131947", "0.47047573", "0.4702847", "0.4692138", "0.46750832", "0.4670615", "0.46699104", "0.4667614", "0.4666687", "0.4664136", "0.4663989", "0.46580124", "0.46571338", "0.46555927", "0.4647787" ]
0.7587764
0
deleteCreateRequest creates the Delete request.
func (client *PolicyDefinitionsClient) deleteCreateRequest(ctx context.Context, policyDefinitionName string, options *PolicyDefinitionsDeleteOptions) (*policy.Request, error) {
	urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}"
	if policyDefinitionName == "" {
		return nil, errors.New("parameter policyDefinitionName cannot be empty")
	}
	urlPath = strings.ReplaceAll(urlPath, "{policyDefinitionName}", url.PathEscape(policyDefinitionName))
	if client.subscriptionID == "" {
		return nil, errors.New("parameter client.subscriptionID cannot be empty")
	}
	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
	req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("api-version", "2021-06-01")
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header.Set("Accept", "application/json")
	return req, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (client *FactoriesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, factoryName string, options *FactoriesClientDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif factoryName == \"\" {\n\t\treturn nil, errors.New(\"parameter factoryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{factoryName}\", url.PathEscape(factoryName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (connection *Connection) CreateDeleteRequest(fnr Fnr) (*DeleteRequest, error) {\n\treturn NewDeleteRequestAdabas(connection.adabasToData, fnr), nil\n}", "func (client *CloudServicesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif cloudServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter cloudServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{cloudServiceName}\", url.PathEscape(cloudServiceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-04\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *subscriptionClient) deleteCreateRequest(ctx context.Context, topicName string, subscriptionName string, options *SubscriptionDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/{topicName}/subscriptions/{subscriptionName}\"\n\tif topicName == \"\" {\n\t\treturn nil, errors.New(\"parameter topicName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{topicName}\", url.PathEscape(topicName))\n\tif subscriptionName == \"\" {\n\t\treturn nil, errors.New(\"parameter subscriptionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionName}\", url.PathEscape(subscriptionName))\n\treq, err := runtime.NewRequest(ctx, 
http.MethodDelete, runtime.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif client.apiVersion != nil {\n\t\treqQP.Set(\"api-version\", \"2017_04\")\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/xml, application/atom+xml\")\n\treturn req, nil\n}", "func (client *CertificateOrdersClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, certificateOrderName string, options *CertificateOrdersClientDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif certificateOrderName == \"\" {\n\t\treturn nil, errors.New(\"parameter certificateOrderName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{certificateOrderName}\", url.PathEscape(certificateOrderName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *RegistrationDefinitionsClient) deleteCreateRequest(ctx context.Context, registrationDefinitionID string, scope string, options *RegistrationDefinitionsClientDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.ManagedServices/registrationDefinitions/{registrationDefinitionId}\"\n\tif registrationDefinitionID == \"\" {\n\t\treturn nil, errors.New(\"parameter registrationDefinitionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registrationDefinitionId}\", url.PathEscape(registrationDefinitionID))\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *AssociationsClient) deleteCreateRequest(ctx context.Context, scope string, associationName string, options *AssociationsClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.CustomProviders/associations/{associationName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\tif associationName == \"\" {\n\t\treturn nil, errors.New(\"parameter associationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{associationName}\", url.PathEscape(associationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := 
req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-09-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *DevicesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, deviceName string, options *DevicesClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HybridNetwork/devices/{deviceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif deviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter deviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{deviceName}\", url.PathEscape(deviceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ClustersClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, options *ClustersClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/serverGroupsv2/{clusterName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-11-08\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *UserMetricsKeysClient) deleteCreateRequest(ctx context.Context, options *UserMetricsKeysClientDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/trafficManagerUserMetricsKeys/default\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = 
[]string{\"application/json\"}\n\treturn req, nil\n}", "func (client *RoleDefinitionsClient) deleteCreateRequest(ctx context.Context, scope string, roleDefinitionID string, options *RoleDefinitionsDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.Authorization/roleDefinitions/{roleDefinitionId}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\tif roleDefinitionID == \"\" {\n\t\treturn nil, errors.New(\"parameter roleDefinitionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{roleDefinitionId}\", url.PathEscape(roleDefinitionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ServersClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, serverName string, options *ServersClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serverName == \"\" {\n\t\treturn nil, errors.New(\"parameter serverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serverName}\", url.PathEscape(serverName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treturn req, nil\n}", "func (client *ManagedClustersClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, resourceName string, options *ManagedClustersClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(resourceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = 
[]string{\"application/json\"}\n\treturn req, nil\n}", "func (client *WorkspacesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, options *WorkspacesBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *CassandraClustersClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, options *CassandraClustersClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-03-15-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ManagedInstancesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, managedInstanceName string, options *ManagedInstancesClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif managedInstanceName == \"\" {\n\t\treturn nil, errors.New(\"parameter managedInstanceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedInstanceName}\", 
url.PathEscape(managedInstanceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treturn req, nil\n}", "func (client *DicomServicesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, dicomServiceName string, workspaceName string, options *DicomServicesClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HealthcareApis/workspaces/{workspaceName}/dicomservices/{dicomServiceName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif dicomServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter dicomServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{dicomServiceName}\", url.PathEscape(dicomServiceName))\n\tif workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *DefenderSettingsClient) deleteCreateRequest(ctx context.Context, options *DefenderSettingsClientDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.IoTSecurity/defenderSettings/default\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *SQLVirtualMachinesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, sqlVirtualMachineName string, options *SQLVirtualMachinesClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/{sqlVirtualMachineName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be 
empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif sqlVirtualMachineName == \"\" {\n\t\treturn nil, errors.New(\"parameter sqlVirtualMachineName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sqlVirtualMachineName}\", url.PathEscape(sqlVirtualMachineName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-03-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treturn req, nil\n}", "func (client *RouteTablesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, routeTableName string, options *RouteTablesBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif routeTableName == \"\" {\n\t\treturn nil, errors.New(\"parameter routeTableName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{routeTableName}\", url.PathEscape(routeTableName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *DatasetsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, factoryName string, datasetName string, options *DatasetsClientDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/datasets/{datasetName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif factoryName == \"\" {\n\t\treturn nil, errors.New(\"parameter factoryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{factoryName}\", url.PathEscape(factoryName))\n\tif datasetName == \"\" {\n\t\treturn nil, errors.New(\"parameter datasetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{datasetName}\", url.PathEscape(datasetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *IPAllocationsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, ipAllocationName string, options *IPAllocationsClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations/{ipAllocationName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif ipAllocationName == \"\" {\n\t\treturn nil, errors.New(\"parameter ipAllocationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ipAllocationName}\", url.PathEscape(ipAllocationName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ContainerClient) deleteCreateRequest(ctx context.Context, options *ContainerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"restype\", \"container\")\n\tif options != nil && options.Timeout != nil {\n\t\treqQP.Set(\"timeout\", strconv.FormatInt(int64(*options.Timeout), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {\n\t\treq.Raw().Header[\"x-ms-lease-id\"] = []string{*leaseAccessConditions.LeaseID}\n\t}\n\tif modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {\n\t\treq.Raw().Header[\"If-Modified-Since\"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}\n\t}\n\tif modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {\n\t\treq.Raw().Header[\"If-Unmodified-Since\"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}\n\t}\n\treq.Raw().Header[\"x-ms-version\"] = []string{\"2020-10-02\"}\n\tif options != nil && options.RequestID != nil {\n\t\treq.Raw().Header[\"x-ms-client-request-id\"] = []string{*options.RequestID}\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/xml\"}\n\treturn req, nil\n}", "func (client *ManagedDatabasesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string, options *ManagedDatabasesClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}\"\n\tif resourceGroupName == \"\" 
{\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif managedInstanceName == \"\" {\n\t\treturn nil, errors.New(\"parameter managedInstanceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedInstanceName}\", url.PathEscape(managedInstanceName))\n\tif databaseName == \"\" {\n\t\treturn nil, errors.New(\"parameter databaseName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{databaseName}\", url.PathEscape(databaseName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treturn req, nil\n}", "func (client *PipelinesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, factoryName string, pipelineName string, options *PipelinesClientDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/pipelines/{pipelineName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif factoryName == \"\" {\n\t\treturn nil, errors.New(\"parameter factoryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{factoryName}\", url.PathEscape(factoryName))\n\tif pipelineName == \"\" {\n\t\treturn nil, errors.New(\"parameter pipelineName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{pipelineName}\", url.PathEscape(pipelineName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *AccountsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, accountName string, options *AccountsClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CognitiveServices/accounts/{accountName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter 
client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ContainerGroupsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, containerGroupName string, options *ContainerGroupsClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerInstance/containerGroups/{containerGroupName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif containerGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter containerGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{containerGroupName}\", url.PathEscape(containerGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *GroupClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, groupID string, ifMatch string, options *GroupDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups/{groupId}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif groupID == \"\" {\n\t\treturn nil, errors.New(\"parameter groupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{groupId}\", url.PathEscape(groupID))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"If-Match\", ifMatch)\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *DedicatedHostsClient) deleteCreateRequest(ctx 
context.Context, resourceGroupName string, hostGroupName string, hostName string, options *DedicatedHostsBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif hostGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter hostGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{hostGroupName}\", url.PathEscape(hostGroupName))\n\tif hostName == \"\" {\n\t\treturn nil, errors.New(\"parameter hostName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{hostName}\", url.PathEscape(hostName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treturn req, nil\n}", "func (client *ClustersClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, options *ClustersBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/clusters/{clusterName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *DedicatedHostsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, options *DedicatedHostsBeginDeleteOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{hostGroupName}\", url.PathEscape(hostGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{hostName}\", url.PathEscape(hostName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodDelete, 
azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = query.Encode()\n\treturn req, nil\n}", "func (client *KeyVaultClient) deleteKeyCreateRequest(ctx context.Context, vaultBaseURL string, keyName string, options *KeyVaultClientDeleteKeyOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys/{key-name}\"\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-name}\", url.PathEscape(keyName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.3\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *KeyVaultClient) deleteKeyCreateRequest(ctx context.Context, vaultBaseURL string, keyName string, options *KeyVaultClientDeleteKeyOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys/{key-name}\"\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-name}\", url.PathEscape(keyName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.2\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *MonitorsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, monitorName string, options *MonitorsClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors/{monitorName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif monitorName == \"\" {\n\t\treturn nil, errors.New(\"parameter monitorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{monitorName}\", url.PathEscape(monitorName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *IotSecuritySolutionClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, solutionName string, options *IotSecuritySolutionClientDeleteOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/iotSecuritySolutions/{solutionName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif solutionName == \"\" {\n\t\treturn nil, errors.New(\"parameter solutionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{solutionName}\", url.PathEscape(solutionName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *APIClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, apiID string, ifMatch string, options *APIClientDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif apiID == \"\" {\n\t\treturn nil, errors.New(\"parameter apiID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{apiId}\", url.PathEscape(apiID))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.DeleteRevisions != nil {\n\t\treqQP.Set(\"deleteRevisions\", strconv.FormatBool(*options.DeleteRevisions))\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"If-Match\"] = []string{ifMatch}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ReplicationsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, registryName string, replicationName string, options *ReplicationsClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, 
\"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif registryName == \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\tif replicationName == \"\" {\n\t\treturn nil, errors.New(\"parameter replicationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{replicationName}\", url.PathEscape(replicationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treturn req, nil\n}", "func (client *FirewallRulesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, serverName string, firewallRuleName string, options *FirewallRulesBeginDeleteOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/servers/{serverName}/firewallRules/{firewallRuleName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serverName == \"\" {\n\t\treturn nil, errors.New(\"parameter serverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serverName}\", url.PathEscape(serverName))\n\tif firewallRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter firewallRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{firewallRuleName}\", url.PathEscape(firewallRuleName))\n\treq, err := azcore.NewRequest(ctx, http.MethodDelete, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2017-12-01\")\n\treq.URL.RawQuery = reqQP.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *IntegrationRuntimeNodesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, factoryName string, integrationRuntimeName string, nodeName string, options *IntegrationRuntimeNodesClientDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/integrationRuntimes/{integrationRuntimeName}/nodes/{nodeName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif factoryName == \"\" {\n\t\treturn nil, errors.New(\"parameter factoryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{factoryName}\", url.PathEscape(factoryName))\n\tif integrationRuntimeName == \"\" {\n\t\treturn nil, 
errors.New(\"parameter integrationRuntimeName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{integrationRuntimeName}\", url.PathEscape(integrationRuntimeName))\n\tif nodeName == \"\" {\n\t\treturn nil, errors.New(\"parameter nodeName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{nodeName}\", url.PathEscape(nodeName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *DiskEncryptionSetsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, options *DiskEncryptionSetsBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif diskEncryptionSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter diskEncryptionSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{diskEncryptionSetName}\", url.PathEscape(diskEncryptionSetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *CapacityReservationsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, options *CapacityReservationsBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}/capacityReservations/{capacityReservationName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif capacityReservationGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter capacityReservationGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{capacityReservationGroupName}\", url.PathEscape(capacityReservationGroupName))\n\tif capacityReservationName == \"\" {\n\t\treturn nil, errors.New(\"parameter capacityReservationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{capacityReservationName}\", url.PathEscape(capacityReservationName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err 
:= runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *SyncGroupsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, serverName string, databaseName string, syncGroupName string, options *SyncGroupsClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/syncGroups/{syncGroupName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serverName == \"\" {\n\t\treturn nil, errors.New(\"parameter serverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serverName}\", url.PathEscape(serverName))\n\tif databaseName == \"\" {\n\t\treturn nil, errors.New(\"parameter databaseName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{databaseName}\", url.PathEscape(databaseName))\n\tif syncGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter syncGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{syncGroupName}\", url.PathEscape(syncGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treturn req, nil\n}", "func CreateDeleteApDeviceRequest() (request *DeleteApDeviceRequest) {\n\trequest = &DeleteApDeviceRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"cloudesl\", \"2020-02-01\", \"DeleteApDevice\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *WebhooksClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, registryName string, webhookName string, options *WebhooksClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif registryName == \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\tif webhookName == \"\" {\n\t\treturn nil, errors.New(\"parameter webhookName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, 
\"{webhookName}\", url.PathEscape(webhookName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treturn req, nil\n}", "func (client *PrivateDNSZoneGroupsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, privateEndpointName string, privateDNSZoneGroupName string, options *PrivateDNSZoneGroupsClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateEndpoints/{privateEndpointName}/privateDnsZoneGroups/{privateDnsZoneGroupName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif privateEndpointName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateEndpointName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateEndpointName}\", url.PathEscape(privateEndpointName))\n\tif privateDNSZoneGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateDNSZoneGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateDnsZoneGroupName}\", url.PathEscape(privateDNSZoneGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ConnectedEnvironmentsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, connectedEnvironmentName string, options *ConnectedEnvironmentsClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.App/connectedEnvironments/{connectedEnvironmentName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif connectedEnvironmentName == \"\" {\n\t\treturn nil, errors.New(\"parameter connectedEnvironmentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{connectedEnvironmentName}\", url.PathEscape(connectedEnvironmentName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client 
*AlertProcessingRulesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, alertProcessingRuleName string, options *AlertProcessingRulesClientDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AlertsManagement/actionRules/{alertProcessingRuleName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif alertProcessingRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter alertProcessingRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{alertProcessingRuleName}\", url.PathEscape(alertProcessingRuleName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-08\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *CapacitiesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, dedicatedCapacityName string, options *CapacitiesClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBIDedicated/capacities/{dedicatedCapacityName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif dedicatedCapacityName == \"\" {\n\t\treturn nil, errors.New(\"parameter dedicatedCapacityName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{dedicatedCapacityName}\", url.PathEscape(dedicatedCapacityName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-01-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *DatasetClient) deleteDatasetCreateRequest(ctx context.Context, datasetName string, options *DatasetBeginDeleteDatasetOptions) (*azcore.Request, error) {\n\turlPath := \"/datasets/{datasetName}\"\n\tif datasetName == \"\" {\n\t\treturn nil, errors.New(\"parameter datasetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{datasetName}\", url.PathEscape(datasetName))\n\treq, err := azcore.NewRequest(ctx, http.MethodDelete, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2019-06-01-preview\")\n\treq.URL.RawQuery = reqQP.Encode()\n\treq.Header.Set(\"Accept\", 
\"application/json\")\n\treturn req, nil\n}", "func (client *NetworkToNetworkInterconnectsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, networkFabricName string, networkToNetworkInterconnectName string, options *NetworkToNetworkInterconnectsClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetworkFabric/networkFabrics/{networkFabricName}/networkToNetworkInterconnects/{networkToNetworkInterconnectName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif networkFabricName == \"\" {\n\t\treturn nil, errors.New(\"parameter networkFabricName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{networkFabricName}\", url.PathEscape(networkFabricName))\n\tif networkToNetworkInterconnectName == \"\" {\n\t\treturn nil, errors.New(\"parameter networkToNetworkInterconnectName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{networkToNetworkInterconnectName}\", url.PathEscape(networkToNetworkInterconnectName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-06-15\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *IscsiTargetsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, diskPoolName string, iscsiTargetName string, options *IscsiTargetsClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StoragePool/diskPools/{diskPoolName}/iscsiTargets/{iscsiTargetName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif diskPoolName == \"\" {\n\t\treturn nil, errors.New(\"parameter diskPoolName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{diskPoolName}\", url.PathEscape(diskPoolName))\n\tif iscsiTargetName == \"\" {\n\t\treturn nil, errors.New(\"parameter iscsiTargetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{iscsiTargetName}\", url.PathEscape(iscsiTargetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *AgentsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, storageMoverName string, agentName string, options *AgentsClientBeginDeleteOptions) (*policy.Request, error) 
{\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorageMover/storageMovers/{storageMoverName}/agents/{agentName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif storageMoverName == \"\" {\n\t\treturn nil, errors.New(\"parameter storageMoverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{storageMoverName}\", url.PathEscape(storageMoverName))\n\tif agentName == \"\" {\n\t\treturn nil, errors.New(\"parameter agentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{agentName}\", url.PathEscape(agentName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-07-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *RoleAssignmentsClient) deleteCreateRequest(ctx context.Context, vaultBaseURL string, scope string, roleAssignmentName string, options *RoleAssignmentsDeleteOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/{scope}/providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}\"\n\tif scope == \"\" {\n\t\treturn nil, errors.New(\"parameter scope cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\tif roleAssignmentName == \"\" {\n\t\treturn nil, errors.New(\"parameter roleAssignmentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{roleAssignmentName}\", url.PathEscape(roleAssignmentName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.3-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ServersClient) deleteCreateRequest(ctx context.Context, resourceGroup string, fluidRelayServerName string, options *ServersClientDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.FluidRelay/fluidRelayServers/{fluidRelayServerName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroup == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroup cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroup}\", url.PathEscape(resourceGroup))\n\tif fluidRelayServerName == \"\" {\n\t\treturn nil, errors.New(\"parameter fluidRelayServerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{fluidRelayServerName}\", url.PathEscape(fluidRelayServerName))\n\treq, err := runtime.NewRequest(ctx, 
http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *VirtualNetworkTapsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, tapName string, options *VirtualNetworkTapsBeginDeleteOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{tapName}\", url.PathEscape(tapName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodDelete, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-07-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *SubscriptionClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, sid string, ifMatch string, options *SubscriptionClientDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/subscriptions/{sid}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif sid == \"\" {\n\t\treturn nil, errors.New(\"parameter sid cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sid}\", url.PathEscape(sid))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"If-Match\"] = []string{ifMatch}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *WebAppsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, name string, options *WebAppsDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = 
strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.DeleteMetrics != nil {\n\t\treqQP.Set(\"deleteMetrics\", strconv.FormatBool(*options.DeleteMetrics))\n\t}\n\tif options != nil && options.DeleteEmptyServerFarm != nil {\n\t\treqQP.Set(\"deleteEmptyServerFarm\", strconv.FormatBool(*options.DeleteEmptyServerFarm))\n\t}\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *GuestAgentsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, virtualMachineName string, name string, options *GuestAgentsClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{virtualMachineName}/guestAgents/{name}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif virtualMachineName == \"\" {\n\t\treturn nil, errors.New(\"parameter virtualMachineName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{virtualMachineName}\", url.PathEscape(virtualMachineName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-10-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ActionsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, ruleID string, actionID string, options *ActionsClientDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/alertRules/{ruleId}/actions/{actionId}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = 
strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\tif ruleID == \"\" {\n\t\treturn nil, errors.New(\"parameter ruleID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ruleId}\", url.PathEscape(ruleID))\n\tif actionID == \"\" {\n\t\treturn nil, errors.New(\"parameter actionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{actionId}\", url.PathEscape(actionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-10-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *AvailabilitySetsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, availabilitySetName string, options *AvailabilitySetsDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif availabilitySetName == \"\" {\n\t\treturn nil, errors.New(\"parameter availabilitySetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{availabilitySetName}\", url.PathEscape(availabilitySetName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treturn req, nil\n}", "func (client *VirtualMachineImageTemplatesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, imageTemplateName string, options *VirtualMachineImageTemplatesClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.VirtualMachineImages/imageTemplates/{imageTemplateName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif imageTemplateName == \"\" {\n\t\treturn nil, errors.New(\"parameter imageTemplateName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{imageTemplateName}\", url.PathEscape(imageTemplateName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-10-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *TagRulesClient) 
deleteCreateRequest(ctx context.Context, resourceGroupName string, monitorName string, ruleSetName string, options *TagRulesClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Dynatrace.Observability/monitors/{monitorName}/tagRules/{ruleSetName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif monitorName == \"\" {\n\t\treturn nil, errors.New(\"parameter monitorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{monitorName}\", url.PathEscape(monitorName))\n\tif ruleSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter ruleSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ruleSetName}\", url.PathEscape(ruleSetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-27\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *VirtualNetworkLinksClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string, options *VirtualNetworkLinksBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/virtualNetworkLinks/{virtualNetworkLinkName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif privateZoneName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateZoneName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateZoneName}\", url.PathEscape(privateZoneName))\n\tif virtualNetworkLinkName == \"\" {\n\t\treturn nil, errors.New(\"parameter virtualNetworkLinkName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{virtualNetworkLinkName}\", url.PathEscape(virtualNetworkLinkName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Raw().Header.Set(\"If-Match\", *options.IfMatch)\n\t}\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *VirtualApplianceSitesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, networkVirtualApplianceName string, siteName string, options *VirtualApplianceSitesBeginDeleteOptions) (*azcore.Request, 
error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}/virtualApplianceSites/{siteName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{networkVirtualApplianceName}\", url.PathEscape(networkVirtualApplianceName))\n\turlPath = strings.ReplaceAll(urlPath, \"{siteName}\", url.PathEscape(siteName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodDelete, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-07-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *IncidentsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, incidentID string, options *IncidentsClientDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/incidents/{incidentId}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\tif incidentID == \"\" {\n\t\treturn nil, errors.New(\"parameter incidentID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{incidentId}\", url.PathEscape(incidentID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-05-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ReplicationsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, registryName string, replicationName string, options *ReplicationsBeginDeleteOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif registryName == \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be 
empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\tif replicationName == \"\" {\n\t\treturn nil, errors.New(\"parameter replicationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{replicationName}\", url.PathEscape(replicationName))\n\treq, err := azcore.NewRequest(ctx, http.MethodDelete, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.URL.RawQuery = reqQP.Encode()\n\treturn req, nil\n}", "func (client *ReplicationvCentersClient) deleteCreateRequest(ctx context.Context, fabricName string, vcenterName string, options *ReplicationvCentersClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationvCenters/{vcenterName}\"\n\tif client.resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(client.resourceName))\n\tif client.resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(client.resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif fabricName == \"\" {\n\t\treturn nil, errors.New(\"parameter fabricName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{fabricName}\", url.PathEscape(fabricName))\n\tif vcenterName == \"\" {\n\t\treturn nil, errors.New(\"parameter vcenterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vcenterName}\", url.PathEscape(vcenterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treturn req, nil\n}", "func (client *NotebookWorkspacesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, accountName string, notebookWorkspaceName NotebookWorkspaceName, options *NotebookWorkspacesBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/notebookWorkspaces/{notebookWorkspaceName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\tif notebookWorkspaceName == \"\" {\n\t\treturn nil, 
errors.New(\"parameter notebookWorkspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{notebookWorkspaceName}\", url.PathEscape(string(notebookWorkspaceName)))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-10-15\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *VirtualMachinesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, virtualMachineName string, options *VirtualMachinesClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{virtualMachineName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif virtualMachineName == \"\" {\n\t\treturn nil, errors.New(\"parameter virtualMachineName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{virtualMachineName}\", url.PathEscape(virtualMachineName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-10-preview\")\n\tif options != nil && options.Force != nil {\n\t\treqQP.Set(\"force\", strconv.FormatBool(*options.Force))\n\t}\n\tif options != nil && options.Retain != nil {\n\t\treqQP.Set(\"retain\", strconv.FormatBool(*options.Retain))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *SchemaRegistryClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, schemaGroupName string, options *SchemaRegistryClientDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/schemagroups/{schemaGroupName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif schemaGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter schemaGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{schemaGroupName}\", url.PathEscape(schemaGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != 
nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-10-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *TaskRunsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, registryName string, taskRunName string, options *TaskRunsClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/taskRuns/{taskRunName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif registryName == \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\tif taskRunName == \"\" {\n\t\treturn nil, errors.New(\"parameter taskRunName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{taskRunName}\", url.PathEscape(taskRunName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *LocalRulestacksClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif localRulestackName == \"\" {\n\t\treturn nil, errors.New(\"parameter localRulestackName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{localRulestackName}\", url.PathEscape(localRulestackName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-29\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *TriggersClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, accountName string, shareSubscriptionName string, triggerName string, options *TriggersClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shareSubscriptions/{shareSubscriptionName}/triggers/{triggerName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\tif shareSubscriptionName == \"\" {\n\t\treturn nil, errors.New(\"parameter shareSubscriptionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{shareSubscriptionName}\", url.PathEscape(shareSubscriptionName))\n\tif triggerName == \"\" {\n\t\treturn nil, errors.New(\"parameter triggerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{triggerName}\", url.PathEscape(triggerName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-09-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *SpatialAnchorsAccountsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, accountName string, options *SpatialAnchorsAccountsClientDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MixedReality/spatialAnchorsAccounts/{accountName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-03-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *KpiClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, hubName string, kpiName string, options *KpiClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CustomerInsights/hubs/{hubName}/kpi/{kpiName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", 
url.PathEscape(resourceGroupName))\n\tif hubName == \"\" {\n\t\treturn nil, errors.New(\"parameter hubName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{hubName}\", url.PathEscape(hubName))\n\tif kpiName == \"\" {\n\t\treturn nil, errors.New(\"parameter kpiName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{kpiName}\", url.PathEscape(kpiName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-04-26\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treturn req, nil\n}", "func (client *AFDOriginsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, profileName string, originGroupName string, originName string, options *AFDOriginsClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/originGroups/{originGroupName}/origins/{originName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif profileName == \"\" {\n\t\treturn nil, errors.New(\"parameter profileName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{profileName}\", url.PathEscape(profileName))\n\tif originGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter originGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{originGroupName}\", url.PathEscape(originGroupName))\n\tif originName == \"\" {\n\t\treturn nil, errors.New(\"parameter originName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{originName}\", url.PathEscape(originName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *PeeringPoliciesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, managedNetworkName string, managedNetworkPeeringPolicyName string, options *PeeringPoliciesClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetwork/managedNetworks/{managedNetworkName}/managedNetworkPeeringPolicies/{managedNetworkPeeringPolicyName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be 
empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif managedNetworkName == \"\" {\n\t\treturn nil, errors.New(\"parameter managedNetworkName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedNetworkName}\", url.PathEscape(managedNetworkName))\n\tif managedNetworkPeeringPolicyName == \"\" {\n\t\treturn nil, errors.New(\"parameter managedNetworkPeeringPolicyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedNetworkPeeringPolicyName}\", url.PathEscape(managedNetworkPeeringPolicyName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *VendorSKUPreviewClient) deleteCreateRequest(ctx context.Context, vendorName string, skuName string, previewSubscription string, options *VendorSKUPreviewClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.HybridNetwork/vendors/{vendorName}/vendorSkus/{skuName}/previewSubscriptions/{previewSubscription}\"\n\tif vendorName == \"\" {\n\t\treturn nil, errors.New(\"parameter vendorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vendorName}\", url.PathEscape(vendorName))\n\tif skuName == \"\" {\n\t\treturn nil, errors.New(\"parameter skuName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{skuName}\", url.PathEscape(skuName))\n\tif previewSubscription == \"\" {\n\t\treturn nil, errors.New(\"parameter previewSubscription cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{previewSubscription}\", url.PathEscape(previewSubscription))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *DataCollectionEndpointsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, dataCollectionEndpointName string, options *DataCollectionEndpointsClientDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionEndpoints/{dataCollectionEndpointName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif dataCollectionEndpointName == \"\" {\n\t\treturn nil, errors.New(\"parameter dataCollectionEndpointName cannot be empty\")\n\t}\n\turlPath = 
strings.ReplaceAll(urlPath, \"{dataCollectionEndpointName}\", url.PathEscape(dataCollectionEndpointName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *CustomAssessmentAutomationsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, customAssessmentAutomationName string, options *CustomAssessmentAutomationsDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Security/customAssessmentAutomations/{customAssessmentAutomationName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif customAssessmentAutomationName == \"\" {\n\t\treturn nil, errors.New(\"parameter customAssessmentAutomationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{customAssessmentAutomationName}\", url.PathEscape(customAssessmentAutomationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *RecordSetsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, relativeRecordSetName string, options *RecordSetsDeleteOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/{recordType}/{relativeRecordSetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif privateZoneName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateZoneName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateZoneName}\", url.PathEscape(privateZoneName))\n\tif recordType == \"\" {\n\t\treturn nil, errors.New(\"parameter recordType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{recordType}\", url.PathEscape(string(recordType)))\n\tif relativeRecordSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter relativeRecordSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{relativeRecordSetName}\", relativeRecordSetName)\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodDelete, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Header.Set(\"If-Match\", *options.IfMatch)\n\t}\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *VideosClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, accountName string, videoName string, options *VideosClientDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/videos/{videoName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\tif videoName == \"\" {\n\t\treturn nil, errors.New(\"parameter videoName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{videoName}\", url.PathEscape(videoName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *KeyVaultClient) deleteSecretCreateRequest(ctx context.Context, vaultBaseURL string, secretName string, options *KeyVaultClientDeleteSecretOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/secrets/{secret-name}\"\n\tif secretName == \"\" {\n\t\treturn nil, errors.New(\"parameter secretName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{secret-name}\", url.PathEscape(secretName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.2\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *DataCollectionEndpointsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, dataCollectionEndpointName string, options *DataCollectionEndpointsDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionEndpoints/{dataCollectionEndpointName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", 
url.PathEscape(resourceGroupName))\n\tif dataCollectionEndpointName == \"\" {\n\t\treturn nil, errors.New(\"parameter dataCollectionEndpointName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{dataCollectionEndpointName}\", url.PathEscape(dataCollectionEndpointName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *RedisClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, name string, options *RedisBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redis/{name}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-12-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *VirtualRoutersClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, virtualRouterName string, options *VirtualRoutersClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif virtualRouterName == \"\" {\n\t\treturn nil, errors.New(\"parameter virtualRouterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{virtualRouterName}\", url.PathEscape(virtualRouterName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *AgentPoolsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, kubernetesClusterName string, agentPoolName string, options *AgentPoolsClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetworkCloud/kubernetesClusters/{kubernetesClusterName}/agentPools/{agentPoolName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif kubernetesClusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter kubernetesClusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{kubernetesClusterName}\", url.PathEscape(kubernetesClusterName))\n\tif agentPoolName == \"\" {\n\t\treturn nil, errors.New(\"parameter agentPoolName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{agentPoolName}\", url.PathEscape(agentPoolName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *CloudServiceRoleInstancesClient) deleteCreateRequest(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, options *CloudServiceRoleInstancesBeginDeleteOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}\"\n\tif roleInstanceName == \"\" {\n\t\treturn nil, errors.New(\"parameter roleInstanceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{roleInstanceName}\", url.PathEscape(roleInstanceName))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif cloudServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter cloudServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{cloudServiceName}\", url.PathEscape(cloudServiceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodDelete, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2021-03-01\")\n\treq.URL.RawQuery = reqQP.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ManagementAssociationsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, managementAssociationName string, options *ManagementAssociationsDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{providerName}/{resourceType}/{resourceName}/providers/Microsoft.OperationsManagement/ManagementAssociations/{managementAssociationName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = 
strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.providerName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.providerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{providerName}\", url.PathEscape(client.providerName))\n\tif client.resourceType == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceType}\", url.PathEscape(client.resourceType))\n\tif client.resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(client.resourceName))\n\tif managementAssociationName == \"\" {\n\t\treturn nil, errors.New(\"parameter managementAssociationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementAssociationName}\", url.PathEscape(managementAssociationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2015-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ApplicationTypeVersionsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, version string, options *ApplicationTypeVersionsClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applicationTypes/{applicationTypeName}/versions/{version}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\tif applicationTypeName == \"\" {\n\t\treturn nil, errors.New(\"parameter applicationTypeName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{applicationTypeName}\", url.PathEscape(applicationTypeName))\n\tif version == \"\" {\n\t\treturn nil, errors.New(\"parameter version cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{version}\", url.PathEscape(version))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *AvailabilityGroupListenersClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, 
sqlVirtualMachineGroupName string, availabilityGroupListenerName string, options *AvailabilityGroupListenersClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachineGroups/{sqlVirtualMachineGroupName}/availabilityGroupListeners/{availabilityGroupListenerName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif sqlVirtualMachineGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter sqlVirtualMachineGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sqlVirtualMachineGroupName}\", url.PathEscape(sqlVirtualMachineGroupName))\n\tif availabilityGroupListenerName == \"\" {\n\t\treturn nil, errors.New(\"parameter availabilityGroupListenerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{availabilityGroupListenerName}\", url.PathEscape(availabilityGroupListenerName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *CustomDomainsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, appName string, domainName string, options *CustomDomainsBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/domains/{domainName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif appName == \"\" {\n\t\treturn nil, errors.New(\"parameter appName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{appName}\", url.PathEscape(appName))\n\tif domainName == \"\" {\n\t\treturn nil, errors.New(\"parameter domainName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{domainName}\", url.PathEscape(domainName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-09-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *PacketCoreDataPlanesClient) 
deleteCreateRequest(ctx context.Context, resourceGroupName string, packetCoreControlPlaneName string, packetCoreDataPlaneName string, options *PacketCoreDataPlanesClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileNetwork/packetCoreControlPlanes/{packetCoreControlPlaneName}/packetCoreDataPlanes/{packetCoreDataPlaneName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif packetCoreControlPlaneName == \"\" {\n\t\treturn nil, errors.New(\"parameter packetCoreControlPlaneName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{packetCoreControlPlaneName}\", url.PathEscape(packetCoreControlPlaneName))\n\tif packetCoreDataPlaneName == \"\" {\n\t\treturn nil, errors.New(\"parameter packetCoreDataPlaneName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{packetCoreDataPlaneName}\", url.PathEscape(packetCoreDataPlaneName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *VirtualMachineScaleSetsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif vmScaleSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter vmScaleSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vmScaleSetName}\", url.PathEscape(vmScaleSetName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.ForceDeletion != nil {\n\t\treqQP.Set(\"forceDeletion\", strconv.FormatBool(*options.ForceDeletion))\n\t}\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treturn req, nil\n}", "func (client *PublicIPAddressesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, publicIPAddressName string, options *PublicIPAddressesClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be 
empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif publicIPAddressName == \"\" {\n\t\treturn nil, errors.New(\"parameter publicIPAddressName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{publicIpAddressName}\", url.PathEscape(publicIPAddressName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *NotebookClient) deleteNotebookCreateRequest(ctx context.Context, notebookName string, options *NotebookClientBeginDeleteNotebookOptions) (*policy.Request, error) {\n\turlPath := \"/notebooks/{notebookName}\"\n\tif notebookName == \"\" {\n\t\treturn nil, errors.New(\"parameter notebookName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{notebookName}\", url.PathEscape(notebookName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.endpoint, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-12-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ProductPolicyClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, productID string, policyID PolicyIDName, ifMatch string, options *ProductPolicyClientDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/products/{productId}/policies/{policyId}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif productID == \"\" {\n\t\treturn nil, errors.New(\"parameter productID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{productId}\", url.PathEscape(productID))\n\tif policyID == \"\" {\n\t\treturn nil, errors.New(\"parameter policyID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{policyId}\", url.PathEscape(string(policyID)))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"If-Match\"] = []string{ifMatch}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, 
nil\n}", "func (client *DataFlowDebugSessionClient) deleteDataFlowDebugSessionCreateRequest(ctx context.Context, request DeleteDataFlowDebugSessionRequest, options *DataFlowDebugSessionClientDeleteDataFlowDebugSessionOptions) (*policy.Request, error) {\n\turlPath := \"/deleteDataFlowDebugSession\"\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-12-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, request); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *SourceControlConfigurationsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, clusterRp string, clusterResourceName string, clusterName string, sourceControlConfigurationName string, options *SourceControlConfigurationsClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/sourceControlConfigurations/{sourceControlConfigurationName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterRp == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterRp cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterRp}\", url.PathEscape(clusterRp))\n\tif clusterResourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterResourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterResourceName}\", url.PathEscape(clusterResourceName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\tif sourceControlConfigurationName == \"\" {\n\t\treturn nil, errors.New(\"parameter sourceControlConfigurationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sourceControlConfigurationName}\", url.PathEscape(sourceControlConfigurationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *CertificateOrdersClient) deleteCertificateCreateRequest(ctx context.Context, resourceGroupName string, certificateOrderName string, name string, options *CertificateOrdersClientDeleteCertificateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = 
strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif certificateOrderName == \"\" {\n\t\treturn nil, errors.New(\"parameter certificateOrderName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{certificateOrderName}\", url.PathEscape(certificateOrderName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}" ]
[ "0.76320505", "0.7584632", "0.75717694", "0.7566521", "0.7531389", "0.75282", "0.7505947", "0.7504906", "0.7492394", "0.7492235", "0.74904436", "0.7432695", "0.7426466", "0.74185693", "0.7404809", "0.73585665", "0.7352774", "0.7342807", "0.7289017", "0.72818315", "0.7280017", "0.72641027", "0.72544336", "0.72520685", "0.7251728", "0.7243615", "0.72096527", "0.7201552", "0.7197194", "0.7185018", "0.717346", "0.71458405", "0.7144982", "0.7132672", "0.7121222", "0.7106942", "0.7105571", "0.7101803", "0.71004266", "0.70893526", "0.7085441", "0.70700926", "0.70616543", "0.70538044", "0.70529956", "0.70505625", "0.7023896", "0.7013103", "0.7009507", "0.7006913", "0.6999188", "0.69968814", "0.69938457", "0.6993641", "0.6988131", "0.6987731", "0.6976566", "0.6971027", "0.6964506", "0.69616055", "0.6955038", "0.6947099", "0.694701", "0.6945518", "0.6942814", "0.69391435", "0.69317603", "0.69313776", "0.6917061", "0.6907204", "0.69042027", "0.69033295", "0.69003856", "0.68977785", "0.68939674", "0.6884883", "0.68807447", "0.6877372", "0.68771124", "0.6871673", "0.6871498", "0.6854659", "0.6841991", "0.68349135", "0.68341315", "0.6832094", "0.68157667", "0.67991", "0.67865884", "0.6780297", "0.67801976", "0.6770231", "0.67655265", "0.6762833", "0.6749718", "0.67331254", "0.6730649", "0.67276996", "0.6691175", "0.6686218" ]
0.7662335
0
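Every record in this dump centers on the same generated request-builder pattern: validate each path parameter, substitute it into the URL template with url.PathEscape, pin the api-version query parameter, and set the Accept header. The sketch below reproduces that pattern for the DeleteAtManagementGroup operation shown in the next record, using only the Go standard library rather than the azcore runtime; the helper name, endpoint value, and sample arguments are illustrative assumptions and are not part of any Azure SDK.

package main

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

// buildDeleteAtManagementGroupRequest mirrors the generated clients above:
// check each path parameter, escape it into the URL template, add the
// api-version query parameter, and set the Accept header.
func buildDeleteAtManagementGroupRequest(ctx context.Context, endpoint, managementGroupID, policyDefinitionName string) (*http.Request, error) {
	const urlPath = "/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}"
	if managementGroupID == "" {
		return nil, errors.New("parameter managementGroupID cannot be empty")
	}
	if policyDefinitionName == "" {
		return nil, errors.New("parameter policyDefinitionName cannot be empty")
	}
	p := strings.ReplaceAll(urlPath, "{managementGroupId}", url.PathEscape(managementGroupID))
	p = strings.ReplaceAll(p, "{policyDefinitionName}", url.PathEscape(policyDefinitionName))
	req, err := http.NewRequestWithContext(ctx, http.MethodDelete, endpoint+p, nil)
	if err != nil {
		return nil, err
	}
	q := req.URL.Query()
	q.Set("api-version", "2021-06-01")
	req.URL.RawQuery = q.Encode()
	req.Header.Set("Accept", "application/json")
	return req, nil
}

func main() {
	// Hypothetical inputs, shown only to demonstrate the resulting URL shape.
	req, err := buildDeleteAtManagementGroupRequest(context.Background(), "https://management.azure.com", "myManagementGroup", "myPolicyDefinition")
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Method, req.URL.String())
}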
deleteAtManagementGroupCreateRequest creates the DeleteAtManagementGroup request.
func (client *PolicyDefinitionsClient) deleteAtManagementGroupCreateRequest(ctx context.Context, policyDefinitionName string, managementGroupID string, options *PolicyDefinitionsDeleteAtManagementGroupOptions) (*policy.Request, error) {
	urlPath := "/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}"
	if policyDefinitionName == "" {
		return nil, errors.New("parameter policyDefinitionName cannot be empty")
	}
	urlPath = strings.ReplaceAll(urlPath, "{policyDefinitionName}", url.PathEscape(policyDefinitionName))
	if managementGroupID == "" {
		return nil, errors.New("parameter managementGroupID cannot be empty")
	}
	urlPath = strings.ReplaceAll(urlPath, "{managementGroupId}", url.PathEscape(managementGroupID))
	req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("api-version", "2021-06-01")
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header.Set("Accept", "application/json")
	return req, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func CreateDeleteCorpGroupRequest() (request *DeleteCorpGroupRequest) {\n\trequest = &DeleteCorpGroupRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Vcs\", \"2020-05-15\", \"DeleteCorpGroup\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *GroupClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, groupID string, ifMatch string, options *GroupDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups/{groupId}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif groupID == \"\" {\n\t\treturn nil, errors.New(\"parameter groupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{groupId}\", url.PathEscape(groupID))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"If-Match\", ifMatch)\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ContainerGroupsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, containerGroupName string, options *ContainerGroupsClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerInstance/containerGroups/{containerGroupName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif containerGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter containerGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{containerGroupName}\", url.PathEscape(containerGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *PolicyDefinitionsClient) createOrUpdateAtManagementGroupCreateRequest(ctx context.Context, policyDefinitionName string, managementGroupID string, parameters PolicyDefinition, options *PolicyDefinitionsCreateOrUpdateAtManagementGroupOptions) (*policy.Request, error) 
{\n\turlPath := \"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}\"\n\tif policyDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter policyDefinitionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{policyDefinitionName}\", url.PathEscape(policyDefinitionName))\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func CreateDeleteVideoDnaGroupRequest() (request *DeleteVideoDnaGroupRequest) {\n\trequest = &DeleteVideoDnaGroupRequest{\n\t\tRoaRequest: &requests.RoaRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Green\", \"2018-05-09\", \"DeleteVideoDnaGroup\", \"/green/video/dna/group/delete\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *PolicyDefinitionsClient) getAtManagementGroupCreateRequest(ctx context.Context, policyDefinitionName string, managementGroupID string, options *PolicyDefinitionsGetAtManagementGroupOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}\"\n\tif policyDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter policyDefinitionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{policyDefinitionName}\", url.PathEscape(policyDefinitionName))\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *PrivateDNSZoneGroupsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, privateEndpointName string, privateDNSZoneGroupName string, options *PrivateDNSZoneGroupsClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateEndpoints/{privateEndpointName}/privateDnsZoneGroups/{privateDnsZoneGroupName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif privateEndpointName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateEndpointName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateEndpointName}\", url.PathEscape(privateEndpointName))\n\tif privateDNSZoneGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateDNSZoneGroupName cannot be empty\")\n\t}\n\turlPath = 
strings.ReplaceAll(urlPath, \"{privateDnsZoneGroupName}\", url.PathEscape(privateDNSZoneGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *SyncGroupsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, serverName string, databaseName string, syncGroupName string, options *SyncGroupsClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/syncGroups/{syncGroupName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serverName == \"\" {\n\t\treturn nil, errors.New(\"parameter serverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serverName}\", url.PathEscape(serverName))\n\tif databaseName == \"\" {\n\t\treturn nil, errors.New(\"parameter databaseName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{databaseName}\", url.PathEscape(databaseName))\n\tif syncGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter syncGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{syncGroupName}\", url.PathEscape(syncGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treturn req, nil\n}", "func (client *LongTermRetentionManagedInstanceBackupsClient) deleteByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, locationName string, managedInstanceName string, databaseName string, backupName string, options *LongTermRetentionManagedInstanceBackupsBeginDeleteByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/locations/{locationName}/longTermRetentionManagedInstances/{managedInstanceName}/longTermRetentionDatabases/{databaseName}/longTermRetentionManagedInstanceBackups/{backupName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif locationName == \"\" {\n\t\treturn nil, errors.New(\"parameter locationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{locationName}\", url.PathEscape(locationName))\n\tif managedInstanceName == \"\" {\n\t\treturn nil, 
errors.New(\"parameter managedInstanceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedInstanceName}\", url.PathEscape(managedInstanceName))\n\tif databaseName == \"\" {\n\t\treturn nil, errors.New(\"parameter databaseName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{databaseName}\", url.PathEscape(databaseName))\n\tif backupName == \"\" {\n\t\treturn nil, errors.New(\"parameter backupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{backupName}\", url.PathEscape(backupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treturn req, nil\n}", "func (c *UDBClient) NewDeleteUDBParamGroupRequest() *DeleteUDBParamGroupRequest {\n\treq := &DeleteUDBParamGroupRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (client *AvailabilitySetsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, availabilitySetName string, options *AvailabilitySetsDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif availabilitySetName == \"\" {\n\t\treturn nil, errors.New(\"parameter availabilitySetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{availabilitySetName}\", url.PathEscape(availabilitySetName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treturn req, nil\n}", "func (client *ClustersClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, options *ClustersClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/serverGroupsv2/{clusterName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, 
errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-11-08\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *PolicyDefinitionsClient) listByManagementGroupCreateRequest(ctx context.Context, managementGroupID string, options *PolicyDefinitionsListByManagementGroupOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions\"\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tunencodedParams := []string{req.Raw().URL.RawQuery}\n\tif options != nil && options.Filter != nil {\n\t\tunencodedParams = append(unencodedParams, \"$filter=\"+*options.Filter)\n\t}\n\treq.Raw().URL.RawQuery = strings.Join(unencodedParams, \"&\")\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ManagementAssociationsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, managementAssociationName string, options *ManagementAssociationsDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{providerName}/{resourceType}/{resourceName}/providers/Microsoft.OperationsManagement/ManagementAssociations/{managementAssociationName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.providerName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.providerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{providerName}\", url.PathEscape(client.providerName))\n\tif client.resourceType == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceType}\", url.PathEscape(client.resourceType))\n\tif client.resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", 
url.PathEscape(client.resourceName))\n\tif managementAssociationName == \"\" {\n\t\treturn nil, errors.New(\"parameter managementAssociationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementAssociationName}\", url.PathEscape(managementAssociationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2015-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *DiskEncryptionSetsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, options *DiskEncryptionSetsBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif diskEncryptionSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter diskEncryptionSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{diskEncryptionSetName}\", url.PathEscape(diskEncryptionSetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ClustersClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, options *ClustersBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/clusters/{clusterName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateRemoveAppGroupRequest() (request *RemoveAppGroupRequest) {\n\trequest = &RemoveAppGroupRequest{\n\t\tRoaRequest: &requests.RoaRequest{},\n\t}\n\trequest.InitWithApiInfo(\"OpenSearch\", 
\"2017-12-25\", \"RemoveAppGroup\", \"/v4/openapi/app-groups/[appGroupIdentity]\", \"\", \"\")\n\trequest.Method = requests.DELETE\n\treturn\n}", "func NewDeleteaspecificPeeringGroupRequest(server string, id string) (*http.Request, error) {\n\tvar err error\n\n\tvar pathParam0 string\n\n\tpathParam0, err = runtime.StyleParam(\"simple\", false, \"id\", id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tqueryUrl, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbasePath := fmt.Sprintf(\"/peeringgroups/%s\", pathParam0)\n\tif basePath[0] == '/' {\n\t\tbasePath = basePath[1:]\n\t}\n\n\tqueryUrl, err = queryUrl.Parse(basePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"DELETE\", queryUrl.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn req, nil\n}", "func (client *LocalRulestacksClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif localRulestackName == \"\" {\n\t\treturn nil, errors.New(\"parameter localRulestackName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{localRulestackName}\", url.PathEscape(localRulestackName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-29\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *TagRulesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, monitorName string, ruleSetName string, options *TagRulesClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Dynatrace.Observability/monitors/{monitorName}/tagRules/{ruleSetName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif monitorName == \"\" {\n\t\treturn nil, errors.New(\"parameter monitorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{monitorName}\", url.PathEscape(monitorName))\n\tif ruleSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter ruleSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ruleSetName}\", url.PathEscape(ruleSetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), 
urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-27\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *VirtualMachineScaleSetsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif vmScaleSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter vmScaleSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vmScaleSetName}\", url.PathEscape(vmScaleSetName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.ForceDeletion != nil {\n\t\treqQP.Set(\"forceDeletion\", strconv.FormatBool(*options.ForceDeletion))\n\t}\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treturn req, nil\n}", "func (client *ManagedInstancesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, managedInstanceName string, options *ManagedInstancesClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif managedInstanceName == \"\" {\n\t\treturn nil, errors.New(\"parameter managedInstanceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedInstanceName}\", url.PathEscape(managedInstanceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treturn req, nil\n}", "func (client *MonitorsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, monitorName string, options *MonitorsClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors/{monitorName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, 
\"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif monitorName == \"\" {\n\t\treturn nil, errors.New(\"parameter monitorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{monitorName}\", url.PathEscape(monitorName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *CustomAssessmentAutomationsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, customAssessmentAutomationName string, options *CustomAssessmentAutomationsDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Security/customAssessmentAutomations/{customAssessmentAutomationName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif customAssessmentAutomationName == \"\" {\n\t\treturn nil, errors.New(\"parameter customAssessmentAutomationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{customAssessmentAutomationName}\", url.PathEscape(customAssessmentAutomationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ManagedClustersClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, resourceName string, options *ManagedClustersClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(resourceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", 
\"2020-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *DedicatedHostsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, options *DedicatedHostsBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif hostGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter hostGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{hostGroupName}\", url.PathEscape(hostGroupName))\n\tif hostName == \"\" {\n\t\treturn nil, errors.New(\"parameter hostName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{hostName}\", url.PathEscape(hostName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treturn req, nil\n}", "func CreateDeleteCasterEpisodeGroupRequest() (request *DeleteCasterEpisodeGroupRequest) {\n\trequest = &DeleteCasterEpisodeGroupRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"live\", \"2016-11-01\", \"DeleteCasterEpisodeGroup\", \"live\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (c *UMemClient) NewCreateUMemcacheGroupRequest() *CreateUMemcacheGroupRequest {\n\treq := &CreateUMemcacheGroupRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(false)\n\treturn req\n}", "func (client *AvailabilityGroupListenersClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, sqlVirtualMachineGroupName string, availabilityGroupListenerName string, options *AvailabilityGroupListenersClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachineGroups/{sqlVirtualMachineGroupName}/availabilityGroupListeners/{availabilityGroupListenerName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif sqlVirtualMachineGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter sqlVirtualMachineGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sqlVirtualMachineGroupName}\", url.PathEscape(sqlVirtualMachineGroupName))\n\tif availabilityGroupListenerName == \"\" {\n\t\treturn nil, errors.New(\"parameter availabilityGroupListenerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{availabilityGroupListenerName}\", 
url.PathEscape(availabilityGroupListenerName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *SapMonitorsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, sapMonitorName string, options *SapMonitorsClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HanaOnAzure/sapMonitors/{sapMonitorName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif sapMonitorName == \"\" {\n\t\treturn nil, errors.New(\"parameter sapMonitorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sapMonitorName}\", url.PathEscape(sapMonitorName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-02-07-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *CapacityReservationsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, options *CapacityReservationsBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}/capacityReservations/{capacityReservationName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif capacityReservationGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter capacityReservationGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{capacityReservationGroupName}\", url.PathEscape(capacityReservationGroupName))\n\tif capacityReservationName == \"\" {\n\t\treturn nil, errors.New(\"parameter capacityReservationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{capacityReservationName}\", url.PathEscape(capacityReservationName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *RecordSetsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, relativeRecordSetName string, options *RecordSetsDeleteOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/{recordType}/{relativeRecordSetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif privateZoneName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateZoneName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateZoneName}\", url.PathEscape(privateZoneName))\n\tif recordType == \"\" {\n\t\treturn nil, errors.New(\"parameter recordType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{recordType}\", url.PathEscape(string(recordType)))\n\tif relativeRecordSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter relativeRecordSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{relativeRecordSetName}\", relativeRecordSetName)\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodDelete, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Header.Set(\"If-Match\", *options.IfMatch)\n\t}\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *DedicatedHostsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, options *DedicatedHostsBeginDeleteOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{hostGroupName}\", url.PathEscape(hostGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{hostName}\", url.PathEscape(hostName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodDelete, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = query.Encode()\n\treturn req, nil\n}", "func (client *IPAllocationsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, ipAllocationName string, options *IPAllocationsClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations/{ipAllocationName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif ipAllocationName == \"\" {\n\t\treturn nil, errors.New(\"parameter ipAllocationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ipAllocationName}\", url.PathEscape(ipAllocationName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *SchemaRegistryClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, schemaGroupName string, options *SchemaRegistryClientDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/schemagroups/{schemaGroupName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif schemaGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter schemaGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{schemaGroupName}\", url.PathEscape(schemaGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-10-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *KpiClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, hubName string, kpiName string, options *KpiClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CustomerInsights/hubs/{hubName}/kpi/{kpiName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif hubName == \"\" {\n\t\treturn nil, errors.New(\"parameter hubName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{hubName}\", url.PathEscape(hubName))\n\tif kpiName == \"\" {\n\t\treturn 
nil, errors.New(\"parameter kpiName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{kpiName}\", url.PathEscape(kpiName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-04-26\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treturn req, nil\n}", "func NewAttachGroupPolicyRequestWithoutParam() *AttachGroupPolicyRequest {\n\n return &AttachGroupPolicyRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/group/{groupName}:attachGroupPolicy\",\n Method: \"POST\",\n Header: nil,\n Version: \"v1\",\n },\n }\n}", "func (client *RegistrationDefinitionsClient) deleteCreateRequest(ctx context.Context, registrationDefinitionID string, scope string, options *RegistrationDefinitionsClientDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.ManagedServices/registrationDefinitions/{registrationDefinitionId}\"\n\tif registrationDefinitionID == \"\" {\n\t\treturn nil, errors.New(\"parameter registrationDefinitionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registrationDefinitionId}\", url.PathEscape(registrationDefinitionID))\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *AgentPoolsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, kubernetesClusterName string, agentPoolName string, options *AgentPoolsClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetworkCloud/kubernetesClusters/{kubernetesClusterName}/agentPools/{agentPoolName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif kubernetesClusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter kubernetesClusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{kubernetesClusterName}\", url.PathEscape(kubernetesClusterName))\n\tif agentPoolName == \"\" {\n\t\treturn nil, errors.New(\"parameter agentPoolName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{agentPoolName}\", url.PathEscape(agentPoolName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateDeleteDegradeControlRequest() (request 
*DeleteDegradeControlRequest) {\n\trequest = &DeleteDegradeControlRequest{\n\t\tRoaRequest: &requests.RoaRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Edas\", \"2017-08-01\", \"DeleteDegradeControl\", \"/pop/v5/degradeControl\", \"Edas\", \"openAPI\")\n\trequest.Method = requests.DELETE\n\treturn\n}", "func (rm *resourceManager) newDeleteRequestPayload(\n\tr *resource,\n) (*svcsdk.DeleteReplicationGroupInput, error) {\n\tres := &svcsdk.DeleteReplicationGroupInput{}\n\n\tif r.ko.Spec.ReplicationGroupID != nil {\n\t\tres.SetReplicationGroupId(*r.ko.Spec.ReplicationGroupID)\n\t}\n\n\treturn res, nil\n}", "func (client *SQLVirtualMachinesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, sqlVirtualMachineName string, options *SQLVirtualMachinesClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/{sqlVirtualMachineName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif sqlVirtualMachineName == \"\" {\n\t\treturn nil, errors.New(\"parameter sqlVirtualMachineName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sqlVirtualMachineName}\", url.PathEscape(sqlVirtualMachineName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-03-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treturn req, nil\n}", "func (client *MetricAlertsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, ruleName string, options *MetricAlertsClientDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/metricAlerts/{ruleName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif ruleName == \"\" {\n\t\treturn nil, errors.New(\"parameter ruleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ruleName}\", url.PathEscape(ruleName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func NewDeleteConsumerGroupCommand(f *factory.Factory) *cobra.Command {\n\topts := &Options{\n\t\tConnection: f.Connection,\n\t\tCfgHandler: f.CfgHandler,\n\t\tIO: f.IOStreams,\n\t\tLogger: f.Logger,\n\t\tlocalizer: f.Localizer,\n\t}\n\n\tcmd := 
&cobra.Command{\n\t\tUse: opts.localizer.LocalizeByID(\"kafka.consumerGroup.delete.cmd.use\"),\n\t\tShort: opts.localizer.LocalizeByID(\"kafka.consumerGroup.delete.cmd.shortDescription\"),\n\t\tLong: opts.localizer.LocalizeByID(\"kafka.consumerGroup.delete.cmd.longDescription\"),\n\t\tExample: opts.localizer.LocalizeByID(\"kafka.consumerGroup.delete.cmd.example\"),\n\t\tArgs: cobra.NoArgs,\n\t\tRunE: func(cmd *cobra.Command, args []string) (err error) {\n\t\t\tif opts.kafkaID != \"\" {\n\t\t\t\treturn runCmd(opts)\n\t\t\t}\n\n\t\t\tif !f.CfgHandler.Cfg.HasKafka() {\n\t\t\t\treturn errors.New(opts.localizer.LocalizeByID(\"kafka.consumerGroup.common.error.noKafkaSelected\"))\n\t\t\t}\n\n\t\t\topts.kafkaID = opts.CfgHandler.Cfg.Services.Kafka.ClusterID\n\n\t\t\treturn runCmd(opts)\n\t\t},\n\t}\n\n\topts.localizer.LocalizeByID(\"kafka.consumerGroup.common.flag.id.description\", localize.NewEntry(\"Action\", \"delete\"))\n\tcmd.Flags().BoolVarP(&opts.skipConfirm, \"yes\", \"y\", false, opts.localizer.LocalizeByID(\"kafka.consumerGroup.delete.flag.yes.description\"))\n\tcmd.Flags().StringVar(&opts.id, \"id\", \"\", opts.localizer.LocalizeByID(\"kafka.consumerGroup.common.flag.id.description\", localize.NewEntry(\"Action\", \"delete\")))\n\t_ = cmd.MarkFlagRequired(\"id\")\n\n\t// flag based completions for ID\n\t_ = cmd.RegisterFlagCompletionFunc(\"id\", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {\n\t\treturn cmdutil.FilterValidConsumerGroupIDs(f, toComplete)\n\t})\n\n\treturn cmd\n}", "func NewDeploymentAtManagementGroupScope(ctx *pulumi.Context,\n\tname string, args *DeploymentAtManagementGroupScopeArgs, opts ...pulumi.ResourceOption) (*DeploymentAtManagementGroupScope, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.GroupId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'GroupId'\")\n\t}\n\tif args.Properties == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Properties'\")\n\t}\n\taliases := pulumi.Aliases([]pulumi.Alias{\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20190501:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20190510:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20190510:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20190701:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20190701:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20190801:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20190801:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20191001:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20191001:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20200601:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: 
pulumi.String(\"azure-nextgen:resources/v20200601:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20200801:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20200801:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20201001:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20201001:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20210101:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20210101:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20210401:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20210401:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t})\n\topts = append(opts, aliases)\n\tvar resource DeploymentAtManagementGroupScope\n\terr := ctx.RegisterResource(\"azure-native:resources/v20190501:DeploymentAtManagementGroupScope\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (client *CloudServicesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif cloudServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter cloudServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{cloudServiceName}\", url.PathEscape(cloudServiceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-04\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (m *MockRDSAPI) DeleteOptionGroupRequest(arg0 *rds.DeleteOptionGroupInput) (*request.Request, *rds.DeleteOptionGroupOutput) {\n\tret := m.ctrl.Call(m, \"DeleteOptionGroupRequest\", arg0)\n\tret0, _ := ret[0].(*request.Request)\n\tret1, _ := ret[1].(*rds.DeleteOptionGroupOutput)\n\treturn ret0, ret1\n}", "func (client *SourceControlConfigurationsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, clusterRp string, clusterResourceName string, clusterName string, sourceControlConfigurationName string, options *SourceControlConfigurationsClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/sourceControlConfigurations/{sourceControlConfigurationName}\"\n\tif 
client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterRp == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterRp cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterRp}\", url.PathEscape(clusterRp))\n\tif clusterResourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterResourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterResourceName}\", url.PathEscape(clusterResourceName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\tif sourceControlConfigurationName == \"\" {\n\t\treturn nil, errors.New(\"parameter sourceControlConfigurationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sourceControlConfigurationName}\", url.PathEscape(sourceControlConfigurationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func NewRemoveControlFromGroupParams() *RemoveControlFromGroupParams {\n\tvar ()\n\treturn &RemoveControlFromGroupParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}", "func (client *DevicesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, deviceName string, options *DevicesClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HybridNetwork/devices/{deviceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif deviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter deviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{deviceName}\", url.PathEscape(deviceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *PacketCoreDataPlanesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, packetCoreControlPlaneName string, packetCoreDataPlaneName string, options *PacketCoreDataPlanesClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileNetwork/packetCoreControlPlanes/{packetCoreControlPlaneName}/packetCoreDataPlanes/{packetCoreDataPlaneName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif packetCoreControlPlaneName == \"\" {\n\t\treturn nil, errors.New(\"parameter packetCoreControlPlaneName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{packetCoreControlPlaneName}\", url.PathEscape(packetCoreControlPlaneName))\n\tif packetCoreDataPlaneName == \"\" {\n\t\treturn nil, errors.New(\"parameter packetCoreDataPlaneName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{packetCoreDataPlaneName}\", url.PathEscape(packetCoreDataPlaneName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *AlertProcessingRulesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, alertProcessingRuleName string, options *AlertProcessingRulesClientDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AlertsManagement/actionRules/{alertProcessingRuleName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif alertProcessingRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter alertProcessingRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{alertProcessingRuleName}\", url.PathEscape(alertProcessingRuleName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-08\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ReplicationvCentersClient) deleteCreateRequest(ctx context.Context, fabricName string, vcenterName string, options *ReplicationvCentersClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationvCenters/{vcenterName}\"\n\tif client.resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(client.resourceName))\n\tif client.resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceGroupName cannot be 
empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(client.resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif fabricName == \"\" {\n\t\treturn nil, errors.New(\"parameter fabricName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{fabricName}\", url.PathEscape(fabricName))\n\tif vcenterName == \"\" {\n\t\treturn nil, errors.New(\"parameter vcenterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vcenterName}\", url.PathEscape(vcenterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treturn req, nil\n}", "func NewDeleteSecurityGroupRequest(server string, id string) (*http.Request, error) {\n\tvar err error\n\n\tvar pathParam0 string\n\n\tpathParam0, err = runtime.StyleParam(\"simple\", false, \"id\", id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tqueryUrl, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbasePath := fmt.Sprintf(\"/security-group/%s\", pathParam0)\n\tif basePath[0] == '/' {\n\t\tbasePath = basePath[1:]\n\t}\n\n\tqueryUrl, err = queryUrl.Parse(basePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"DELETE\", queryUrl.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn req, nil\n}", "func (client *CapacitiesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, dedicatedCapacityName string, options *CapacitiesClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBIDedicated/capacities/{dedicatedCapacityName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif dedicatedCapacityName == \"\" {\n\t\treturn nil, errors.New(\"parameter dedicatedCapacityName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{dedicatedCapacityName}\", url.PathEscape(dedicatedCapacityName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-01-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *CertificateOrdersClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, certificateOrderName string, options *CertificateOrdersClientDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, 
errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif certificateOrderName == \"\" {\n\t\treturn nil, errors.New(\"parameter certificateOrderName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{certificateOrderName}\", url.PathEscape(certificateOrderName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ConnectedEnvironmentsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, connectedEnvironmentName string, options *ConnectedEnvironmentsClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.App/connectedEnvironments/{connectedEnvironmentName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif connectedEnvironmentName == \"\" {\n\t\treturn nil, errors.New(\"parameter connectedEnvironmentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{connectedEnvironmentName}\", url.PathEscape(connectedEnvironmentName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ManagedDatabasesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string, options *ManagedDatabasesClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif managedInstanceName == \"\" {\n\t\treturn nil, errors.New(\"parameter managedInstanceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedInstanceName}\", url.PathEscape(managedInstanceName))\n\tif databaseName == \"\" {\n\t\treturn nil, errors.New(\"parameter databaseName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{databaseName}\", url.PathEscape(databaseName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter 
client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treturn req, nil\n}", "func (r *DeviceManagementScriptGroupAssignmentRequest) Delete(ctx context.Context) error {\n\treturn r.JSONRequest(ctx, \"DELETE\", \"\", nil, nil)\n}", "func (client *HCRPAssignmentsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, guestConfigurationAssignmentName string, machineName string, options *HCRPAssignmentsClientDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HybridCompute/machines/{machineName}/providers/Microsoft.GuestConfiguration/guestConfigurationAssignments/{guestConfigurationAssignmentName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif guestConfigurationAssignmentName == \"\" {\n\t\treturn nil, errors.New(\"parameter guestConfigurationAssignmentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{guestConfigurationAssignmentName}\", url.PathEscape(guestConfigurationAssignmentName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif machineName == \"\" {\n\t\treturn nil, errors.New(\"parameter machineName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{machineName}\", url.PathEscape(machineName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-25\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *CassandraClustersClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, options *CassandraClustersClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := 
req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-03-15-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateDeleteApDeviceRequest() (request *DeleteApDeviceRequest) {\n\trequest = &DeleteApDeviceRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"cloudesl\", \"2020-02-01\", \"DeleteApDevice\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *ContainerGroupsClient) stopCreateRequest(ctx context.Context, resourceGroupName string, containerGroupName string, options *ContainerGroupsClientStopOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerInstance/containerGroups/{containerGroupName}/stop\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif containerGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter containerGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{containerGroupName}\", url.PathEscape(containerGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (o *PcloudPlacementgroupsMembersDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param cloud_instance_id\n\tif err := r.SetPathParam(\"cloud_instance_id\", o.CloudInstanceID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param placement_group_id\n\tif err := r.SetPathParam(\"placement_group_id\", o.PlacementGroupID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (client IdentityClient) deleteGroup(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodDelete, \"/groups/{groupId}\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response DeleteGroupResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (client *Client) DeleteGroupWithOptions(request *DeleteGroupRequest, runtime *util.RuntimeOptions) (_result *DeleteGroupResponse, _err error) {\n\t_err = util.ValidateModel(request)\n\tif _err != nil {\n\t\treturn _result, 
_err\n\t}\n\tquery := map[string]interface{}{}\n\tif !tea.BoolValue(util.IsUnset(request.GroupId)) {\n\t\tquery[\"GroupId\"] = request.GroupId\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.SourceIp)) {\n\t\tquery[\"SourceIp\"] = request.SourceIp\n\t}\n\n\treq := &openapi.OpenApiRequest{\n\t\tQuery: openapiutil.Query(query),\n\t}\n\tparams := &openapi.Params{\n\t\tAction: tea.String(\"DeleteGroup\"),\n\t\tVersion: tea.String(\"2018-12-03\"),\n\t\tProtocol: tea.String(\"HTTPS\"),\n\t\tPathname: tea.String(\"/\"),\n\t\tMethod: tea.String(\"POST\"),\n\t\tAuthType: tea.String(\"AK\"),\n\t\tStyle: tea.String(\"RPC\"),\n\t\tReqBodyType: tea.String(\"formData\"),\n\t\tBodyType: tea.String(\"json\"),\n\t}\n\t_result = &DeleteGroupResponse{}\n\t_body, _err := client.CallApi(params, req, runtime)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\t_err = tea.Convert(_body, &_result)\n\treturn _result, _err\n}", "func (ooc *MockOpenoltClient) DeleteGroup(ctx context.Context, in *openolt.Group, opts ...grpc.CallOption) (*openolt.Empty, error) {\n\treturn &openolt.Empty{}, nil\n}", "func (client *ContainerGroupsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ContainerGroupsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerInstance/containerGroups\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ReplicationProtectionContainersClient) deleteCreateRequest(ctx context.Context, resourceName string, resourceGroupName string, fabricName string, protectionContainerName string, options *ReplicationProtectionContainersClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/remove\"\n\tif resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(resourceName))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif fabricName == \"\" {\n\t\treturn nil, errors.New(\"parameter fabricName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, 
\"{fabricName}\", url.PathEscape(fabricName))\n\tif protectionContainerName == \"\" {\n\t\treturn nil, errors.New(\"parameter protectionContainerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{protectionContainerName}\", url.PathEscape(protectionContainerName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-10-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treturn req, nil\n}", "func CreateCreateApplicationGroupRequest() (request *CreateApplicationGroupRequest) {\n\trequest = &CreateApplicationGroupRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"oos\", \"2019-06-01\", \"CreateApplicationGroup\", \"oos\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func NewDeleteMsgVpnAuthorizationGroupOK() *DeleteMsgVpnAuthorizationGroupOK {\n\treturn &DeleteMsgVpnAuthorizationGroupOK{}\n}", "func (client *DiskEncryptionSetsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *DiskEncryptionSetsListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *UserMetricsKeysClient) deleteCreateRequest(ctx context.Context, options *UserMetricsKeysClientDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/trafficManagerUserMetricsKeys/default\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *PeeringPoliciesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, managedNetworkName string, managedNetworkPeeringPolicyName string, options *PeeringPoliciesClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetwork/managedNetworks/{managedNetworkName}/managedNetworkPeeringPolicies/{managedNetworkPeeringPolicyName}\"\n\tif client.subscriptionID == \"\" 
{\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif managedNetworkName == \"\" {\n\t\treturn nil, errors.New(\"parameter managedNetworkName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedNetworkName}\", url.PathEscape(managedNetworkName))\n\tif managedNetworkPeeringPolicyName == \"\" {\n\t\treturn nil, errors.New(\"parameter managedNetworkPeeringPolicyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedNetworkPeeringPolicyName}\", url.PathEscape(managedNetworkPeeringPolicyName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateDeleteVpdGrantRuleRequest() (request *DeleteVpdGrantRuleRequest) {\n\trequest = &DeleteVpdGrantRuleRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"eflo\", \"2022-05-30\", \"DeleteVpdGrantRule\", \"eflo\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (m *MockRDSAPI) DeleteDBClusterParameterGroupRequest(arg0 *rds.DeleteDBClusterParameterGroupInput) (*request.Request, *rds.DeleteDBClusterParameterGroupOutput) {\n\tret := m.ctrl.Call(m, \"DeleteDBClusterParameterGroupRequest\", arg0)\n\tret0, _ := ret[0].(*request.Request)\n\tret1, _ := ret[1].(*rds.DeleteDBClusterParameterGroupOutput)\n\treturn ret0, ret1\n}", "func (s *GroupsService) Delete(\n\tctx context.Context,\n\tgroupName string,\n) error {\n\traw, err := json.Marshal(struct {\n\t\tGroupName string `json:\"group_name\"`\n\t}{\n\t\tgroupName,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\treq, err := http.NewRequest(\n\t\thttp.MethodPost,\n\t\ts.client.url+\"2.0/groups/delete\",\n\t\tbytes.NewBuffer(raw),\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq = req.WithContext(ctx)\n\tres, err := s.client.client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif res.StatusCode >= 300 || res.StatusCode <= 199 {\n\t\treturn fmt.Errorf(\n\t\t\t\"Failed to returns 2XX response: %d\", res.StatusCode)\n\t}\n\n\treturn nil\n}", "func (client *AgentsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, storageMoverName string, agentName string, options *AgentsClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorageMover/storageMovers/{storageMoverName}/agents/{agentName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif storageMoverName == \"\" {\n\t\treturn nil, errors.New(\"parameter 
storageMoverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{storageMoverName}\", url.PathEscape(storageMoverName))\n\tif agentName == \"\" {\n\t\treturn nil, errors.New(\"parameter agentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{agentName}\", url.PathEscape(agentName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-07-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *RedisClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, name string, options *RedisBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redis/{name}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-12-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *VirtualMachineImageTemplatesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, imageTemplateName string, options *VirtualMachineImageTemplatesClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.VirtualMachineImages/imageTemplates/{imageTemplateName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif imageTemplateName == \"\" {\n\t\treturn nil, errors.New(\"parameter imageTemplateName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{imageTemplateName}\", url.PathEscape(imageTemplateName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-10-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *RoleAssignmentsClient) deleteCreateRequest(ctx context.Context, vaultBaseURL string, scope string, roleAssignmentName string, options *RoleAssignmentsDeleteOptions) (*policy.Request, 
error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/{scope}/providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}\"\n\tif scope == \"\" {\n\t\treturn nil, errors.New(\"parameter scope cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\tif roleAssignmentName == \"\" {\n\t\treturn nil, errors.New(\"parameter roleAssignmentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{roleAssignmentName}\", url.PathEscape(roleAssignmentName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.3-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client WorkloadNetworksClient) DeleteVMGroupPreparer(ctx context.Context, resourceGroupName string, VMGroupID string, privateCloudName string) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"privateCloudName\": autorest.Encode(\"path\", privateCloudName),\n\t\t\"resourceGroupName\": autorest.Encode(\"path\", resourceGroupName),\n\t\t\"subscriptionId\": autorest.Encode(\"path\", client.SubscriptionID),\n\t\t\"vmGroupId\": autorest.Encode(\"path\", VMGroupID),\n\t}\n\n\tconst APIVersion = \"2020-07-17-preview\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsDelete(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPathParameters(\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/workloadNetworks/default/vmGroups/{vmGroupId}\", pathParameters),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func (client *VirtualMachineScaleSetVMRunCommandsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, runCommandName string, options *VirtualMachineScaleSetVMRunCommandsBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands/{runCommandName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif vmScaleSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter vmScaleSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vmScaleSetName}\", url.PathEscape(vmScaleSetName))\n\tif instanceID == \"\" {\n\t\treturn nil, errors.New(\"parameter instanceID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{instanceId}\", url.PathEscape(instanceID))\n\tif runCommandName == \"\" {\n\t\treturn nil, errors.New(\"parameter runCommandName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{runCommandName}\", url.PathEscape(runCommandName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", 
url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json, text/json\")\n\treturn req, nil\n}", "func (client *VirtualNetworkTapsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, tapName string, options *VirtualNetworkTapsBeginDeleteOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{tapName}\", url.PathEscape(tapName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodDelete, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-07-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateDescribeContainerGroupMetricRequest() (request *DescribeContainerGroupMetricRequest) {\n\trequest = &DescribeContainerGroupMetricRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Eci\", \"2018-08-08\", \"DescribeContainerGroupMetric\", \"eci\", \"openAPI\")\n\treturn\n}", "func NewCreateVMPlacementGroupOK() *CreateVMPlacementGroupOK {\n\treturn &CreateVMPlacementGroupOK{}\n}", "func (client *CassandraClustersClient) deallocateCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, options *CassandraClustersClientBeginDeallocateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}/deallocate\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-03-15-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *RoleDefinitionsClient) deleteCreateRequest(ctx context.Context, scope string, roleDefinitionID string, options *RoleDefinitionsDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.Authorization/roleDefinitions/{roleDefinitionId}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\tif 
roleDefinitionID == \"\" {\n\t\treturn nil, errors.New(\"parameter roleDefinitionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{roleDefinitionId}\", url.PathEscape(roleDefinitionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ClustersClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ClustersClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/serverGroupsv2\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-11-08\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *PermissionBindingsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, permissionBindingName string, options *PermissionBindingsClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/namespaces/{namespaceName}/permissionBindings/{permissionBindingName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif permissionBindingName == \"\" {\n\t\treturn nil, errors.New(\"parameter permissionBindingName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{permissionBindingName}\", url.PathEscape(permissionBindingName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *DicomServicesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, dicomServiceName string, workspaceName string, options *DicomServicesClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HealthcareApis/workspaces/{workspaceName}/dicomservices/{dicomServiceName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif dicomServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter dicomServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{dicomServiceName}\", url.PathEscape(dicomServiceName))\n\tif workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func NewPolicyDefinitionAtManagementGroup(ctx *pulumi.Context,\n\tname string, args *PolicyDefinitionAtManagementGroupArgs, opts ...pulumi.ResourceOption) (*PolicyDefinitionAtManagementGroup, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.ManagementGroupId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ManagementGroupId'\")\n\t}\n\taliases := pulumi.Aliases([]pulumi.Alias{\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20190101:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20161201:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20161201:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20180301:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20180301:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20180501:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20180501:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20190601:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20190601:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20190901:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20190901:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20200301:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: 
pulumi.String(\"azure-nextgen:authorization/v20200301:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:authorization/v20200901:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:authorization/v20200901:PolicyDefinitionAtManagementGroup\"),\n\t\t},\n\t})\n\topts = append(opts, aliases)\n\tvar resource PolicyDefinitionAtManagementGroup\n\terr := ctx.RegisterResource(\"azure-native:authorization/v20190101:PolicyDefinitionAtManagementGroup\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (client *PolicyDefinitionsClient) deleteCreateRequest(ctx context.Context, policyDefinitionName string, options *PolicyDefinitionsDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}\"\n\tif policyDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter policyDefinitionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{policyDefinitionName}\", url.PathEscape(policyDefinitionName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ServersClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, serverName string, options *ServersClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serverName == \"\" {\n\t\treturn nil, errors.New(\"parameter serverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serverName}\", url.PathEscape(serverName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treturn req, nil\n}", "func (client *VirtualNetworkLinksClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string, options *VirtualNetworkLinksBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/virtualNetworkLinks/{virtualNetworkLinkName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be 
empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif privateZoneName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateZoneName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateZoneName}\", url.PathEscape(privateZoneName))\n\tif virtualNetworkLinkName == \"\" {\n\t\treturn nil, errors.New(\"parameter virtualNetworkLinkName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{virtualNetworkLinkName}\", url.PathEscape(virtualNetworkLinkName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Raw().Header.Set(\"If-Match\", *options.IfMatch)\n\t}\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func NewDeleteSecurityGroupOK() *DeleteSecurityGroupOK {\n\treturn &DeleteSecurityGroupOK{}\n}", "func (client *ManagedClustersClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ManagedClustersClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ScriptExecutionsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, privateCloudName string, scriptExecutionName string, options *ScriptExecutionsClientBeginDeleteOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/scriptExecutions/{scriptExecutionName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif privateCloudName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateCloudName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateCloudName}\", 
url.PathEscape(privateCloudName))\n\tif scriptExecutionName == \"\" {\n\t\treturn nil, errors.New(\"parameter scriptExecutionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{scriptExecutionName}\", url.PathEscape(scriptExecutionName))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (obj *ShopSys) GroupDelete(input ShopGroup, _opt ...map[string]string) (output ResultCount, err error) {\n\tctx := context.Background()\n\treturn obj.GroupDeleteWithContext(ctx, input, _opt...)\n}" ]
[ "0.6350999", "0.6210928", "0.6089093", "0.6042023", "0.6032062", "0.6019282", "0.59526217", "0.5916635", "0.5828889", "0.58183795", "0.58183795", "0.5775107", "0.56965506", "0.5609115", "0.56069195", "0.55307055", "0.55185777", "0.5498397", "0.54778063", "0.54416555", "0.5410034", "0.53946894", "0.538106", "0.53780043", "0.5371178", "0.5369898", "0.53370947", "0.53127927", "0.5268267", "0.5264551", "0.5248842", "0.5241432", "0.52225673", "0.52115244", "0.5185894", "0.5180158", "0.516782", "0.5140397", "0.5139879", "0.5136518", "0.51287884", "0.50877494", "0.50854516", "0.5083756", "0.50802815", "0.50705844", "0.50502485", "0.5043913", "0.50366616", "0.5030508", "0.5026679", "0.50231487", "0.501981", "0.50071937", "0.49993718", "0.4994924", "0.4976547", "0.49765176", "0.49617976", "0.49561283", "0.49469352", "0.4942604", "0.4924374", "0.49204853", "0.49195856", "0.490731", "0.4885914", "0.48858756", "0.4885752", "0.48656845", "0.48498785", "0.48477057", "0.48462418", "0.48459503", "0.48296875", "0.48281667", "0.48276523", "0.48256338", "0.48237643", "0.48163393", "0.48113084", "0.48102286", "0.48065493", "0.48060635", "0.48041022", "0.4800253", "0.47996593", "0.47942805", "0.47938704", "0.47906488", "0.47855282", "0.4779093", "0.47644225", "0.47627434", "0.47594285", "0.4748504", "0.47473055", "0.47384572", "0.4738354", "0.47302598" ]
0.782387
0
getCreateRequest creates the Get request.
func (client *PolicyDefinitionsClient) getCreateRequest(ctx context.Context, policyDefinitionName string, options *PolicyDefinitionsGetOptions) (*policy.Request, error) {
	urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}"
	if policyDefinitionName == "" {
		return nil, errors.New("parameter policyDefinitionName cannot be empty")
	}
	urlPath = strings.ReplaceAll(urlPath, "{policyDefinitionName}", url.PathEscape(policyDefinitionName))
	if client.subscriptionID == "" {
		return nil, errors.New("parameter client.subscriptionID cannot be empty")
	}
	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
	req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("api-version", "2021-06-01")
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header.Set("Accept", "application/json")
	return req, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (client *UserMetricsKeysClient) getCreateRequest(ctx context.Context, options *UserMetricsKeysClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/trafficManagerUserMetricsKeys/default\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *AssociationsClient) getCreateRequest(ctx context.Context, scope string, associationName string, options *AssociationsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.CustomProviders/associations/{associationName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\tif associationName == \"\" {\n\t\treturn nil, errors.New(\"parameter associationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{associationName}\", url.PathEscape(associationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-09-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *CloudServicesClient) getCreateRequest(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif cloudServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter cloudServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{cloudServiceName}\", url.PathEscape(cloudServiceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-04\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *subscriptionClient) getCreateRequest(ctx context.Context, topicName string, subscriptionName string, options *SubscriptionGetOptions) (*policy.Request, error) {\n\turlPath := \"/{topicName}/subscriptions/{subscriptionName}\"\n\tif topicName == \"\" {\n\t\treturn nil, errors.New(\"parameter topicName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{topicName}\", url.PathEscape(topicName))\n\tif 
subscriptionName == \"\" {\n\t\treturn nil, errors.New(\"parameter subscriptionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionName}\", url.PathEscape(subscriptionName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Enrich != nil {\n\t\treqQP.Set(\"enrich\", strconv.FormatBool(*options.Enrich))\n\t}\n\tif client.apiVersion != nil {\n\t\treqQP.Set(\"api-version\", \"2017_04\")\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/xml, application/atom+xml\")\n\treturn req, nil\n}", "func (client *AlertOperationClient) getCreateRequest(ctx context.Context, scope string, operationID string, options *AlertOperationClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.Authorization/roleManagementAlertOperations/{operationId}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\turlPath = strings.ReplaceAll(urlPath, \"{operationId}\", operationID)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ClustersClient) getCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, options *ClustersClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/serverGroupsv2/{clusterName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-11-08\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *CertificateOrdersClient) getCreateRequest(ctx context.Context, resourceGroupName string, certificateOrderName string, options *CertificateOrdersClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif certificateOrderName == \"\" {\n\t\treturn nil, errors.New(\"parameter certificateOrderName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{certificateOrderName}\", 
url.PathEscape(certificateOrderName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *KeyVaultClient) getKeyCreateRequest(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, options *KeyVaultClientGetKeyOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys/{key-name}/{key-version}\"\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-name}\", url.PathEscape(keyName))\n\tif keyVersion == \"\" {\n\t\treturn nil, errors.New(\"parameter keyVersion cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-version}\", url.PathEscape(keyVersion))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.2\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *PipelinesClient) getCreateRequest(ctx context.Context, resourceGroupName string, factoryName string, pipelineName string, options *PipelinesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/pipelines/{pipelineName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif factoryName == \"\" {\n\t\treturn nil, errors.New(\"parameter factoryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{factoryName}\", url.PathEscape(factoryName))\n\tif pipelineName == \"\" {\n\t\treturn nil, errors.New(\"parameter pipelineName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{pipelineName}\", url.PathEscape(pipelineName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfNoneMatch != nil {\n\t\treq.Raw().Header[\"If-None-Match\"] = []string{*options.IfNoneMatch}\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *AvailabilitySetsClient) getCreateRequest(ctx context.Context, resourceGroupName string, availabilitySetName string, options *AvailabilitySetsGetOptions) 
(*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif availabilitySetName == \"\" {\n\t\treturn nil, errors.New(\"parameter availabilitySetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{availabilitySetName}\", url.PathEscape(availabilitySetName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *KeyVaultClient) getKeysCreateRequest(ctx context.Context, vaultBaseURL string, options *KeyVaultClientGetKeysOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys\"\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Maxresults != nil {\n\t\treqQP.Set(\"maxresults\", strconv.FormatInt(int64(*options.Maxresults), 10))\n\t}\n\treqQP.Set(\"api-version\", \"7.2\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *RegistrationDefinitionsClient) getCreateRequest(ctx context.Context, scope string, registrationDefinitionID string, options *RegistrationDefinitionsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.ManagedServices/registrationDefinitions/{registrationDefinitionId}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\tif registrationDefinitionID == \"\" {\n\t\treturn nil, errors.New(\"parameter registrationDefinitionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registrationDefinitionId}\", url.PathEscape(registrationDefinitionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *KeyVaultClient) GetKeyCreateRequest(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, options *KeyVaultClientGetKeyOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys/{key-name}/{key-version}\"\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-name}\", url.PathEscape(keyName))\n\t// if keyVersion == \"\" {\n\t// \treturn nil, errors.New(\"parameter keyVersion 
cannot be empty\")\n\t// }\n\turlPath = strings.ReplaceAll(urlPath, \"{key-version}\", url.PathEscape(keyVersion))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.3\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *DicomServicesClient) getCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, dicomServiceName string, options *DicomServicesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HealthcareApis/workspaces/{workspaceName}/dicomservices/{dicomServiceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\tif dicomServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter dicomServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{dicomServiceName}\", url.PathEscape(dicomServiceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *KeyVaultClient) GetKeysCreateRequest(ctx context.Context, vaultBaseURL string, options *KeyVaultClientGetKeysOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys\"\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Maxresults != nil {\n\t\treqQP.Set(\"maxresults\", strconv.FormatInt(int64(*options.Maxresults), 10))\n\t}\n\treqQP.Set(\"api-version\", \"7.3\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *WebAppsClient) getCreateRequest(ctx context.Context, resourceGroupName string, name string, options *WebAppsGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter 
client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ManagedClustersClient) getCreateRequest(ctx context.Context, resourceGroupName string, resourceName string, options *ManagedClustersClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(resourceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ClustersClient) getCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, options *ClustersGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/clusters/{clusterName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *WorkspacesClient) getCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, options *WorkspacesGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter 
resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *VirtualMachinesClient) getCreateRequest(ctx context.Context, resourceGroupName string, virtualMachineName string, options *VirtualMachinesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{virtualMachineName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif virtualMachineName == \"\" {\n\t\treturn nil, errors.New(\"parameter virtualMachineName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{virtualMachineName}\", url.PathEscape(virtualMachineName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-10-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *CompliancesClient) getCreateRequest(ctx context.Context, scope string, complianceName string, options *CompliancesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.Security/compliances/{complianceName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\tif complianceName == \"\" {\n\t\treturn nil, errors.New(\"parameter complianceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{complianceName}\", url.PathEscape(complianceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *DefenderSettingsClient) getCreateRequest(ctx context.Context, options *DefenderSettingsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.IoTSecurity/defenderSettings/default\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, 
errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *SQLVirtualMachinesClient) getCreateRequest(ctx context.Context, resourceGroupName string, sqlVirtualMachineName string, options *SQLVirtualMachinesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/{sqlVirtualMachineName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif sqlVirtualMachineName == \"\" {\n\t\treturn nil, errors.New(\"parameter sqlVirtualMachineName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sqlVirtualMachineName}\", url.PathEscape(sqlVirtualMachineName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treqQP.Set(\"api-version\", \"2017-03-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *FactoriesClient) getCreateRequest(ctx context.Context, resourceGroupName string, factoryName string, options *FactoriesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif factoryName == \"\" {\n\t\treturn nil, errors.New(\"parameter factoryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{factoryName}\", url.PathEscape(factoryName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfNoneMatch != nil {\n\t\treq.Raw().Header[\"If-None-Match\"] = []string{*options.IfNoneMatch}\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *VirtualNetworkTapsClient) 
getCreateRequest(ctx context.Context, resourceGroupName string, tapName string, options *VirtualNetworkTapsGetOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{tapName}\", url.PathEscape(tapName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-07-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *APIClient) getCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, apiID string, options *APIClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif apiID == \"\" {\n\t\treturn nil, errors.New(\"parameter apiID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{apiId}\", url.PathEscape(apiID))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *AlertsClient) getCreateRequest(ctx context.Context, scope string, alertID string, options *AlertsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.Authorization/roleManagementAlerts/{alertId}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\turlPath = strings.ReplaceAll(urlPath, \"{alertId}\", alertID)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *MonitoringSettingsClient) getCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, options *MonitoringSettingsClientGetOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *IotSecuritySolutionClient) getCreateRequest(ctx context.Context, resourceGroupName string, solutionName string, options *IotSecuritySolutionClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/iotSecuritySolutions/{solutionName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif solutionName == \"\" {\n\t\treturn nil, errors.New(\"parameter solutionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{solutionName}\", url.PathEscape(solutionName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *KpiClient) getCreateRequest(ctx context.Context, resourceGroupName string, hubName string, kpiName string, options *KpiClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CustomerInsights/hubs/{hubName}/kpi/{kpiName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif hubName == \"\" {\n\t\treturn nil, errors.New(\"parameter hubName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{hubName}\", url.PathEscape(hubName))\n\tif kpiName == \"\" {\n\t\treturn nil, errors.New(\"parameter kpiName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{kpiName}\", url.PathEscape(kpiName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be 
empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-04-26\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *Client) getCreateRequest(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, resourceType string, resourceName string, changeResourceID string, options *ClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}/providers/Microsoft.Resources/changes/{changeResourceId}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif resourceProviderNamespace == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceProviderNamespace cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceProviderNamespace}\", url.PathEscape(resourceProviderNamespace))\n\tif resourceType == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceType}\", url.PathEscape(resourceType))\n\tif resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(resourceName))\n\tif changeResourceID == \"\" {\n\t\treturn nil, errors.New(\"parameter changeResourceID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{changeResourceId}\", url.PathEscape(changeResourceID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ConsumerInvitationsClient) getCreateRequest(ctx context.Context, location string, invitationID string, options *ConsumerInvitationsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.DataShare/locations/{location}/consumerInvitations/{invitationId}\"\n\tif location == \"\" {\n\t\treturn nil, errors.New(\"parameter location cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{location}\", url.PathEscape(location))\n\tif invitationID == \"\" {\n\t\treturn nil, errors.New(\"parameter invitationID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{invitationId}\", url.PathEscape(invitationID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-09-01\")\n\treq.Raw().URL.RawQuery = 
reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *RouteTablesClient) getCreateRequest(ctx context.Context, resourceGroupName string, routeTableName string, options *RouteTablesGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif routeTableName == \"\" {\n\t\treturn nil, errors.New(\"parameter routeTableName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{routeTableName}\", url.PathEscape(routeTableName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *DatasetsClient) getCreateRequest(ctx context.Context, resourceGroupName string, factoryName string, datasetName string, options *DatasetsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/datasets/{datasetName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif factoryName == \"\" {\n\t\treturn nil, errors.New(\"parameter factoryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{factoryName}\", url.PathEscape(factoryName))\n\tif datasetName == \"\" {\n\t\treturn nil, errors.New(\"parameter datasetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{datasetName}\", url.PathEscape(datasetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfNoneMatch != nil {\n\t\treq.Raw().Header[\"If-None-Match\"] = []string{*options.IfNoneMatch}\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *LocalRulestacksClient) getCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientGetOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif localRulestackName == \"\" {\n\t\treturn nil, errors.New(\"parameter localRulestackName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{localRulestackName}\", url.PathEscape(localRulestackName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-29\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *TablesClient) getCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, tableName string, options *TablesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/tables/{tableName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\tif tableName == \"\" {\n\t\treturn nil, errors.New(\"parameter tableName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{tableName}\", url.PathEscape(tableName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *AccountsClient) getCreateRequest(ctx context.Context, resourceGroupName string, accountName string, options *AccountsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CognitiveServices/accounts/{accountName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter 
client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *GroupClient) getCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, groupID string, options *GroupGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups/{groupId}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif groupID == \"\" {\n\t\treturn nil, errors.New(\"parameter groupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{groupId}\", url.PathEscape(groupID))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *DiskEncryptionSetsClient) getCreateRequest(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, options *DiskEncryptionSetsGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif diskEncryptionSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter diskEncryptionSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{diskEncryptionSetName}\", url.PathEscape(diskEncryptionSetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ServersClient) getCreateRequest(ctx context.Context, resourceGroupName string, serverName string, options *ServersClientGetOptions) (*policy.Request, 
error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serverName == \"\" {\n\t\treturn nil, errors.New(\"parameter serverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serverName}\", url.PathEscape(serverName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ExpressRoutePortsLocationsClient) getCreateRequest(ctx context.Context, locationName string, options *ExpressRoutePortsLocationsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePortsLocations/{locationName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif locationName == \"\" {\n\t\treturn nil, errors.New(\"parameter locationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{locationName}\", url.PathEscape(locationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *WorkflowsClient) getCreateRequest(ctx context.Context, resourceGroupName string, storageSyncServiceName string, workflowID string, options *WorkflowsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorageSync/storageSyncServices/{storageSyncServiceName}/workflows/{workflowId}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif storageSyncServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter storageSyncServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{storageSyncServiceName}\", url.PathEscape(storageSyncServiceName))\n\tif workflowID == \"\" {\n\t\treturn nil, errors.New(\"parameter workflowID cannot be empty\")\n\t}\n\turlPath = 
strings.ReplaceAll(urlPath, \"{workflowId}\", url.PathEscape(workflowID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-09-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateGetMultiRateConfigRequest() (request *GetMultiRateConfigRequest) {\n\trequest = &GetMultiRateConfigRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"live\", \"2016-11-01\", \"GetMultiRateConfig\", \"live\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *PortalConfigClient) getCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, portalConfigID string, options *PortalConfigClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/portalconfigs/{portalConfigId}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif portalConfigID == \"\" {\n\t\treturn nil, errors.New(\"parameter portalConfigID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{portalConfigId}\", url.PathEscape(portalConfigID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *CustomAssessmentAutomationsClient) getCreateRequest(ctx context.Context, resourceGroupName string, customAssessmentAutomationName string, options *CustomAssessmentAutomationsGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Security/customAssessmentAutomations/{customAssessmentAutomationName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif customAssessmentAutomationName == \"\" {\n\t\treturn nil, errors.New(\"parameter customAssessmentAutomationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{customAssessmentAutomationName}\", url.PathEscape(customAssessmentAutomationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, 
urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *CassandraClustersClient) getCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, options *CassandraClustersClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-03-15-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ActionsClient) getCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, ruleID string, actionID string, options *ActionsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/alertRules/{ruleId}/actions/{actionId}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\tif ruleID == \"\" {\n\t\treturn nil, errors.New(\"parameter ruleID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ruleId}\", url.PathEscape(ruleID))\n\tif actionID == \"\" {\n\t\treturn nil, errors.New(\"parameter actionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{actionId}\", url.PathEscape(actionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-10-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *DevicesClient) getCreateRequest(ctx context.Context, resourceGroupName string, deviceName string, options 
*DevicesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HybridNetwork/devices/{deviceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif deviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter deviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{deviceName}\", url.PathEscape(deviceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *MonitorsClient) getCreateRequest(ctx context.Context, resourceGroupName string, monitorName string, options *MonitorsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors/{monitorName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif monitorName == \"\" {\n\t\treturn nil, errors.New(\"parameter monitorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{monitorName}\", url.PathEscape(monitorName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *DataCollectionEndpointsClient) getCreateRequest(ctx context.Context, resourceGroupName string, dataCollectionEndpointName string, options *DataCollectionEndpointsGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionEndpoints/{dataCollectionEndpointName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif dataCollectionEndpointName == \"\" {\n\t\treturn nil, errors.New(\"parameter dataCollectionEndpointName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{dataCollectionEndpointName}\", url.PathEscape(dataCollectionEndpointName))\n\treq, 
err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ManagedInstancesClient) getCreateRequest(ctx context.Context, resourceGroupName string, managedInstanceName string, options *ManagedInstancesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif managedInstanceName == \"\" {\n\t\treturn nil, errors.New(\"parameter managedInstanceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedInstanceName}\", url.PathEscape(managedInstanceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *VirtualApplianceSitesClient) getCreateRequest(ctx context.Context, resourceGroupName string, networkVirtualApplianceName string, siteName string, options *VirtualApplianceSitesGetOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}/virtualApplianceSites/{siteName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{networkVirtualApplianceName}\", url.PathEscape(networkVirtualApplianceName))\n\turlPath = strings.ReplaceAll(urlPath, \"{siteName}\", url.PathEscape(siteName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-07-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *CapacityReservationsClient) getCreateRequest(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, options *CapacityReservationsGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}/capacityReservations/{capacityReservationName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, 
errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif capacityReservationGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter capacityReservationGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{capacityReservationGroupName}\", url.PathEscape(capacityReservationGroupName))\n\tif capacityReservationName == \"\" {\n\t\treturn nil, errors.New(\"parameter capacityReservationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{capacityReservationName}\", url.PathEscape(capacityReservationName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", string(*options.Expand))\n\t}\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ContainerClient) getPropertiesCreateRequest(ctx context.Context, options *ContainerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"restype\", \"container\")\n\tif options != nil && options.Timeout != nil {\n\t\treqQP.Set(\"timeout\", strconv.FormatInt(int64(*options.Timeout), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {\n\t\treq.Raw().Header[\"x-ms-lease-id\"] = []string{*leaseAccessConditions.LeaseID}\n\t}\n\treq.Raw().Header[\"x-ms-version\"] = []string{\"2020-10-02\"}\n\tif options != nil && options.RequestID != nil {\n\t\treq.Raw().Header[\"x-ms-client-request-id\"] = []string{*options.RequestID}\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/xml\"}\n\treturn req, nil\n}", "func (client *IPAllocationsClient) getCreateRequest(ctx context.Context, resourceGroupName string, ipAllocationName string, options *IPAllocationsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations/{ipAllocationName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif ipAllocationName == \"\" {\n\t\treturn nil, errors.New(\"parameter ipAllocationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ipAllocationName}\", url.PathEscape(ipAllocationName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := 
req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *RoleDefinitionsClient) getCreateRequest(ctx context.Context, scope string, roleDefinitionID string, options *RoleDefinitionsGetOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.Authorization/roleDefinitions/{roleDefinitionId}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\tif roleDefinitionID == \"\" {\n\t\treturn nil, errors.New(\"parameter roleDefinitionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{roleDefinitionId}\", url.PathEscape(roleDefinitionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *DataCollectionEndpointsClient) getCreateRequest(ctx context.Context, resourceGroupName string, dataCollectionEndpointName string, options *DataCollectionEndpointsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionEndpoints/{dataCollectionEndpointName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif dataCollectionEndpointName == \"\" {\n\t\treturn nil, errors.New(\"parameter dataCollectionEndpointName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{dataCollectionEndpointName}\", url.PathEscape(dataCollectionEndpointName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ManagedDatabasesClient) getCreateRequest(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string, options *ManagedDatabasesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif managedInstanceName == \"\" {\n\t\treturn nil, errors.New(\"parameter managedInstanceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedInstanceName}\", url.PathEscape(managedInstanceName))\n\tif databaseName == \"\" {\n\t\treturn nil, errors.New(\"parameter databaseName cannot 
be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{databaseName}\", url.PathEscape(databaseName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *RedisClient) getCreateRequest(ctx context.Context, resourceGroupName string, name string, options *RedisGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redis/{name}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-12-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *IncidentsClient) getCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, incidentID string, options *IncidentsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/incidents/{incidentId}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\tif incidentID == \"\" {\n\t\treturn nil, errors.New(\"parameter incidentID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{incidentId}\", url.PathEscape(incidentID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-05-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", 
"func (client *TagRulesClient) getCreateRequest(ctx context.Context, resourceGroupName string, monitorName string, ruleSetName string, options *TagRulesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Dynatrace.Observability/monitors/{monitorName}/tagRules/{ruleSetName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif monitorName == \"\" {\n\t\treturn nil, errors.New(\"parameter monitorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{monitorName}\", url.PathEscape(monitorName))\n\tif ruleSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter ruleSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ruleSetName}\", url.PathEscape(ruleSetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-27\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *NetworkToNetworkInterconnectsClient) getCreateRequest(ctx context.Context, resourceGroupName string, networkFabricName string, networkToNetworkInterconnectName string, options *NetworkToNetworkInterconnectsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetworkFabric/networkFabrics/{networkFabricName}/networkToNetworkInterconnects/{networkToNetworkInterconnectName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif networkFabricName == \"\" {\n\t\treturn nil, errors.New(\"parameter networkFabricName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{networkFabricName}\", url.PathEscape(networkFabricName))\n\tif networkToNetworkInterconnectName == \"\" {\n\t\treturn nil, errors.New(\"parameter networkToNetworkInterconnectName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{networkToNetworkInterconnectName}\", url.PathEscape(networkToNetworkInterconnectName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-06-15\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *RoleAssignmentsClient) getCreateRequest(ctx context.Context, vaultBaseURL string, scope string, roleAssignmentName string, options *RoleAssignmentsGetOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := 
\"/{scope}/providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}\"\n\tif scope == \"\" {\n\t\treturn nil, errors.New(\"parameter scope cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\tif roleAssignmentName == \"\" {\n\t\treturn nil, errors.New(\"parameter roleAssignmentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{roleAssignmentName}\", url.PathEscape(roleAssignmentName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.3-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *WebhooksClient) getCreateRequest(ctx context.Context, resourceGroupName string, registryName string, webhookName string, options *WebhooksClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif registryName == \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\tif webhookName == \"\" {\n\t\treturn nil, errors.New(\"parameter webhookName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{webhookName}\", url.PathEscape(webhookName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *TaskRunsClient) getCreateRequest(ctx context.Context, resourceGroupName string, registryName string, taskRunName string, options *TaskRunsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/taskRuns/{taskRunName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif registryName == \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\tif taskRunName == \"\" {\n\t\treturn nil, errors.New(\"parameter taskRunName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{taskRunName}\", 
url.PathEscape(taskRunName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *VirtualApplianceSKUsClient) getCreateRequest(ctx context.Context, skuName string, options *VirtualApplianceSKUsGetOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkVirtualApplianceSkus/{skuName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\turlPath = strings.ReplaceAll(urlPath, \"{skuName}\", url.PathEscape(skuName))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-07-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateGetContactRequest() (request *GetContactRequest) {\n\trequest = &GetContactRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Subscription\", \"2021-01-15\", \"GetContact\", \"\", \"\")\n\treturn\n}", "func (client *SyncGroupsClient) getCreateRequest(ctx context.Context, resourceGroupName string, serverName string, databaseName string, syncGroupName string, options *SyncGroupsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/syncGroups/{syncGroupName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serverName == \"\" {\n\t\treturn nil, errors.New(\"parameter serverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serverName}\", url.PathEscape(serverName))\n\tif databaseName == \"\" {\n\t\treturn nil, errors.New(\"parameter databaseName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{databaseName}\", url.PathEscape(databaseName))\n\tif syncGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter syncGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{syncGroupName}\", url.PathEscape(syncGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *RoleDefinitionsClient) getByIDCreateRequest(ctx context.Context, roleID string, options *RoleDefinitionsGetByIDOptions) (*policy.Request, error) {\n\turlPath := \"/{roleId}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{roleId}\", roleID)\n\treq, err := runtime.NewRequest(ctx, 
http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *FirewallRulesClient) getCreateRequest(ctx context.Context, resourceGroupName string, serverName string, firewallRuleName string, options *FirewallRulesGetOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/servers/{serverName}/firewallRules/{firewallRuleName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serverName == \"\" {\n\t\treturn nil, errors.New(\"parameter serverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serverName}\", url.PathEscape(serverName))\n\tif firewallRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter firewallRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{firewallRuleName}\", url.PathEscape(firewallRuleName))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2017-12-01\")\n\treq.URL.RawQuery = reqQP.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *AgentsClient) getCreateRequest(ctx context.Context, resourceGroupName string, storageMoverName string, agentName string, options *AgentsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorageMover/storageMovers/{storageMoverName}/agents/{agentName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif storageMoverName == \"\" {\n\t\treturn nil, errors.New(\"parameter storageMoverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{storageMoverName}\", url.PathEscape(storageMoverName))\n\tif agentName == \"\" {\n\t\treturn nil, errors.New(\"parameter agentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{agentName}\", url.PathEscape(agentName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-07-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *VirtualMachineImageTemplatesClient) 
getCreateRequest(ctx context.Context, resourceGroupName string, imageTemplateName string, options *VirtualMachineImageTemplatesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.VirtualMachineImages/imageTemplates/{imageTemplateName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif imageTemplateName == \"\" {\n\t\treturn nil, errors.New(\"parameter imageTemplateName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{imageTemplateName}\", url.PathEscape(imageTemplateName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-10-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *VirtualMachineScaleSetsClient) getCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif vmScaleSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter vmScaleSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vmScaleSetName}\", url.PathEscape(vmScaleSetName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", string(*options.Expand))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *InteractionsClient) getCreateRequest(ctx context.Context, resourceGroupName string, hubName string, interactionName string, options *InteractionsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CustomerInsights/hubs/{hubName}/interactions/{interactionName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif hubName == \"\" {\n\t\treturn nil, errors.New(\"parameter hubName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{hubName}\", 
url.PathEscape(hubName))\n\tif interactionName == \"\" {\n\t\treturn nil, errors.New(\"parameter interactionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{interactionName}\", url.PathEscape(interactionName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.LocaleCode != nil {\n\t\treqQP.Set(\"locale-code\", *options.LocaleCode)\n\t}\n\treqQP.Set(\"api-version\", \"2017-04-26\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *OutputsClient) getCreateRequest(ctx context.Context, resourceGroupName string, jobName string, outputName string, options *OutputsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/outputs/{outputName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif jobName == \"\" {\n\t\treturn nil, errors.New(\"parameter jobName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{jobName}\", url.PathEscape(jobName))\n\tif outputName == \"\" {\n\t\treturn nil, errors.New(\"parameter outputName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{outputName}\", url.PathEscape(outputName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *OutputsClient) getCreateRequest(ctx context.Context, resourceGroupName string, jobName string, outputName string, options *OutputsGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/outputs/{outputName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif jobName == \"\" {\n\t\treturn nil, errors.New(\"parameter jobName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{jobName}\", url.PathEscape(jobName))\n\tif outputName == \"\" {\n\t\treturn nil, errors.New(\"parameter outputName cannot be empty\")\n\t}\n\turlPath 
= strings.ReplaceAll(urlPath, \"{outputName}\", url.PathEscape(outputName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-04-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *SubscriptionClient) getCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, sid string, options *SubscriptionClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/subscriptions/{sid}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif sid == \"\" {\n\t\treturn nil, errors.New(\"parameter sid cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sid}\", url.PathEscape(sid))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *StorageTargetsClient) getCreateRequest(ctx context.Context, resourceGroupName string, cacheName string, storageTargetName string, options *StorageTargetsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif cacheName == \"\" {\n\t\treturn nil, errors.New(\"parameter cacheName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{cacheName}\", url.PathEscape(cacheName))\n\tif storageTargetName == \"\" {\n\t\treturn nil, errors.New(\"parameter storageTargetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{storageTargetName}\", url.PathEscape(storageTargetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func 
(client PeerExpressRouteCircuitConnectionsClient) getCreateRequest(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, connectionName string, options *PeerExpressRouteCircuitConnectionsGetOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/peerConnections/{connectionName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{circuitName}\", url.PathEscape(circuitName))\n\turlPath = strings.ReplaceAll(urlPath, \"{peeringName}\", url.PathEscape(peeringName))\n\turlPath = strings.ReplaceAll(urlPath, \"{connectionName}\", url.PathEscape(connectionName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-07-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *KeyVaultClient) getCertificatesCreateRequest(ctx context.Context, vaultBaseURL string, options *KeyVaultClientGetCertificatesOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/certificates\"\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Maxresults != nil {\n\t\treqQP.Set(\"maxresults\", strconv.FormatInt(int64(*options.Maxresults), 10))\n\t}\n\tif options != nil && options.IncludePending != nil {\n\t\treqQP.Set(\"includePending\", strconv.FormatBool(*options.IncludePending))\n\t}\n\treqQP.Set(\"api-version\", \"7.2\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *VirtualNetworkLinksClient) getCreateRequest(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string, options *VirtualNetworkLinksGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/virtualNetworkLinks/{virtualNetworkLinkName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif privateZoneName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateZoneName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateZoneName}\", url.PathEscape(privateZoneName))\n\tif virtualNetworkLinkName == \"\" {\n\t\treturn nil, errors.New(\"parameter virtualNetworkLinkName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{virtualNetworkLinkName}\", url.PathEscape(virtualNetworkLinkName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := 
runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *AnalysisResultsClient) getCreateRequest(ctx context.Context, resourceGroupName string, testBaseAccountName string, packageName string, testResultName string, analysisResultName AnalysisResultName, options *AnalysisResultsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TestBase/testBaseAccounts/{testBaseAccountName}/packages/{packageName}/testResults/{testResultName}/analysisResults/{analysisResultName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif testBaseAccountName == \"\" {\n\t\treturn nil, errors.New(\"parameter testBaseAccountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{testBaseAccountName}\", url.PathEscape(testBaseAccountName))\n\tif packageName == \"\" {\n\t\treturn nil, errors.New(\"parameter packageName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{packageName}\", url.PathEscape(packageName))\n\tif testResultName == \"\" {\n\t\treturn nil, errors.New(\"parameter testResultName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{testResultName}\", url.PathEscape(testResultName))\n\tif analysisResultName == \"\" {\n\t\treturn nil, errors.New(\"parameter analysisResultName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{analysisResultName}\", url.PathEscape(string(analysisResultName)))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-12-16-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ReplicationsClient) getCreateRequest(ctx context.Context, resourceGroupName string, registryName string, replicationName string, options *ReplicationsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif registryName == \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\tif replicationName == \"\" {\n\t\treturn nil, errors.New(\"parameter replicationName cannot be empty\")\n\t}\n\turlPath = 
strings.ReplaceAll(urlPath, \"{replicationName}\", url.PathEscape(replicationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *IscsiTargetsClient) getCreateRequest(ctx context.Context, resourceGroupName string, diskPoolName string, iscsiTargetName string, options *IscsiTargetsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StoragePool/diskPools/{diskPoolName}/iscsiTargets/{iscsiTargetName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif diskPoolName == \"\" {\n\t\treturn nil, errors.New(\"parameter diskPoolName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{diskPoolName}\", url.PathEscape(diskPoolName))\n\tif iscsiTargetName == \"\" {\n\t\treturn nil, errors.New(\"parameter iscsiTargetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{iscsiTargetName}\", url.PathEscape(iscsiTargetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *TriggersClient) getCreateRequest(ctx context.Context, resourceGroupName string, accountName string, shareSubscriptionName string, triggerName string, options *TriggersClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shareSubscriptions/{shareSubscriptionName}/triggers/{triggerName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\tif shareSubscriptionName == \"\" {\n\t\treturn nil, errors.New(\"parameter shareSubscriptionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{shareSubscriptionName}\", url.PathEscape(shareSubscriptionName))\n\tif triggerName == \"\" {\n\t\treturn nil, errors.New(\"parameter triggerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{triggerName}\", 
url.PathEscape(triggerName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-09-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *VendorNetworkFunctionsClient) getCreateRequest(ctx context.Context, locationName string, vendorName string, serviceKey string, options *VendorNetworkFunctionsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.HybridNetwork/locations/{locationName}/vendors/{vendorName}/networkFunctions/{serviceKey}\"\n\tif locationName == \"\" {\n\t\treturn nil, errors.New(\"parameter locationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{locationName}\", url.PathEscape(locationName))\n\tif vendorName == \"\" {\n\t\treturn nil, errors.New(\"parameter vendorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vendorName}\", url.PathEscape(vendorName))\n\tif serviceKey == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceKey cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceKey}\", url.PathEscape(serviceKey))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *VirtualRoutersClient) getCreateRequest(ctx context.Context, resourceGroupName string, virtualRouterName string, options *VirtualRoutersClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif virtualRouterName == \"\" {\n\t\treturn nil, errors.New(\"parameter virtualRouterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{virtualRouterName}\", url.PathEscape(virtualRouterName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateGetOpenNLURequest() (request *GetOpenNLURequest) {\n\trequest = &GetOpenNLURequest{\n\t\tRpcRequest: 
&requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetOpenNLU\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *RecordSetsClient) getCreateRequest(ctx context.Context, resourceGroupName string, privateZoneName string, recordType RecordType, relativeRecordSetName string, options *RecordSetsGetOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/{recordType}/{relativeRecordSetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif privateZoneName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateZoneName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateZoneName}\", url.PathEscape(privateZoneName))\n\tif recordType == \"\" {\n\t\treturn nil, errors.New(\"parameter recordType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{recordType}\", url.PathEscape(string(recordType)))\n\tif relativeRecordSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter relativeRecordSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{relativeRecordSetName}\", relativeRecordSetName)\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = reqQP.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *AgentPoolsClient) getCreateRequest(ctx context.Context, resourceGroupName string, kubernetesClusterName string, agentPoolName string, options *AgentPoolsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetworkCloud/kubernetesClusters/{kubernetesClusterName}/agentPools/{agentPoolName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif kubernetesClusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter kubernetesClusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{kubernetesClusterName}\", url.PathEscape(kubernetesClusterName))\n\tif agentPoolName == \"\" {\n\t\treturn nil, errors.New(\"parameter agentPoolName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{agentPoolName}\", url.PathEscape(agentPoolName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func 
(client *ReplicationvCentersClient) getCreateRequest(ctx context.Context, fabricName string, vcenterName string, options *ReplicationvCentersClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationvCenters/{vcenterName}\"\n\tif client.resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(client.resourceName))\n\tif client.resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(client.resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif fabricName == \"\" {\n\t\treturn nil, errors.New(\"parameter fabricName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{fabricName}\", url.PathEscape(fabricName))\n\tif vcenterName == \"\" {\n\t\treturn nil, errors.New(\"parameter vcenterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vcenterName}\", url.PathEscape(vcenterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *MetricAlertsClient) getCreateRequest(ctx context.Context, resourceGroupName string, ruleName string, options *MetricAlertsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/metricAlerts/{ruleName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif ruleName == \"\" {\n\t\treturn nil, errors.New(\"parameter ruleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ruleName}\", url.PathEscape(ruleName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ConnectedEnvironmentsClient) getCreateRequest(ctx context.Context, resourceGroupName string, connectedEnvironmentName string, options *ConnectedEnvironmentsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.App/connectedEnvironments/{connectedEnvironmentName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, 
errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif connectedEnvironmentName == \"\" {\n\t\treturn nil, errors.New(\"parameter connectedEnvironmentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{connectedEnvironmentName}\", url.PathEscape(connectedEnvironmentName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *KeyVaultClient) getCertificateOperationCreateRequest(ctx context.Context, vaultBaseURL string, certificateName string, options *KeyVaultClientGetCertificateOperationOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/certificates/{certificate-name}/pending\"\n\tif certificateName == \"\" {\n\t\treturn nil, errors.New(\"parameter certificateName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{certificate-name}\", url.PathEscape(certificateName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.2\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *SapMonitorsClient) getCreateRequest(ctx context.Context, resourceGroupName string, sapMonitorName string, options *SapMonitorsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HanaOnAzure/sapMonitors/{sapMonitorName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif sapMonitorName == \"\" {\n\t\treturn nil, errors.New(\"parameter sapMonitorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sapMonitorName}\", url.PathEscape(sapMonitorName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-02-07-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ContainerGroupsClient) getCreateRequest(ctx context.Context, resourceGroupName string, containerGroupName string, options *ContainerGroupsClientGetOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerInstance/containerGroups/{containerGroupName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif containerGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter containerGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{containerGroupName}\", url.PathEscape(containerGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *FirewallRulesClient) getCreateRequest(ctx context.Context, resourceGroupName string, cacheName string, ruleName string, options *FirewallRulesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redis/{cacheName}/firewallRules/{ruleName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif cacheName == \"\" {\n\t\treturn nil, errors.New(\"parameter cacheName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{cacheName}\", url.PathEscape(cacheName))\n\tif ruleName == \"\" {\n\t\treturn nil, errors.New(\"parameter ruleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ruleName}\", url.PathEscape(ruleName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *JobExecutionsClient) getCreateRequest(ctx context.Context, resourceGroupName string, serverName string, jobAgentName string, jobName string, jobExecutionID string, options *JobExecutionsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/jobAgents/{jobAgentName}/jobs/{jobName}/executions/{jobExecutionId}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serverName == \"\" {\n\t\treturn nil, errors.New(\"parameter serverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serverName}\", url.PathEscape(serverName))\n\tif 
jobAgentName == \"\" {\n\t\treturn nil, errors.New(\"parameter jobAgentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{jobAgentName}\", url.PathEscape(jobAgentName))\n\tif jobName == \"\" {\n\t\treturn nil, errors.New(\"parameter jobName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{jobName}\", url.PathEscape(jobName))\n\turlPath = strings.ReplaceAll(urlPath, \"{jobExecutionId}\", url.PathEscape(jobExecutionID))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *NotebookWorkspacesClient) getCreateRequest(ctx context.Context, resourceGroupName string, accountName string, notebookWorkspaceName NotebookWorkspaceName, options *NotebookWorkspacesGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/notebookWorkspaces/{notebookWorkspaceName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\tif notebookWorkspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter notebookWorkspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{notebookWorkspaceName}\", url.PathEscape(string(notebookWorkspaceName)))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-10-15\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *DatabaseVulnerabilityAssessmentScansClient) getCreateRequest(ctx context.Context, resourceGroupName string, serverName string, databaseName string, vulnerabilityAssessmentName VulnerabilityAssessmentName, scanID string, options *DatabaseVulnerabilityAssessmentScansClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/vulnerabilityAssessments/{vulnerabilityAssessmentName}/scans/{scanId}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serverName == \"\" {\n\t\treturn nil, errors.New(\"parameter 
serverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serverName}\", url.PathEscape(serverName))\n\tif databaseName == \"\" {\n\t\treturn nil, errors.New(\"parameter databaseName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{databaseName}\", url.PathEscape(databaseName))\n\tif vulnerabilityAssessmentName == \"\" {\n\t\treturn nil, errors.New(\"parameter vulnerabilityAssessmentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vulnerabilityAssessmentName}\", url.PathEscape(string(vulnerabilityAssessmentName)))\n\tif scanID == \"\" {\n\t\treturn nil, errors.New(\"parameter scanID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{scanId}\", url.PathEscape(scanID))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}" ]
[ "0.7216396", "0.7151208", "0.71419024", "0.7099855", "0.7065104", "0.70532906", "0.70396507", "0.70303977", "0.7029218", "0.7008257", "0.69958997", "0.6993072", "0.6982644", "0.69472474", "0.69467354", "0.69425017", "0.69257015", "0.6924219", "0.6909365", "0.69073766", "0.68997794", "0.68921137", "0.68902415", "0.6880629", "0.6876486", "0.6870244", "0.6862067", "0.68583757", "0.68552065", "0.684329", "0.6838805", "0.6835827", "0.68167055", "0.6811107", "0.68105423", "0.68101466", "0.6797083", "0.67953676", "0.6792335", "0.6790641", "0.67887384", "0.67798513", "0.67647415", "0.6762872", "0.6743101", "0.6742948", "0.67420685", "0.67365944", "0.6730261", "0.67291504", "0.67282104", "0.6727994", "0.6726732", "0.6725633", "0.67197156", "0.671082", "0.66952485", "0.6680763", "0.66747034", "0.6667645", "0.6656608", "0.66553617", "0.66512305", "0.6645348", "0.6637187", "0.6636488", "0.6624755", "0.6623885", "0.66211736", "0.6618208", "0.6589512", "0.6588142", "0.6582017", "0.65807843", "0.657946", "0.6576794", "0.6573786", "0.65719134", "0.6549607", "0.65264183", "0.6525048", "0.6524947", "0.6522731", "0.6519522", "0.65019774", "0.65004134", "0.6499857", "0.64990413", "0.6491035", "0.6488836", "0.6486379", "0.6486076", "0.6483796", "0.6481297", "0.64794296", "0.6458987", "0.64567536", "0.64478296", "0.64474213", "0.64447945" ]
0.69831
12
getHandleResponse handles the Get response.
func (client *PolicyDefinitionsClient) getHandleResponse(resp *http.Response) (PolicyDefinitionsGetResponse, error) {
	result := PolicyDefinitionsGetResponse{RawResponse: resp}
	if err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinition); err != nil {
		return PolicyDefinitionsGetResponse{}, runtime.NewResponseError(err, resp)
	}
	return result, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (client *LiveOutputsClient) getHandleResponse(resp *http.Response) (LiveOutputsClientGetResponse, error) {\n\tresult := LiveOutputsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.LiveOutput); err != nil {\n\t\treturn LiveOutputsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *OutputsClient) getHandleResponse(resp *http.Response) (OutputsGetResponse, error) {\n\tresult := OutputsGetResponse{RawResponse: resp}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Output); err != nil {\n\t\treturn OutputsGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *OutputsClient) getHandleResponse(resp *http.Response) (OutputsClientGetResponse, error) {\n\tresult := OutputsClientGetResponse{}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Output); err != nil {\n\t\treturn OutputsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *KpiClient) getHandleResponse(resp *http.Response) (KpiClientGetResponse, error) {\n\tresult := KpiClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KpiResourceFormat); err != nil {\n\t\treturn KpiClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ActionsClient) getHandleResponse(resp *http.Response) (ActionsClientGetResponse, error) {\n\tresult := ActionsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ActionResponse); err != nil {\n\t\treturn ActionsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *APIClient) getHandleResponse(resp *http.Response) (APIClientGetResponse, error) {\n\tresult := APIClientGetResponse{}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.APIContract); err != nil {\n\t\treturn APIClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *PipelinesClient) getHandleResponse(resp *http.Response) (PipelinesClientGetResponse, error) {\n\tresult := PipelinesClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PipelineResource); err != nil {\n\t\treturn PipelinesClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *RouteTablesClient) getHandleResponse(resp *http.Response) (RouteTablesGetResponse, error) {\n\tresult := RouteTablesGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RouteTable); err != nil {\n\t\treturn RouteTablesGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) getHandleResponse(resp *http.Response) (WebAppsGetResponse, error) {\n\tresult := WebAppsGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Site); err != nil {\n\t\treturn WebAppsGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ServerVulnerabilityAssessmentClient) getHandleResponse(resp *http.Response) (ServerVulnerabilityAssessmentClientGetResponse, error) {\n\tresult := ServerVulnerabilityAssessmentClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ServerVulnerabilityAssessment); err != nil {\n\t\treturn ServerVulnerabilityAssessmentClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *Client) getHandleResponse(resp *http.Response) (ClientGetResponse, error) {\n\tresult := 
ClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ChangeResourceResult); err != nil {\n\t\treturn ClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *InteractionsClient) getHandleResponse(resp *http.Response) (InteractionsClientGetResponse, error) {\n\tresult := InteractionsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.InteractionResourceFormat); err != nil {\n\t\treturn InteractionsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DatabaseVulnerabilityAssessmentScansClient) getHandleResponse(resp *http.Response) (DatabaseVulnerabilityAssessmentScansClientGetResponse, error) {\n\tresult := DatabaseVulnerabilityAssessmentScansClientGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VulnerabilityAssessmentScanRecord); err != nil {\n\t\treturn DatabaseVulnerabilityAssessmentScansClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VideosClient) getHandleResponse(resp *http.Response) (VideosClientGetResponse, error) {\n\tresult := VideosClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VideoEntity); err != nil {\n\t\treturn VideosClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *TablesClient) getHandleResponse(resp *http.Response) (TablesClientGetResponse, error) {\n\tresult := TablesClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Table); err != nil {\n\t\treturn TablesClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ReplicationvCentersClient) getHandleResponse(resp *http.Response) (ReplicationvCentersClientGetResponse, error) {\n\tresult := ReplicationvCentersClientGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VCenter); err != nil {\n\t\treturn ReplicationvCentersClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *PortalConfigClient) getHandleResponse(resp *http.Response) (PortalConfigClientGetResponse, error) {\n\tresult := PortalConfigClientGetResponse{}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PortalConfigContract); err != nil {\n\t\treturn PortalConfigClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AlertOperationClient) getHandleResponse(resp *http.Response) (AlertOperationClientGetResponse, error) {\n\tresult := AlertOperationClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AlertOperationResult); err != nil {\n\t\treturn AlertOperationClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DataCollectionEndpointsClient) getHandleResponse(resp *http.Response) (DataCollectionEndpointsClientGetResponse, error) {\n\tresult := DataCollectionEndpointsClientGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DataCollectionEndpointResource); err != nil {\n\t\treturn DataCollectionEndpointsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AlertsClient) getHandleResponse(resp *http.Response) (AlertsClientGetResponse, error) {\n\tresult := AlertsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Alert); err != nil {\n\t\treturn AlertsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DataCollectionEndpointsClient) getHandleResponse(resp *http.Response) (DataCollectionEndpointsGetResponse, error) {\n\tresult := DataCollectionEndpointsGetResponse{RawResponse: resp}\n\tif err := 
runtime.UnmarshalAsJSON(resp, &result.DataCollectionEndpointResource); err != nil {\n\t\treturn DataCollectionEndpointsGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *RecoveryPointsClient) getHandleResponse(resp *http.Response) (RecoveryPointsClientGetResponse, error) {\n\tresult := RecoveryPointsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RecoveryPoint); err != nil {\n\t\treturn RecoveryPointsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *LocalRulestacksClient) getHandleResponse(resp *http.Response) (LocalRulestacksClientGetResponse, error) {\n\tresult := LocalRulestacksClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.LocalRulestackResource); err != nil {\n\t\treturn LocalRulestacksClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AFDOriginsClient) getHandleResponse(resp *http.Response) (AFDOriginsClientGetResponse, error) {\n\tresult := AFDOriginsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AFDOrigin); err != nil {\n\t\treturn AFDOriginsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *GroupClient) getHandleResponse(resp *http.Response) (GroupGetResponse, error) {\n\tresult := GroupGetResponse{RawResponse: resp}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GroupContract); err != nil {\n\t\treturn GroupGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *TriggersClient) getHandleResponse(resp *http.Response) (TriggersClientGetResponse, error) {\n\tresult := TriggersClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result); err != nil {\n\t\treturn TriggersClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *KeyVaultClient) getKeyHandleResponse(resp *http.Response) (KeyVaultClientGetKeyResponse, error) {\n\tresult := KeyVaultClientGetKeyResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil {\n\t\treturn KeyVaultClientGetKeyResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DicomServicesClient) getHandleResponse(resp *http.Response) (DicomServicesClientGetResponse, error) {\n\tresult := DicomServicesClientGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DicomService); err != nil {\n\t\treturn DicomServicesClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DatasetsClient) getHandleResponse(resp *http.Response) (DatasetsClientGetResponse, error) {\n\tresult := DatasetsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DatasetResource); err != nil {\n\t\treturn DatasetsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *KeyVaultClient) getKeyHandleResponse(resp *http.Response) (KeyVaultClientGetKeyResponse, error) {\n\tresult := KeyVaultClientGetKeyResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil {\n\t\treturn KeyVaultClientGetKeyResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AgentsClient) getHandleResponse(resp *http.Response) (AgentsClientGetResponse, error) {\n\tresult := AgentsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Agent); err != nil {\n\t\treturn AgentsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ServersClient) getHandleResponse(resp *http.Response) 
(ServersClientGetResponse, error) {\n\tresult := ServersClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Server); err != nil {\n\t\treturn ServersClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ServersClient) getHandleResponse(resp *http.Response) (ServersClientGetResponse, error) {\n\tresult := ServersClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Server); err != nil {\n\t\treturn ServersClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WorkflowsClient) getHandleResponse(resp *http.Response) (WorkflowsClientGetResponse, error) {\n\tresult := WorkflowsClientGetResponse{}\n\tif val := resp.Header.Get(\"x-ms-request-id\"); val != \"\" {\n\t\tresult.XMSRequestID = &val\n\t}\n\tif val := resp.Header.Get(\"x-ms-correlation-request-id\"); val != \"\" {\n\t\tresult.XMSCorrelationRequestID = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Workflow); err != nil {\n\t\treturn WorkflowsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *IotSecuritySolutionClient) getHandleResponse(resp *http.Response) (IotSecuritySolutionClientGetResponse, error) {\n\tresult := IotSecuritySolutionClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.IoTSecuritySolutionModel); err != nil {\n\t\treturn IotSecuritySolutionClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualRoutersClient) getHandleResponse(resp *http.Response) (VirtualRoutersClientGetResponse, error) {\n\tresult := VirtualRoutersClientGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VirtualRouter); err != nil {\n\t\treturn VirtualRoutersClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *RecordSetsClient) getHandleResponse(resp *azcore.Response) (RecordSetResponse, error) {\n\tvar val *RecordSet\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn RecordSetResponse{}, err\n\t}\n\treturn RecordSetResponse{RawResponse: resp.Response, RecordSet: val}, nil\n}", "func (client *TagRulesClient) getHandleResponse(resp *http.Response) (TagRulesClientGetResponse, error) {\n\tresult := TagRulesClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.TagRule); err != nil {\n\t\treturn TagRulesClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ScriptExecutionsClient) getHandleResponse(resp *http.Response) (ScriptExecutionsClientGetResponse, error) {\n\tresult := ScriptExecutionsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ScriptExecution); err != nil {\n\t\treturn ScriptExecutionsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *IncidentsClient) getHandleResponse(resp *http.Response) (IncidentsClientGetResponse, error) {\n\tresult := IncidentsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Incident); err != nil {\n\t\treturn IncidentsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualNetworkTapsClient) getHandleResponse(resp *azcore.Response) (VirtualNetworkTapResponse, error) {\n\tvar val *VirtualNetworkTap\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn VirtualNetworkTapResponse{}, err\n\t}\n\treturn VirtualNetworkTapResponse{RawResponse: resp.Response, VirtualNetworkTap: val}, nil\n}", "func (client *VirtualMachinesClient) getHandleResponse(resp *http.Response) (VirtualMachinesClientGetResponse, error) {\n\tresult := VirtualMachinesClientGetResponse{}\n\tif err := 
runtime.UnmarshalAsJSON(resp, &result.VirtualMachine); err != nil {\n\t\treturn VirtualMachinesClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ProductPolicyClient) getHandleResponse(resp *http.Response) (ProductPolicyClientGetResponse, error) {\n\tresult := ProductPolicyClientGetResponse{}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PolicyContract); err != nil {\n\t\treturn ProductPolicyClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *MetricAlertsClient) getHandleResponse(resp *http.Response) (MetricAlertsClientGetResponse, error) {\n\tresult := MetricAlertsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.MetricAlertResource); err != nil {\n\t\treturn MetricAlertsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualNetworkLinksClient) getHandleResponse(resp *http.Response) (VirtualNetworkLinksGetResponse, error) {\n\tresult := VirtualNetworkLinksGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VirtualNetworkLink); err != nil {\n\t\treturn VirtualNetworkLinksGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *SubscriptionClient) getHandleResponse(resp *http.Response) (SubscriptionClientGetResponse, error) {\n\tresult := SubscriptionClientGetResponse{}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SubscriptionContract); err != nil {\n\t\treturn SubscriptionClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *PeeringPoliciesClient) getHandleResponse(resp *http.Response) (PeeringPoliciesClientGetResponse, error) {\n\tresult := PeeringPoliciesClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PeeringPolicy); err != nil {\n\t\treturn PeeringPoliciesClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *TaskRunsClient) getHandleResponse(resp *http.Response) (TaskRunsClientGetResponse, error) {\n\tresult := TaskRunsClientGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.TaskRun); err != nil {\n\t\treturn TaskRunsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AccountsClient) getHandleResponse(resp *http.Response) (AccountsClientGetResponse, error) {\n\tresult := AccountsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Account); err != nil {\n\t\treturn AccountsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *CustomAssessmentAutomationsClient) getHandleResponse(resp *http.Response) (CustomAssessmentAutomationsGetResponse, error) {\n\tresult := CustomAssessmentAutomationsGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CustomAssessmentAutomation); err != nil {\n\t\treturn CustomAssessmentAutomationsGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ExpressRoutePortsLocationsClient) getHandleResponse(resp *http.Response) (ExpressRoutePortsLocationsClientGetResponse, error) {\n\tresult := ExpressRoutePortsLocationsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ExpressRoutePortsLocation); err != nil {\n\t\treturn ExpressRoutePortsLocationsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ManagedInstancesClient) getHandleResponse(resp *http.Response) 
(ManagedInstancesClientGetResponse, error) {\n\tresult := ManagedInstancesClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedInstance); err != nil {\n\t\treturn ManagedInstancesClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *MonitorsClient) getHandleResponse(resp *http.Response) (MonitorsClientGetResponse, error) {\n\tresult := MonitorsClientGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.MonitorResource); err != nil {\n\t\treturn MonitorsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AvailabilitySetsClient) getHandleResponse(resp *http.Response) (AvailabilitySetsGetResponse, error) {\n\tresult := AvailabilitySetsGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AvailabilitySet); err != nil {\n\t\treturn AvailabilitySetsGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebhooksClient) getHandleResponse(resp *http.Response) (WebhooksClientGetResponse, error) {\n\tresult := WebhooksClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Webhook); err != nil {\n\t\treturn WebhooksClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ReplicationProtectionContainersClient) getHandleResponse(resp *http.Response) (ReplicationProtectionContainersClientGetResponse, error) {\n\tresult := ReplicationProtectionContainersClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ProtectionContainer); err != nil {\n\t\treturn ReplicationProtectionContainersClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *NetworkToNetworkInterconnectsClient) getHandleResponse(resp *http.Response) (NetworkToNetworkInterconnectsClientGetResponse, error) {\n\tresult := NetworkToNetworkInterconnectsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.NetworkToNetworkInterconnect); err != nil {\n\t\treturn NetworkToNetworkInterconnectsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *CloudServicesClient) getHandleResponse(resp *http.Response) (CloudServicesClientGetResponse, error) {\n\tresult := CloudServicesClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CloudService); err != nil {\n\t\treturn CloudServicesClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AgentPoolsClient) getHandleResponse(resp *http.Response) (AgentPoolsClientGetResponse, error) {\n\tresult := AgentPoolsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AgentPool); err != nil {\n\t\treturn AgentPoolsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualApplianceSitesClient) getHandleResponse(resp *azcore.Response) (VirtualApplianceSiteResponse, error) {\n\tvar val *VirtualApplianceSite\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn VirtualApplianceSiteResponse{}, err\n\t}\n\treturn VirtualApplianceSiteResponse{RawResponse: resp.Response, VirtualApplianceSite: val}, nil\n}", "func (client *HCRPAssignmentsClient) getHandleResponse(resp *http.Response) (HCRPAssignmentsClientGetResponse, error) {\n\tresult := HCRPAssignmentsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Assignment); err != nil {\n\t\treturn HCRPAssignmentsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *CustomDomainsClient) getHandleResponse(resp *http.Response) (CustomDomainsGetResponse, error) {\n\tresult := CustomDomainsGetResponse{RawResponse: 
resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CustomDomainResource); err != nil {\n\t\treturn CustomDomainsGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ApplicationTypeVersionsClient) getHandleResponse(resp *http.Response) (ApplicationTypeVersionsClientGetResponse, error) {\n\tresult := ApplicationTypeVersionsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ApplicationTypeVersionResource); err != nil {\n\t\treturn ApplicationTypeVersionsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *GuestAgentsClient) getHandleResponse(resp *http.Response) (GuestAgentsClientGetResponse, error) {\n\tresult := GuestAgentsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GuestAgent); err != nil {\n\t\treturn GuestAgentsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *GalleryImageVersionsClient) getHandleResponse(resp *azcore.Response) (GalleryImageVersionResponse, error) {\n\tvar val *GalleryImageVersion\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn GalleryImageVersionResponse{}, err\n\t}\n\treturn GalleryImageVersionResponse{RawResponse: resp.Response, GalleryImageVersion: val}, nil\n}", "func (client *RedisClient) getHandleResponse(resp *http.Response) (RedisGetResponse, error) {\n\tresult := RedisGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RedisResource); err != nil {\n\t\treturn RedisGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *AnalysisResultsClient) getHandleResponse(resp *http.Response) (AnalysisResultsClientGetResponse, error) {\n\tresult := AnalysisResultsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AnalysisResultSingletonResource); err != nil {\n\t\treturn AnalysisResultsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *JobExecutionsClient) getHandleResponse(resp *http.Response) (JobExecutionsClientGetResponse, error) {\n\tresult := JobExecutionsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.JobExecution); err != nil {\n\t\treturn JobExecutionsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ManagedClustersClient) getHandleResponse(resp *http.Response) (ManagedClustersClientGetResponse, error) {\n\tresult := ManagedClustersClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedCluster); err != nil {\n\t\treturn ManagedClustersClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WCFRelaysClient) getHandleResponse(resp *http.Response) (WCFRelaysClientGetResponse, error) {\n\tresult := WCFRelaysClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.WcfRelay); err != nil {\n\t\treturn WCFRelaysClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SyncGroupsClient) getHandleResponse(resp *http.Response) (SyncGroupsClientGetResponse, error) {\n\tresult := SyncGroupsClientGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SyncGroup); err != nil {\n\t\treturn SyncGroupsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ClustersClient) getHandleResponse(resp *http.Response) (ClustersClientGetResponse, error) {\n\tresult := ClustersClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Cluster); err != nil {\n\t\treturn ClustersClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WorkspacesClient) 
getHandleResponse(resp *http.Response) (WorkspacesGetResponse, error) {\n\tresult := WorkspacesGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Workspace); err != nil {\n\t\treturn WorkspacesGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *subscriptionClient) getHandleResponse(resp *http.Response) (SubscriptionGetResponse, error) {\n\tresult := SubscriptionGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsXML(resp, &result.Object); err != nil {\n\t\treturn SubscriptionGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SourceControlConfigurationsClient) getHandleResponse(resp *http.Response) (SourceControlConfigurationsClientGetResponse, error) {\n\tresult := SourceControlConfigurationsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SourceControlConfiguration); err != nil {\n\t\treturn SourceControlConfigurationsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ClustersClient) getHandleResponse(resp *http.Response) (ClustersGetResponse, error) {\n\tresult := ClustersGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Cluster); err != nil {\n\t\treturn ClustersGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *AssociationsClient) getHandleResponse(resp *http.Response) (AssociationsClientGetResponse, error) {\n\tresult := AssociationsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Association); err != nil {\n\t\treturn AssociationsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ReplicationsClient) getHandleResponse(resp *azcore.Response) (ReplicationResponse, error) {\n\tvar val *Replication\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn ReplicationResponse{}, err\n\t}\n\treturn ReplicationResponse{RawResponse: resp.Response, Replication: val}, nil\n}", "func (client *CertificateOrdersClient) getHandleResponse(resp *http.Response) (CertificateOrdersClientGetResponse, error) {\n\tresult := CertificateOrdersClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CertificateOrder); err != nil {\n\t\treturn CertificateOrdersClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualMachineImageTemplatesClient) getHandleResponse(resp *http.Response) (VirtualMachineImageTemplatesClientGetResponse, error) {\n\tresult := VirtualMachineImageTemplatesClientGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ImageTemplate); err != nil {\n\t\treturn VirtualMachineImageTemplatesClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ManagedDatabasesClient) getHandleResponse(resp *http.Response) (ManagedDatabasesClientGetResponse, error) {\n\tresult := ManagedDatabasesClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedDatabase); err != nil {\n\t\treturn ManagedDatabasesClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SpatialAnchorsAccountsClient) getHandleResponse(resp *http.Response) (SpatialAnchorsAccountsClientGetResponse, error) {\n\tresult := SpatialAnchorsAccountsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SpatialAnchorsAccount); err != nil {\n\t\treturn SpatialAnchorsAccountsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DedicatedHostsClient) getHandleResponse(resp *azcore.Response) (DedicatedHostResponse, error) {\n\tvar val 
*DedicatedHost\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn DedicatedHostResponse{}, err\n\t}\n\treturn DedicatedHostResponse{RawResponse: resp.Response, DedicatedHost: val}, nil\n}", "func (client *PermissionBindingsClient) getHandleResponse(resp *http.Response) (PermissionBindingsClientGetResponse, error) {\n\tresult := PermissionBindingsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PermissionBinding); err != nil {\n\t\treturn PermissionBindingsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualMachineScaleSetVMRunCommandsClient) getHandleResponse(resp *http.Response) (VirtualMachineScaleSetVMRunCommandsGetResponse, error) {\n\tresult := VirtualMachineScaleSetVMRunCommandsGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineRunCommand); err != nil {\n\t\treturn VirtualMachineScaleSetVMRunCommandsGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *SQLVirtualMachinesClient) getHandleResponse(resp *http.Response) (SQLVirtualMachinesClientGetResponse, error) {\n\tresult := SQLVirtualMachinesClientGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SQLVirtualMachine); err != nil {\n\t\treturn SQLVirtualMachinesClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *NotebookWorkspacesClient) getHandleResponse(resp *http.Response) (NotebookWorkspacesGetResponse, error) {\n\tresult := NotebookWorkspacesGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.NotebookWorkspace); err != nil {\n\t\treturn NotebookWorkspacesGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *GalleryImagesClient) getHandleResponse(resp *http.Response) (GalleryImagesClientGetResponse, error) {\n\tresult := GalleryImagesClientGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GalleryImage); err != nil {\n\t\treturn GalleryImagesClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DedicatedHostsClient) getHandleResponse(resp *http.Response) (DedicatedHostsGetResponse, error) {\n\tresult := DedicatedHostsGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DedicatedHost); err != nil {\n\t\treturn DedicatedHostsGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client PeerExpressRouteCircuitConnectionsClient) getHandleResponse(resp *azcore.Response) (PeerExpressRouteCircuitConnectionResponse, error) {\n\tresult := PeerExpressRouteCircuitConnectionResponse{RawResponse: resp.Response}\n\terr := resp.UnmarshalAsJSON(&result.PeerExpressRouteCircuitConnection)\n\treturn result, err\n}", "func (client *RegistrationDefinitionsClient) getHandleResponse(resp *http.Response) (RegistrationDefinitionsClientGetResponse, error) {\n\tresult := RegistrationDefinitionsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RegistrationDefinition); err != nil {\n\t\treturn RegistrationDefinitionsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ManagementAssociationsClient) getHandleResponse(resp *http.Response) (ManagementAssociationsGetResponse, error) {\n\tresult := ManagementAssociationsGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagementAssociation); err != nil {\n\t\treturn ManagementAssociationsGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, 
nil\n}", "func (client *ManagedDatabaseSecurityAlertPoliciesClient) getHandleResponse(resp *http.Response) (ManagedDatabaseSecurityAlertPoliciesClientGetResponse, error) {\n\tresult := ManagedDatabaseSecurityAlertPoliciesClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedDatabaseSecurityAlertPolicy); err != nil {\n\t\treturn ManagedDatabaseSecurityAlertPoliciesClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualMachineImagesEdgeZoneClient) getHandleResponse(resp *http.Response) (VirtualMachineImagesEdgeZoneClientGetResponse, error) {\n\tresult := VirtualMachineImagesEdgeZoneClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineImage); err != nil {\n\t\treturn VirtualMachineImagesEdgeZoneClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *CompliancesClient) getHandleResponse(resp *http.Response) (CompliancesClientGetResponse, error) {\n\tresult := CompliancesClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Compliance); err != nil {\n\t\treturn CompliancesClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ConnectedEnvironmentsClient) getHandleResponse(resp *http.Response) (ConnectedEnvironmentsClientGetResponse, error) {\n\tresult := ConnectedEnvironmentsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ConnectedEnvironment); err != nil {\n\t\treturn ConnectedEnvironmentsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ConsumerInvitationsClient) getHandleResponse(resp *http.Response) (ConsumerInvitationsClientGetResponse, error) {\n\tresult := ConsumerInvitationsClientGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ConsumerInvitation); err != nil {\n\t\treturn ConsumerInvitationsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *UserMetricsKeysClient) getHandleResponse(resp *http.Response) (UserMetricsKeysClientGetResponse, error) {\n\tresult := UserMetricsKeysClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.UserMetricsModel); err != nil {\n\t\treturn UserMetricsKeysClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *PacketCoreDataPlanesClient) getHandleResponse(resp *http.Response) (PacketCoreDataPlanesClientGetResponse, error) {\n\tresult := PacketCoreDataPlanesClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PacketCoreDataPlane); err != nil {\n\t\treturn PacketCoreDataPlanesClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DevicesClient) getHandleResponse(resp *http.Response) (DevicesClientGetResponse, error) {\n\tresult := DevicesClientGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Device); err != nil {\n\t\treturn DevicesClientGetResponse{}, err\n\t}\n\treturn result, nil\n}" ]
[ "0.7495375", "0.74194175", "0.7407496", "0.73024285", "0.7190589", "0.71616685", "0.7149459", "0.7110431", "0.7099549", "0.7077434", "0.7042537", "0.7032869", "0.7018246", "0.6996379", "0.69687", "0.6964784", "0.69636226", "0.6963067", "0.6956996", "0.69501376", "0.69436526", "0.69401246", "0.6914852", "0.69068694", "0.68860036", "0.68839544", "0.68693894", "0.6865552", "0.68601245", "0.6842939", "0.6842509", "0.683905", "0.683905", "0.6836444", "0.6826362", "0.68129593", "0.6808945", "0.67977643", "0.6790069", "0.6785043", "0.67813337", "0.6778236", "0.6777227", "0.6771354", "0.67621726", "0.67593753", "0.6756922", "0.67557395", "0.67484224", "0.6740914", "0.673735", "0.672852", "0.6723565", "0.6723092", "0.6715184", "0.67135215", "0.6709077", "0.6706317", "0.6704842", "0.6688456", "0.6673203", "0.66702557", "0.66671216", "0.66531396", "0.66526306", "0.6647918", "0.66473186", "0.66380095", "0.6630611", "0.6625675", "0.6621975", "0.6619547", "0.6618547", "0.66151613", "0.6603894", "0.66023695", "0.65963477", "0.65892065", "0.6586302", "0.65856355", "0.6584834", "0.6580642", "0.6574927", "0.65742505", "0.65649086", "0.6559564", "0.65544987", "0.6542835", "0.65422773", "0.6528777", "0.6516893", "0.65131396", "0.6511144", "0.65106976", "0.6508521", "0.6506679", "0.6484803", "0.64838195", "0.64795923", "0.64712983" ]
0.67575526
46
getAtManagementGroupCreateRequest creates the GetAtManagementGroup request.
func (client *PolicyDefinitionsClient) getAtManagementGroupCreateRequest(ctx context.Context, policyDefinitionName string, managementGroupID string, options *PolicyDefinitionsGetAtManagementGroupOptions) (*policy.Request, error) {
	urlPath := "/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}"
	if policyDefinitionName == "" {
		return nil, errors.New("parameter policyDefinitionName cannot be empty")
	}
	urlPath = strings.ReplaceAll(urlPath, "{policyDefinitionName}", url.PathEscape(policyDefinitionName))
	if managementGroupID == "" {
		return nil, errors.New("parameter managementGroupID cannot be empty")
	}
	urlPath = strings.ReplaceAll(urlPath, "{managementGroupId}", url.PathEscape(managementGroupID))
	req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("api-version", "2021-06-01")
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header.Set("Accept", "application/json")
	return req, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (client *PolicyDefinitionsClient) listByManagementGroupCreateRequest(ctx context.Context, managementGroupID string, options *PolicyDefinitionsListByManagementGroupOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions\"\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tunencodedParams := []string{req.Raw().URL.RawQuery}\n\tif options != nil && options.Filter != nil {\n\t\tunencodedParams = append(unencodedParams, \"$filter=\"+*options.Filter)\n\t}\n\treq.Raw().URL.RawQuery = strings.Join(unencodedParams, \"&\")\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *PolicyDefinitionsClient) createOrUpdateAtManagementGroupCreateRequest(ctx context.Context, policyDefinitionName string, managementGroupID string, parameters PolicyDefinition, options *PolicyDefinitionsCreateOrUpdateAtManagementGroupOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}\"\n\tif policyDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter policyDefinitionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{policyDefinitionName}\", url.PathEscape(policyDefinitionName))\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *PolicyDefinitionsClient) deleteAtManagementGroupCreateRequest(ctx context.Context, policyDefinitionName string, managementGroupID string, options *PolicyDefinitionsDeleteAtManagementGroupOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}\"\n\tif policyDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter policyDefinitionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{policyDefinitionName}\", url.PathEscape(policyDefinitionName))\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := 
req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ContainerGroupsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ContainerGroupsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerInstance/containerGroups\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *CustomAssessmentAutomationsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *CustomAssessmentAutomationsListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Security/customAssessmentAutomations\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *IPAllocationsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *IPAllocationsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = 
reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ClustersClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ClustersClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/serverGroupsv2\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-11-08\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateDescribeContainerGroupMetricRequest() (request *DescribeContainerGroupMetricRequest) {\n\trequest = &DescribeContainerGroupMetricRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Eci\", \"2018-08-08\", \"DescribeContainerGroupMetric\", \"eci\", \"openAPI\")\n\treturn\n}", "func (client *GroupClient) getCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, groupID string, options *GroupGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups/{groupId}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif groupID == \"\" {\n\t\treturn nil, errors.New(\"parameter groupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{groupId}\", url.PathEscape(groupID))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *CapacitiesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *CapacitiesClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBIDedicated/capacities\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter 
client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-01-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (c *UMemClient) NewCreateUMemcacheGroupRequest() *CreateUMemcacheGroupRequest {\n\treq := &CreateUMemcacheGroupRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(false)\n\treturn req\n}", "func (client *ManagedInstancesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ManagedInstancesClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *VirtualNetworkTapsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *VirtualNetworkTapsListByResourceGroupOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-07-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *MonitorsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *MonitorsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif 
resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *DiskEncryptionSetsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *DiskEncryptionSetsListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *MetricAlertsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *MetricAlertsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/metricAlerts\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ManagedClustersClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ManagedClustersClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, 
\"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ClustersClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ClustersListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/clusters\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func NewIgroupCreateRequest() *IgroupCreateRequest {\n\treturn &IgroupCreateRequest{}\n}", "func (client *IotSecuritySolutionClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *IotSecuritySolutionClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/iotSecuritySolutions\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-08-01\")\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *LocalRulestacksClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *LocalRulestacksClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be 
empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-29\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ContainerGroupsClient) getCreateRequest(ctx context.Context, resourceGroupName string, containerGroupName string, options *ContainerGroupsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerInstance/containerGroups/{containerGroupName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif containerGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter containerGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{containerGroupName}\", url.PathEscape(containerGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *DataCollectionEndpointsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *DataCollectionEndpointsListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionEndpoints\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *DataCollectionEndpointsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *DataCollectionEndpointsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionEndpoints\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", 
url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *SpatialAnchorsAccountsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *SpatialAnchorsAccountsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MixedReality/spatialAnchorsAccounts\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-03-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ConnectedEnvironmentsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ConnectedEnvironmentsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/connectedEnvironments\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *VirtualMachineImageTemplatesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *VirtualMachineImageTemplatesClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.VirtualMachineImages/imageTemplates\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" 
{\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-10-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *DevicesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *DevicesClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HybridNetwork/devices\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateCreateApplicationGroupRequest() (request *CreateApplicationGroupRequest) {\n\trequest = &CreateApplicationGroupRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"oos\", \"2019-06-01\", \"CreateApplicationGroup\", \"oos\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *AccountsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *AccountsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CognitiveServices/accounts\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ServersClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ServersClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = 
strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *CapacityReservationsClient) listByCapacityReservationGroupCreateRequest(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, options *CapacityReservationsListByCapacityReservationGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}/capacityReservations\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif capacityReservationGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter capacityReservationGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{capacityReservationGroupName}\", url.PathEscape(capacityReservationGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *AlertProcessingRulesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *AlertProcessingRulesClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AlertsManagement/actionRules\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-08\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *SQLVirtualMachinesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, 
options *SQLVirtualMachinesClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-03-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateDescribeMultiContainerGroupMetricRequest() (request *DescribeMultiContainerGroupMetricRequest) {\n\trequest = &DescribeMultiContainerGroupMetricRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Eci\", \"2018-08-08\", \"DescribeMultiContainerGroupMetric\", \"eci\", \"openAPI\")\n\treturn\n}", "func (client *CertificateOrdersClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *CertificateOrdersClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *AvailabilityGroupListenersClient) listByGroupCreateRequest(ctx context.Context, resourceGroupName string, sqlVirtualMachineGroupName string, options *AvailabilityGroupListenersClientListByGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachineGroups/{sqlVirtualMachineGroupName}/availabilityGroupListeners\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif sqlVirtualMachineGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter sqlVirtualMachineGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sqlVirtualMachineGroupName}\", url.PathEscape(sqlVirtualMachineGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot 
be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *VirtualMachinesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *VirtualMachinesClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-10-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *DedicatedHostsClient) listByHostGroupCreateRequest(ctx context.Context, resourceGroupName string, hostGroupName string, options *DedicatedHostsListByHostGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif hostGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter hostGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{hostGroupName}\", url.PathEscape(hostGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *PrivateDNSZoneGroupsClient) getCreateRequest(ctx context.Context, resourceGroupName string, privateEndpointName string, privateDNSZoneGroupName string, options *PrivateDNSZoneGroupsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateEndpoints/{privateEndpointName}/privateDnsZoneGroups/{privateDnsZoneGroupName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be 
empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif privateEndpointName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateEndpointName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateEndpointName}\", url.PathEscape(privateEndpointName))\n\tif privateDNSZoneGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateDNSZoneGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateDnsZoneGroupName}\", url.PathEscape(privateDNSZoneGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateDescribeScalingGroupsRequest() (request *DescribeScalingGroupsRequest) {\n\trequest = &DescribeScalingGroupsRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Ess\", \"2014-08-28\", \"DescribeScalingGroups\", \"ess\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateCreateMonitorGroupRequest() (request *CreateMonitorGroupRequest) {\n\trequest = &CreateMonitorGroupRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Cms\", \"2019-01-01\", \"CreateMonitorGroup\", \"cms\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *DedicatedHostsClient) listByHostGroupCreateRequest(ctx context.Context, resourceGroupName string, hostGroupName string, options *DedicatedHostsListByHostGroupOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{hostGroupName}\", url.PathEscape(hostGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *RedisClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *RedisListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redis\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", 
url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-12-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *SyncGroupsClient) getCreateRequest(ctx context.Context, resourceGroupName string, serverName string, databaseName string, syncGroupName string, options *SyncGroupsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/syncGroups/{syncGroupName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serverName == \"\" {\n\t\treturn nil, errors.New(\"parameter serverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serverName}\", url.PathEscape(serverName))\n\tif databaseName == \"\" {\n\t\treturn nil, errors.New(\"parameter databaseName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{databaseName}\", url.PathEscape(databaseName))\n\tif syncGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter syncGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{syncGroupName}\", url.PathEscape(syncGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (c *UDBClient) NewCreateUDBParamGroupRequest() *CreateUDBParamGroupRequest {\n\treq := &CreateUDBParamGroupRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(false)\n\treturn req\n}", "func (client *WorkspacesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *WorkspacesListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, 
\"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateDeleteCorpGroupRequest() (request *DeleteCorpGroupRequest) {\n\trequest = &DeleteCorpGroupRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Vcs\", \"2020-05-15\", \"DeleteCorpGroup\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateGetHotlineGroupDetailReportRequest() (request *GetHotlineGroupDetailReportRequest) {\n\trequest = &GetHotlineGroupDetailReportRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"scsp\", \"2020-07-02\", \"GetHotlineGroupDetailReport\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *GroupClient) getEntityTagCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, groupID string, options *GroupGetEntityTagOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups/{groupId}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif groupID == \"\" {\n\t\treturn nil, errors.New(\"parameter groupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{groupId}\", url.PathEscape(groupID))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodHead, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *VirtualRoutersClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *VirtualRoutersClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", 
\"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ContainerGroupsClient) listCreateRequest(ctx context.Context, options *ContainerGroupsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.ContainerInstance/containerGroups\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *GroupClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, groupID string, parameters GroupCreateParameters, options *GroupCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups/{groupId}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif groupID == \"\" {\n\t\treturn nil, errors.New(\"parameter groupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{groupId}\", url.PathEscape(groupID))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfMatch != nil {\n\t\treq.Raw().Header.Set(\"If-Match\", *options.IfMatch)\n\t}\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *LongTermRetentionManagedInstanceBackupsClient) getByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, locationName string, managedInstanceName string, databaseName string, backupName string, options *LongTermRetentionManagedInstanceBackupsGetByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/locations/{locationName}/longTermRetentionManagedInstances/{managedInstanceName}/longTermRetentionDatabases/{databaseName}/longTermRetentionManagedInstanceBackups/{backupName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif locationName == \"\" 
{\n\t\treturn nil, errors.New(\"parameter locationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{locationName}\", url.PathEscape(locationName))\n\tif managedInstanceName == \"\" {\n\t\treturn nil, errors.New(\"parameter managedInstanceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedInstanceName}\", url.PathEscape(managedInstanceName))\n\tif databaseName == \"\" {\n\t\treturn nil, errors.New(\"parameter databaseName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{databaseName}\", url.PathEscape(databaseName))\n\tif backupName == \"\" {\n\t\treturn nil, errors.New(\"parameter backupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{backupName}\", url.PathEscape(backupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *CassandraClustersClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *CassandraClustersClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-03-15-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *VirtualMachineScaleSetsClient) convertToSinglePlacementGroupCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters VMScaleSetConvertToSinglePlacementGroupInput, options *VirtualMachineScaleSetsConvertToSinglePlacementGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/convertToSinglePlacementGroup\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif vmScaleSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter vmScaleSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vmScaleSetName}\", url.PathEscape(vmScaleSetName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be 
empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *ServersClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroup string, options *ServersClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.FluidRelay/fluidRelayServers\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroup == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroup cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroup}\", url.PathEscape(resourceGroup))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ContainerGroupsClient) startCreateRequest(ctx context.Context, resourceGroupName string, containerGroupName string, options *ContainerGroupsClientBeginStartOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerInstance/containerGroups/{containerGroupName}/start\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif containerGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter containerGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{containerGroupName}\", url.PathEscape(containerGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateCreateMonitorGroupByResourceGroupIdRequest() (request *CreateMonitorGroupByResourceGroupIdRequest) {\n\trequest = &CreateMonitorGroupByResourceGroupIdRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Cms\", \"2019-01-01\", \"CreateMonitorGroupByResourceGroupId\", \"cms\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *PrivateDNSZoneGroupsClient) listCreateRequest(ctx context.Context, privateEndpointName string, resourceGroupName string, options *PrivateDNSZoneGroupsClientListOptions) (*policy.Request, error) {\n\turlPath 
:= \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateEndpoints/{privateEndpointName}/privateDnsZoneGroups\"\n\tif privateEndpointName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateEndpointName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateEndpointName}\", url.PathEscape(privateEndpointName))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ClustersClient) getCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, options *ClustersClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/serverGroupsv2/{clusterName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-11-08\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ContainerGroupsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, containerGroupName string, containerGroup ContainerGroup, options *ContainerGroupsClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerInstance/containerGroups/{containerGroupName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif containerGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter containerGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{containerGroupName}\", url.PathEscape(containerGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, 
runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, containerGroup)\n}", "func (client *GroupClient) listByServiceCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, options *GroupListByServiceOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\tif options != nil && options.Skip != nil {\n\t\treqQP.Set(\"$skip\", strconv.FormatInt(int64(*options.Skip), 10))\n\t}\n\treqQP.Set(\"api-version\", \"2021-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *AvailabilitySetsClient) getCreateRequest(ctx context.Context, resourceGroupName string, availabilitySetName string, options *AvailabilitySetsGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif availabilitySetName == \"\" {\n\t\treturn nil, errors.New(\"parameter availabilitySetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{availabilitySetName}\", url.PathEscape(availabilitySetName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *GroupClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, groupID string, ifMatch string, options *GroupDeleteOptions) 
(*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups/{groupId}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif groupID == \"\" {\n\t\treturn nil, errors.New(\"parameter groupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{groupId}\", url.PathEscape(groupID))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"If-Match\", ifMatch)\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client IdentityClient) createGroup(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/groups\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateGroupResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (client *ManagementAssociationsClient) getCreateRequest(ctx context.Context, resourceGroupName string, managementAssociationName string, options *ManagementAssociationsGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{providerName}/{resourceType}/{resourceName}/providers/Microsoft.OperationsManagement/ManagementAssociations/{managementAssociationName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.providerName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.providerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{providerName}\", url.PathEscape(client.providerName))\n\tif client.resourceType == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceType}\", url.PathEscape(client.resourceType))\n\tif client.resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceName cannot be empty\")\n\t}\n\turlPath = 
strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(client.resourceName))\n\tif managementAssociationName == \"\" {\n\t\treturn nil, errors.New(\"parameter managementAssociationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementAssociationName}\", url.PathEscape(managementAssociationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2015-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func NewAttachGroupPolicyRequestWithoutParam() *AttachGroupPolicyRequest {\n\n return &AttachGroupPolicyRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/group/{groupName}:attachGroupPolicy\",\n Method: \"POST\",\n Header: nil,\n Version: \"v1\",\n },\n }\n}", "func (client *WebAppsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *WebAppsListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.IncludeSlots != nil {\n\t\treqQP.Set(\"includeSlots\", strconv.FormatBool(*options.IncludeSlots))\n\t}\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (m *MockRDSAPI) CreateOptionGroupRequest(arg0 *rds.CreateOptionGroupInput) (*request.Request, *rds.CreateOptionGroupOutput) {\n\tret := m.ctrl.Call(m, \"CreateOptionGroupRequest\", arg0)\n\tret0, _ := ret[0].(*request.Request)\n\tret1, _ := ret[1].(*rds.CreateOptionGroupOutput)\n\treturn ret0, ret1\n}", "func (client *SQLVirtualMachinesClient) listBySQLVMGroupCreateRequest(ctx context.Context, resourceGroupName string, sqlVirtualMachineGroupName string, options *SQLVirtualMachinesClientListBySQLVMGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachineGroups/{sqlVirtualMachineGroupName}/sqlVirtualMachines\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif sqlVirtualMachineGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter sqlVirtualMachineGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sqlVirtualMachineGroupName}\", url.PathEscape(sqlVirtualMachineGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, 
\"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-03-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *IPAllocationsClient) getCreateRequest(ctx context.Context, resourceGroupName string, ipAllocationName string, options *IPAllocationsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations/{ipAllocationName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif ipAllocationName == \"\" {\n\t\treturn nil, errors.New(\"parameter ipAllocationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ipAllocationName}\", url.PathEscape(ipAllocationName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func createGroup() (group resources.Group, err error) {\n\tgroupsClient := resources.NewGroupsClient(config.SubscriptionID)\n\tgroupsClient.Authorizer = autorest.NewBearerAuthorizer(token)\n\n\treturn groupsClient.CreateOrUpdate(\n\t\tctx,\n\t\tresourceGroupName,\n\t\tresources.Group{\n\t\t\tLocation: to.StringPtr(resourceGroupLocation)})\n}", "func (client *MonitoringSettingsClient) getCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, options *MonitoringSettingsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = 
[]string{\"application/json\"}\n\treturn req, nil\n}", "func NewDeploymentAtManagementGroupScope(ctx *pulumi.Context,\n\tname string, args *DeploymentAtManagementGroupScopeArgs, opts ...pulumi.ResourceOption) (*DeploymentAtManagementGroupScope, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.GroupId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'GroupId'\")\n\t}\n\tif args.Properties == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Properties'\")\n\t}\n\taliases := pulumi.Aliases([]pulumi.Alias{\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20190501:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20190510:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20190510:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20190701:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20190701:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20190801:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20190801:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20191001:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20191001:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20200601:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20200601:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20200801:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20200801:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20201001:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20201001:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20210101:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20210101:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:resources/v20210401:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:resources/v20210401:DeploymentAtManagementGroupScope\"),\n\t\t},\n\t})\n\topts = append(opts, aliases)\n\tvar resource DeploymentAtManagementGroupScope\n\terr := ctx.RegisterResource(\"azure-native:resources/v20190501:DeploymentAtManagementGroupScope\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func BuildCreatePermissionGroupIn() *model.CreatePermissionGroupIn {\n\treturn &model.CreatePermissionGroupIn{\n\t\tName: *RandomString(10),\n\t\tDescription: RandomString(20),\n\t\tPermissions: 
[]model.Permission{\n\t\t\tmodel.PermissionUser,\n\t\t\tmodel.PermissionPermissionGroup,\n\t\t},\n\t}\n}", "func (client *FactoriesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *FactoriesClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreatePutMetricAlarmRequest() (request *PutMetricAlarmRequest) {\n\trequest = &PutMetricAlarmRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Cms\", \"2018-03-08\", \"PutMetricAlarm\", \"cms\", \"openAPI\")\n\treturn\n}", "func (gr *GroupResource) Create(owner string, name string) (g *GroupDetails, err error) {\n\townerOrCurrentUser(gr, &owner)\n\n\tpath := fmt.Sprintf(\"/groups/%s/\", owner)\n\tvalues := url.Values{}\n\tvalues.Set(\"name\", name)\n\terr = gr.client.do(\"POST\", path, nil, values, &g)\n\n\treturn\n}", "func (client *AvailabilitySetsClient) listCreateRequest(ctx context.Context, resourceGroupName string, options *AvailabilitySetsListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateModifySkillGroupExRequest() (request *ModifySkillGroupExRequest) {\n\trequest = &ModifySkillGroupExRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"CloudCallCenter\", \"2017-07-05\", \"ModifySkillGroupEx\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateRemoveAppGroupRequest() (request *RemoveAppGroupRequest) {\n\trequest = &RemoveAppGroupRequest{\n\t\tRoaRequest: &requests.RoaRequest{},\n\t}\n\trequest.InitWithApiInfo(\"OpenSearch\", \"2017-12-25\", \"RemoveAppGroup\", \"/v4/openapi/app-groups/[appGroupIdentity]\", \"\", \"\")\n\trequest.Method = requests.DELETE\n\treturn\n}", "func 
(client *MonitorsClient) getCreateRequest(ctx context.Context, resourceGroupName string, monitorName string, options *MonitorsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors/{monitorName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif monitorName == \"\" {\n\t\treturn nil, errors.New(\"parameter monitorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{monitorName}\", url.PathEscape(monitorName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ClustersClient) getCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, options *ClustersGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/clusters/{clusterName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *DeviceSettingsClient) getSecuritySettingsCreateRequest(ctx context.Context, deviceName string, resourceGroupName string, managerName string, options *DeviceSettingsClientGetSecuritySettingsOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorSimple/managers/{managerName}/devices/{deviceName}/securitySettings/default\"\n\turlPath = strings.ReplaceAll(urlPath, \"{deviceName}\", deviceName)\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", client.subscriptionID)\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", resourceGroupName)\n\turlPath = strings.ReplaceAll(urlPath, \"{managerName}\", managerName)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", 
\"2017-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (_BaseContentSpace *BaseContentSpaceTransactor) CreateGroup(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _BaseContentSpace.contract.Transact(opts, \"createGroup\")\n}", "func (c *UDBClient) NewExtractUDBParamGroupRequest() *ExtractUDBParamGroupRequest {\n\treq := &ExtractUDBParamGroupRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (c *UDBClient) NewExtractUDBParamGroupRequest() *ExtractUDBParamGroupRequest {\n\treq := &ExtractUDBParamGroupRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (client *CapacitiesClient) getDetailsCreateRequest(ctx context.Context, resourceGroupName string, dedicatedCapacityName string, options *CapacitiesClientGetDetailsOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBIDedicated/capacities/{dedicatedCapacityName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif dedicatedCapacityName == \"\" {\n\t\treturn nil, errors.New(\"parameter dedicatedCapacityName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{dedicatedCapacityName}\", url.PathEscape(dedicatedCapacityName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-01-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateGetManagedRuleRequest() (request *GetManagedRuleRequest) {\n\trequest = &GetManagedRuleRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Config\", \"2020-09-07\", \"GetManagedRule\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *CapacityReservationsClient) getCreateRequest(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, options *CapacityReservationsGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}/capacityReservations/{capacityReservationName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif capacityReservationGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter capacityReservationGroupName cannot be empty\")\n\t}\n\turlPath = 
strings.ReplaceAll(urlPath, \"{capacityReservationGroupName}\", url.PathEscape(capacityReservationGroupName))\n\tif capacityReservationName == \"\" {\n\t\treturn nil, errors.New(\"parameter capacityReservationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{capacityReservationName}\", url.PathEscape(capacityReservationName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", string(*options.Expand))\n\t}\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *LocalRulestacksClient) getSupportInfoCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientGetSupportInfoOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/getSupportInfo\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif localRulestackName == \"\" {\n\t\treturn nil, errors.New(\"parameter localRulestackName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{localRulestackName}\", url.PathEscape(localRulestackName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-29\")\n\tif options != nil && options.Email != nil {\n\t\treqQP.Set(\"email\", *options.Email)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ClientImpl) CreateControlInGroup(ctx context.Context, args CreateControlInGroupArgs) (*Control, error) {\n\tif args.Control == nil {\n\t\treturn nil, &azuredevops.ArgumentNilError{ArgumentName: \"args.Control\"}\n\t}\n\trouteValues := make(map[string]string)\n\tif args.ProcessId == nil {\n\t\treturn nil, &azuredevops.ArgumentNilError{ArgumentName: \"args.ProcessId\"}\n\t}\n\trouteValues[\"processId\"] = (*args.ProcessId).String()\n\tif args.WitRefName == nil || *args.WitRefName == \"\" {\n\t\treturn nil, &azuredevops.ArgumentNilOrEmptyError{ArgumentName: \"args.WitRefName\"}\n\t}\n\trouteValues[\"witRefName\"] = *args.WitRefName\n\tif args.GroupId == nil || *args.GroupId == \"\" {\n\t\treturn nil, &azuredevops.ArgumentNilOrEmptyError{ArgumentName: \"args.GroupId\"}\n\t}\n\trouteValues[\"groupId\"] = *args.GroupId\n\n\tbody, marshalErr := json.Marshal(*args.Control)\n\tif marshalErr != nil {\n\t\treturn nil, marshalErr\n\t}\n\tlocationId, _ := 
uuid.Parse(\"1f59b363-a2d0-4b7e-9bc6-eb9f5f3f0e58\")\n\tresp, err := client.Client.Send(ctx, http.MethodPost, locationId, \"6.0-preview.1\", routeValues, nil, bytes.NewReader(body), \"application/json\", \"application/json\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar responseValue Control\n\terr = client.Client.UnmarshalBody(resp, &responseValue)\n\treturn &responseValue, err\n}", "func NewIgroupAddRequest() *IgroupAddRequest {\n\treturn &IgroupAddRequest{}\n}", "func (client *SapMonitorsClient) getCreateRequest(ctx context.Context, resourceGroupName string, sapMonitorName string, options *SapMonitorsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HanaOnAzure/sapMonitors/{sapMonitorName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif sapMonitorName == \"\" {\n\t\treturn nil, errors.New(\"parameter sapMonitorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sapMonitorName}\", url.PathEscape(sapMonitorName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-02-07-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *LocalRulestacksClient) getCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif localRulestackName == \"\" {\n\t\treturn nil, errors.New(\"parameter localRulestackName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{localRulestackName}\", url.PathEscape(localRulestackName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-29\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateGetAggregateAccountComplianceByPackRequest() (request *GetAggregateAccountComplianceByPackRequest) {\n\trequest = &GetAggregateAccountComplianceByPackRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Config\", \"2020-09-07\", \"GetAggregateAccountComplianceByPack\", \"\", 
\"\")\n\trequest.Method = requests.GET\n\treturn\n}", "func (client *VirtualMachineScaleSetsClient) getCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif vmScaleSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter vmScaleSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vmScaleSetName}\", url.PathEscape(vmScaleSetName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", string(*options.Expand))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ManagedInstancesClient) getCreateRequest(ctx context.Context, resourceGroupName string, managedInstanceName string, options *ManagedInstancesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif managedInstanceName == \"\" {\n\t\treturn nil, errors.New(\"parameter managedInstanceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedInstanceName}\", url.PathEscape(managedInstanceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}" ]
[ "0.6663986", "0.6623923", "0.6412561", "0.6097547", "0.6086163", "0.6060779", "0.60292196", "0.5974149", "0.5945237", "0.5898692", "0.58942294", "0.58754694", "0.5872743", "0.5866616", "0.5856541", "0.58479375", "0.5844044", "0.58033", "0.5794953", "0.57581514", "0.57564795", "0.57081884", "0.5683718", "0.5651301", "0.5624999", "0.5615734", "0.5613474", "0.55924493", "0.5590977", "0.5536184", "0.55115426", "0.55091184", "0.5484719", "0.54802924", "0.54756844", "0.54740506", "0.5473192", "0.54478455", "0.54227704", "0.5422121", "0.5419113", "0.5380115", "0.5330929", "0.5329952", "0.52945894", "0.5283524", "0.5283524", "0.5245733", "0.5237009", "0.5234577", "0.52311784", "0.52190703", "0.5198627", "0.51871955", "0.5183274", "0.5169765", "0.5153166", "0.51477003", "0.51390254", "0.5131131", "0.51223123", "0.5114582", "0.5104254", "0.5102861", "0.509227", "0.50834304", "0.508195", "0.507398", "0.5069806", "0.5065219", "0.50592524", "0.50563496", "0.5042287", "0.50223005", "0.5005109", "0.49888012", "0.49637255", "0.49575073", "0.49393028", "0.49265504", "0.49102217", "0.49085715", "0.49048874", "0.48874304", "0.4883695", "0.48799044", "0.48714423", "0.48580056", "0.48580056", "0.48552725", "0.48473176", "0.4831095", "0.4827814", "0.48144203", "0.48138165", "0.48129973", "0.48102948", "0.48007643", "0.4799372", "0.47958806" ]
0.77720493
0
getAtManagementGroupHandleResponse handles the GetAtManagementGroup response.
func (client *PolicyDefinitionsClient) getAtManagementGroupHandleResponse(resp *http.Response) (PolicyDefinitionsGetAtManagementGroupResponse, error) {
	result := PolicyDefinitionsGetAtManagementGroupResponse{RawResponse: resp}
	if err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinition); err != nil {
		return PolicyDefinitionsGetAtManagementGroupResponse{}, runtime.NewResponseError(err, resp)
	}
	return result, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (client *PolicyDefinitionsClient) listByManagementGroupHandleResponse(resp *http.Response) (PolicyDefinitionsListByManagementGroupResponse, error) {\n\tresult := PolicyDefinitionsListByManagementGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinitionListResult); err != nil {\n\t\treturn PolicyDefinitionsListByManagementGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *LongTermRetentionManagedInstanceBackupsClient) getByResourceGroupHandleResponse(resp *http.Response) (LongTermRetentionManagedInstanceBackupsGetByResourceGroupResponse, error) {\n\tresult := LongTermRetentionManagedInstanceBackupsGetByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedInstanceLongTermRetentionBackup); err != nil {\n\t\treturn LongTermRetentionManagedInstanceBackupsGetByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ContainerGroupsClient) listByResourceGroupHandleResponse(resp *http.Response) (ContainerGroupsClientListByResourceGroupResponse, error) {\n\tresult := ContainerGroupsClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ContainerGroupListResult); err != nil {\n\t\treturn ContainerGroupsClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ManagedInstancesClient) listByResourceGroupHandleResponse(resp *http.Response) (ManagedInstancesClientListByResourceGroupResponse, error) {\n\tresult := ManagedInstancesClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedInstanceListResult); err != nil {\n\t\treturn ManagedInstancesClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *MetricAlertsClient) listByResourceGroupHandleResponse(resp *http.Response) (MetricAlertsClientListByResourceGroupResponse, error) {\n\tresult := MetricAlertsClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.MetricAlertResourceCollection); err != nil {\n\t\treturn MetricAlertsClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *CapacitiesClient) listByResourceGroupHandleResponse(resp *http.Response) (CapacitiesClientListByResourceGroupResponse, error) {\n\tresult := CapacitiesClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DedicatedCapacities); err != nil {\n\t\treturn CapacitiesClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DiskEncryptionSetsClient) listByResourceGroupHandleResponse(resp *http.Response) (DiskEncryptionSetsListByResourceGroupResponse, error) {\n\tresult := DiskEncryptionSetsListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DiskEncryptionSetList); err != nil {\n\t\treturn DiskEncryptionSetsListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ManagedClustersClient) listByResourceGroupHandleResponse(resp *http.Response) (ManagedClustersClientListByResourceGroupResponse, error) {\n\tresult := ManagedClustersClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedClusterListResult); err != nil {\n\t\treturn ManagedClustersClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *CustomAssessmentAutomationsClient) listByResourceGroupHandleResponse(resp 
*http.Response) (CustomAssessmentAutomationsListByResourceGroupResponse, error) {\n\tresult := CustomAssessmentAutomationsListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CustomAssessmentAutomationsListResult); err != nil {\n\t\treturn CustomAssessmentAutomationsListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *IPAllocationsClient) listByResourceGroupHandleResponse(resp *http.Response) (IPAllocationsClientListByResourceGroupResponse, error) {\n\tresult := IPAllocationsClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.IPAllocationListResult); err != nil {\n\t\treturn IPAllocationsClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *PolicyDefinitionsClient) createOrUpdateAtManagementGroupHandleResponse(resp *http.Response) (PolicyDefinitionsCreateOrUpdateAtManagementGroupResponse, error) {\n\tresult := PolicyDefinitionsCreateOrUpdateAtManagementGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinition); err != nil {\n\t\treturn PolicyDefinitionsCreateOrUpdateAtManagementGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *RedisClient) listByResourceGroupHandleResponse(resp *http.Response) (RedisListByResourceGroupResponse, error) {\n\tresult := RedisListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RedisListResult); err != nil {\n\t\treturn RedisListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *VirtualMachineImageTemplatesClient) listByResourceGroupHandleResponse(resp *http.Response) (VirtualMachineImageTemplatesClientListByResourceGroupResponse, error) {\n\tresult := VirtualMachineImageTemplatesClientListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ImageTemplateListResult); err != nil {\n\t\treturn VirtualMachineImageTemplatesClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DevicesClient) listByResourceGroupHandleResponse(resp *http.Response) (DevicesClientListByResourceGroupResponse, error) {\n\tresult := DevicesClientListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DeviceListResult); err != nil {\n\t\treturn DevicesClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *MonitorsClient) listByResourceGroupHandleResponse(resp *http.Response) (MonitorsClientListByResourceGroupResponse, error) {\n\tresult := MonitorsClientListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.MonitorResourceListResponse); err != nil {\n\t\treturn MonitorsClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualNetworkTapsClient) listByResourceGroupHandleResponse(resp *azcore.Response) (VirtualNetworkTapListResultResponse, error) {\n\tvar val *VirtualNetworkTapListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn VirtualNetworkTapListResultResponse{}, err\n\t}\n\treturn VirtualNetworkTapListResultResponse{RawResponse: resp.Response, VirtualNetworkTapListResult: val}, nil\n}", "func (client *ContainerGroupsClient) getHandleResponse(resp *http.Response) (ContainerGroupsClientGetResponse, error) {\n\tresult := ContainerGroupsClientGetResponse{}\n\tif err := 
runtime.UnmarshalAsJSON(resp, &result.ContainerGroup); err != nil {\n\t\treturn ContainerGroupsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *PrivateDNSZoneGroupsClient) getHandleResponse(resp *http.Response) (PrivateDNSZoneGroupsClientGetResponse, error) {\n\tresult := PrivateDNSZoneGroupsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PrivateDNSZoneGroup); err != nil {\n\t\treturn PrivateDNSZoneGroupsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ClustersClient) listByResourceGroupHandleResponse(resp *http.Response) (ClustersListByResourceGroupResponse, error) {\n\tresult := ClustersListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ClusterListResult); err != nil {\n\t\treturn ClustersListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ConnectedEnvironmentsClient) listByResourceGroupHandleResponse(resp *http.Response) (ConnectedEnvironmentsClientListByResourceGroupResponse, error) {\n\tresult := ConnectedEnvironmentsClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ConnectedEnvironmentCollection); err != nil {\n\t\treturn ConnectedEnvironmentsClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SpatialAnchorsAccountsClient) listByResourceGroupHandleResponse(resp *http.Response) (SpatialAnchorsAccountsClientListByResourceGroupResponse, error) {\n\tresult := SpatialAnchorsAccountsClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SpatialAnchorsAccountPage); err != nil {\n\t\treturn SpatialAnchorsAccountsClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ClustersClient) listByResourceGroupHandleResponse(resp *http.Response) (ClustersClientListByResourceGroupResponse, error) {\n\tresult := ClustersClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ClusterListResult); err != nil {\n\t\treturn ClustersClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *IotSecuritySolutionClient) listByResourceGroupHandleResponse(resp *http.Response) (IotSecuritySolutionClientListByResourceGroupResponse, error) {\n\tresult := IotSecuritySolutionClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.IoTSecuritySolutionsList); err != nil {\n\t\treturn IotSecuritySolutionClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *CertificateOrdersClient) listByResourceGroupHandleResponse(resp *http.Response) (CertificateOrdersClientListByResourceGroupResponse, error) {\n\tresult := CertificateOrdersClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CertificateOrderCollection); err != nil {\n\t\treturn CertificateOrdersClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *CassandraClustersClient) listByResourceGroupHandleResponse(resp *http.Response) (CassandraClustersClientListByResourceGroupResponse, error) {\n\tresult := CassandraClustersClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ListClusters); err != nil {\n\t\treturn CassandraClustersClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DataCollectionEndpointsClient) listByResourceGroupHandleResponse(resp *http.Response) 
(DataCollectionEndpointsListByResourceGroupResponse, error) {\n\tresult := DataCollectionEndpointsListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DataCollectionEndpointResourceListResult); err != nil {\n\t\treturn DataCollectionEndpointsListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *SyncGroupsClient) getHandleResponse(resp *http.Response) (SyncGroupsClientGetResponse, error) {\n\tresult := SyncGroupsClientGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SyncGroup); err != nil {\n\t\treturn SyncGroupsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DataCollectionEndpointsClient) listByResourceGroupHandleResponse(resp *http.Response) (DataCollectionEndpointsClientListByResourceGroupResponse, error) {\n\tresult := DataCollectionEndpointsClientListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DataCollectionEndpointResourceListResult); err != nil {\n\t\treturn DataCollectionEndpointsClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *LocalRulestacksClient) listByResourceGroupHandleResponse(resp *http.Response) (LocalRulestacksClientListByResourceGroupResponse, error) {\n\tresult := LocalRulestacksClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.LocalRulestackResourceListResult); err != nil {\n\t\treturn LocalRulestacksClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) listByResourceGroupHandleResponse(resp *http.Response) (WebAppsListByResourceGroupResponse, error) {\n\tresult := WebAppsListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.WebAppCollection); err != nil {\n\t\treturn WebAppsListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ServersClient) listByResourceGroupHandleResponse(resp *http.Response) (ServersClientListByResourceGroupResponse, error) {\n\tresult := ServersClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ServerListResult); err != nil {\n\t\treturn ServersClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualMachinesClient) listByResourceGroupHandleResponse(resp *http.Response) (VirtualMachinesClientListByResourceGroupResponse, error) {\n\tresult := VirtualMachinesClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachinesList); err != nil {\n\t\treturn VirtualMachinesClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AccountsClient) listByResourceGroupHandleResponse(resp *http.Response) (AccountsClientListByResourceGroupResponse, error) {\n\tresult := AccountsClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AccountListResult); err != nil {\n\t\treturn AccountsClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SQLVirtualMachinesClient) listByResourceGroupHandleResponse(resp *http.Response) (SQLVirtualMachinesClientListByResourceGroupResponse, error) {\n\tresult := SQLVirtualMachinesClientListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ListResult); err != nil {\n\t\treturn SQLVirtualMachinesClientListByResourceGroupResponse{}, err\n\t}\n\treturn 
result, nil\n}", "func (client *ServersClient) listByResourceGroupHandleResponse(resp *http.Response) (ServersClientListByResourceGroupResponse, error) {\n\tresult := ServersClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ServerList); err != nil {\n\t\treturn ServersClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AlertProcessingRulesClient) listByResourceGroupHandleResponse(resp *http.Response) (AlertProcessingRulesClientListByResourceGroupResponse, error) {\n\tresult := AlertProcessingRulesClientListByResourceGroupResponse{RawResponse: resp}\n\tif val := resp.Header.Get(\"x-ms-request-id\"); val != \"\" {\n\t\tresult.XMSRequestID = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AlertProcessingRulesList); err != nil {\n\t\treturn AlertProcessingRulesClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualRoutersClient) listByResourceGroupHandleResponse(resp *http.Response) (VirtualRoutersClientListByResourceGroupResponse, error) {\n\tresult := VirtualRoutersClientListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VirtualRouterListResult); err != nil {\n\t\treturn VirtualRoutersClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WorkspacesClient) listByResourceGroupHandleResponse(resp *http.Response) (WorkspacesListByResourceGroupResponse, error) {\n\tresult := WorkspacesListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.WorkspaceListResult); err != nil {\n\t\treturn WorkspacesListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *GroupClient) getHandleResponse(resp *http.Response) (GroupGetResponse, error) {\n\tresult := GroupGetResponse{RawResponse: resp}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GroupContract); err != nil {\n\t\treturn GroupGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client UsageDetailsClient) ListByManagementGroupResponder(resp *http.Response) (result UsageDetailsListResult, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *ContainerGroupsClient) updateHandleResponse(resp *http.Response) (ContainerGroupsClientUpdateResponse, error) {\n\tresult := ContainerGroupsClientUpdateResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ContainerGroup); err != nil {\n\t\treturn ContainerGroupsClientUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DedicatedHostsClient) listByHostGroupHandleResponse(resp *http.Response) (DedicatedHostsListByHostGroupResponse, error) {\n\tresult := DedicatedHostsListByHostGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DedicatedHostListResult); err != nil {\n\t\treturn DedicatedHostsListByHostGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *CapacityReservationsClient) listByCapacityReservationGroupHandleResponse(resp *http.Response) (CapacityReservationsListByCapacityReservationGroupResponse, error) {\n\tresult := 
CapacityReservationsListByCapacityReservationGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CapacityReservationListResult); err != nil {\n\t\treturn CapacityReservationsListByCapacityReservationGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *SQLVirtualMachinesClient) listBySQLVMGroupHandleResponse(resp *http.Response) (SQLVirtualMachinesClientListBySQLVMGroupResponse, error) {\n\tresult := SQLVirtualMachinesClientListBySQLVMGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ListResult); err != nil {\n\t\treturn SQLVirtualMachinesClientListBySQLVMGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DedicatedHostsClient) listByHostGroupHandleResponse(resp *azcore.Response) (DedicatedHostListResultResponse, error) {\n\tvar val *DedicatedHostListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn DedicatedHostListResultResponse{}, err\n\t}\n\treturn DedicatedHostListResultResponse{RawResponse: resp.Response, DedicatedHostListResult: val}, nil\n}", "func (client WorkloadNetworksClient) GetVMGroupResponder(resp *http.Response) (result WorkloadNetworkVMGroup, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *GroupClient) listByServiceHandleResponse(resp *http.Response) (GroupListByServiceResponse, error) {\n\tresult := GroupListByServiceResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GroupCollection); err != nil {\n\t\treturn GroupListByServiceResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ContainerGroupsClient) listHandleResponse(resp *http.Response) (ContainerGroupsClientListResponse, error) {\n\tresult := ContainerGroupsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ContainerGroupListResult); err != nil {\n\t\treturn ContainerGroupsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AvailabilityGroupListenersClient) listByGroupHandleResponse(resp *http.Response) (AvailabilityGroupListenersClientListByGroupResponse, error) {\n\tresult := AvailabilityGroupListenersClientListByGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AvailabilityGroupListenerListResult); err != nil {\n\t\treturn AvailabilityGroupListenersClientListByGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client IotHubResourceClient) GetEventHubConsumerGroupResponder(resp *http.Response) (result EventHubConsumerGroupInfo, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *FactoriesClient) listByResourceGroupHandleResponse(resp *http.Response) (FactoriesClientListByResourceGroupResponse, error) {\n\tresult := FactoriesClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.FactoryListResponse); err != nil {\n\t\treturn FactoriesClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *PrivateDNSZoneGroupsClient) listHandleResponse(resp *http.Response) (PrivateDNSZoneGroupsClientListResponse, error) {\n\tresult := 
PrivateDNSZoneGroupsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PrivateDNSZoneGroupListResult); err != nil {\n\t\treturn PrivateDNSZoneGroupsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *GroupClient) updateHandleResponse(resp *http.Response) (GroupUpdateResponse, error) {\n\tresult := GroupUpdateResponse{RawResponse: resp}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GroupContract); err != nil {\n\t\treturn GroupUpdateResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (gppr GetPathPropertiesResponse) XMsGroup() string {\n\treturn gppr.rawResponse.Header.Get(\"x-ms-group\")\n}", "func (client *LongTermRetentionManagedInstanceBackupsClient) listByResourceGroupInstanceHandleResponse(resp *http.Response) (LongTermRetentionManagedInstanceBackupsListByResourceGroupInstanceResponse, error) {\n\tresult := LongTermRetentionManagedInstanceBackupsListByResourceGroupInstanceResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedInstanceLongTermRetentionBackupListResult); err != nil {\n\t\treturn LongTermRetentionManagedInstanceBackupsListByResourceGroupInstanceResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *SchemaRegistryClient) getHandleResponse(resp *http.Response) (SchemaRegistryClientGetResponse, error) {\n\tresult := SchemaRegistryClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SchemaGroup); err != nil {\n\t\treturn SchemaRegistryClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client AccountClient) ListByResourceGroupResponder(resp *http.Response) (result AccountResourceDescriptionList, err error) {\n err = autorest.Respond(\n resp,\n azure.WithErrorUnlessStatusCode(http.StatusOK),\n autorest.ByUnmarshallingJSON(&result),\n autorest.ByClosing())\n result.Response = autorest.Response{Response: resp}\n return\n }", "func (c ActionGroupsAPIsClient) responderForActionGroupsGetTestNotificationsAtActionGroupResourceLevel(resp *http.Response) (result ActionGroupsGetTestNotificationsAtActionGroupResourceLevelOperationResponse, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result.Model),\n\t\tautorest.ByClosing())\n\tresult.HttpResponse = resp\n\n\treturn\n}", "func (client *LongTermRetentionManagedInstanceBackupsClient) listByResourceGroupLocationHandleResponse(resp *http.Response) (LongTermRetentionManagedInstanceBackupsListByResourceGroupLocationResponse, error) {\n\tresult := LongTermRetentionManagedInstanceBackupsListByResourceGroupLocationResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedInstanceLongTermRetentionBackupListResult); err != nil {\n\t\treturn LongTermRetentionManagedInstanceBackupsListByResourceGroupLocationResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *HardwareComponentGroupsClient) listByDeviceHandleResponse(resp *http.Response) (HardwareComponentGroupsClientListByDeviceResponse, error) {\n\tresult := HardwareComponentGroupsClientListByDeviceResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.HardwareComponentGroupList); err != nil {\n\t\treturn HardwareComponentGroupsClientListByDeviceResponse{}, err\n\t}\n\treturn result, nil\n}", "func handleGetGroups(c *Context, w http.ResponseWriter, r *http.Request) 
{\n\tpaging, err := parsePaging(r.URL)\n\tif err != nil {\n\t\tc.Logger.WithError(err).Error(\"failed to parse paging parameters\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\twithInstallationCount, err := parseBool(r.URL, model.ShowInstallationCountQueryParameter, false)\n\tif err != nil {\n\t\tc.Logger.WithError(err).Error(\"failed to parse request parameters\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tfilter := &model.GroupFilter{\n\t\tPaging: paging,\n\t\tWithInstallationCount: withInstallationCount,\n\t}\n\n\tgroups, err := c.Store.GetGroupDTOs(filter)\n\tif err != nil {\n\t\tc.Logger.WithError(err).Error(\"failed to query groups\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\tif groups == nil {\n\t\tgroups = []*model.GroupDTO{}\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\toutputJSON(c, w, groups)\n}", "func (client IotHubResourceClient) ListByResourceGroupResponder(resp *http.Response) (result IotHubDescriptionListResult, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (_obj *DataService) GetGroupInfo(groupInfo *map[int32]string, _opt ...map[string]string) (ret int32, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = _os.WriteHead(codec.MAP, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _os.Write_int32(int32(len((*groupInfo))), 0)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tfor k0, v0 := range *groupInfo {\n\n\t\terr = _os.Write_int32(k0, 0)\n\t\tif err != nil {\n\t\t\treturn ret, err\n\t\t}\n\n\t\terr = _os.Write_string(v0, 1)\n\t\tif err != nil {\n\t\t\treturn ret, err\n\t\t}\n\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\ttarsCtx := context.Background()\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"getGroupInfo\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = _is.Read_int32(&ret, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr, have = _is.SkipTo(codec.MAP, 1, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _is.Read_int32(&length, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t(*groupInfo) = make(map[int32]string)\n\tfor i1, e1 := int32(0), length; i1 < e1; i1++ {\n\t\tvar k1 int32\n\t\tvar v1 string\n\n\t\terr = _is.Read_int32(&k1, 0, false)\n\t\tif err != nil {\n\t\t\treturn ret, err\n\t\t}\n\n\t\terr = _is.Read_string(&v1, 1, false)\n\t\tif err != nil {\n\t\t\treturn ret, err\n\t\t}\n\n\t\t(*groupInfo)[k1] = v1\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func (client 
*AFDOriginsClient) listByOriginGroupHandleResponse(resp *http.Response) (AFDOriginsClientListByOriginGroupResponse, error) {\n\tresult := AFDOriginsClientListByOriginGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AFDOriginListResult); err != nil {\n\t\treturn AFDOriginsClientListByOriginGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SchemaRegistryClient) listByNamespaceHandleResponse(resp *http.Response) (SchemaRegistryClientListByNamespaceResponse, error) {\n\tresult := SchemaRegistryClientListByNamespaceResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SchemaGroupListResult); err != nil {\n\t\treturn SchemaRegistryClientListByNamespaceResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *GroupClient) createOrUpdateHandleResponse(resp *http.Response) (GroupCreateOrUpdateResponse, error) {\n\tresult := GroupCreateOrUpdateResponse{RawResponse: resp}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GroupContract); err != nil {\n\t\treturn GroupCreateOrUpdateResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *LongTermRetentionManagedInstanceBackupsClient) listByResourceGroupDatabaseHandleResponse(resp *http.Response) (LongTermRetentionManagedInstanceBackupsListByResourceGroupDatabaseResponse, error) {\n\tresult := LongTermRetentionManagedInstanceBackupsListByResourceGroupDatabaseResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedInstanceLongTermRetentionBackupListResult); err != nil {\n\t\treturn LongTermRetentionManagedInstanceBackupsListByResourceGroupDatabaseResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client UsageDetailsClient) ListForBillingPeriodByManagementGroupResponder(resp *http.Response) (result UsageDetailsListResult, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client LabClient) ListByResourceGroupResponder(resp *http.Response) (result ResponseWithContinuationLab, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func ( gmhandler *GroupMessageHandler ) GetGroupMessage(response http.ResponseWriter , request *http.Request) {\n\tresponse.Header().Set(\"Content-Type\" , \"application/json\")\n\tlang := GetSetLang(gmhandler , response , request )\n\tsession := gmhandler.Session.GetSession(request)\n\tres := &struct{\n\t\tSuccess bool `json:\"success\"`\n\t\tMessage string `json:\"message\"`\n\t\tMessages []*entity.GroupMessage `json:\"messages\"`\n\t\tGroupID string `json:\"group_id\"`\n\t\tOffset int `json:\"offset\"`\n\t}{\n\t\tSuccess: false ,\n\t\tMessage: translation.Translate(lang , \"Invalid Input \" ),\n\t}\n\tgroupid := request.FormValue(\"group_id\")\n\toffsetString := request.FormValue(\"offset\")\n\toffset :=0\n\toffset , er := strconv.Atoi(offsetString)\n\tif groupid ==\"\" || er != nil {\n\t\tresponse.Write(Helper.MarshalThis(res))\n\t\treturn \n\t}\n\tres.GroupID = groupid\n\tres.Offset = offset\n\tgroupExists := 
gmhandler.GroupSer.DoesGroupExist(groupid)\n\tif !groupExists {\n\t\tres.Message = fmt.Sprintf(translation.Translate(lang , \"Goup With ID : %s doesn't Exist\") , groupid )\n\t\tresponse.Write(Helper.MarshalThis(res))\n\t\treturn \n\t}\n\tisMember := gmhandler.GroupSer.IsGroupMember(groupid , session.UserID )\n\tif !isMember {\n\t\tres.Message = fmt.Sprintf( translation.Translate(lang ,\" User with ID %s is Not a member in Group With ID : %s\") , session.UserID , groupid )\n\t\tresponse.Write(Helper.MarshalThis(res))\n\t\treturn \n\t}\n\tgmessages := gmhandler.MessageSer.GetGroupMessages( groupid , offset )\n\tif gmessages == nil || len(gmessages)==0 {\n\t\tres.Message = translation.Translate(lang , \"No Message Record Found \")\n\t\tresponse.Write(Helper.MarshalThis(res))\n\t\treturn \n\t}\n\tres.Message = fmt.Sprintf(translation.Translate(lang , \"Succesfuly Found %d %s \") , len(gmessages) , Helper.SetPlural(lang , \"message\" , len(gmessages)))\n\tres.Success = true \n\tres.Messages = gmessages\n\tresponse.Write(Helper.MarshalThis(res))\n}", "func (client IdentityClient) GetDynamicGroup(ctx context.Context, request GetDynamicGroupRequest) (response GetDynamicGroupResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.getDynamicGroup, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = GetDynamicGroupResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = GetDynamicGroupResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(GetDynamicGroupResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into GetDynamicGroupResponse\")\n\t}\n\treturn\n}", "func CreateDescribeContainerGroupMetricResponse() (response *DescribeContainerGroupMetricResponse) {\n\tresponse = &DescribeContainerGroupMetricResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (client Client) ListByResourceGroupResponder(resp *http.Response) (result ListResult, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client UsageDetailsClient) ListByManagementGroup(ctx context.Context, managementGroupID string, expand string, filter string, skiptoken string, top *int32, apply string) (result UsageDetailsListResultPage, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/UsageDetailsClient.ListByManagementGroup\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.udlr.Response.Response != nil {\n\t\t\t\tsc = result.udlr.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tif err := validation.Validate([]validation.Validation{\n\t\t{TargetValue: top,\n\t\t\tConstraints: []validation.Constraint{{Target: \"top\", Name: validation.Null, Rule: false,\n\t\t\t\tChain: []validation.Constraint{{Target: \"top\", Name: validation.InclusiveMaximum, Rule: int64(1000), 
Chain: nil},\n\t\t\t\t\t{Target: \"top\", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},\n\t\t\t\t}}}}}); err != nil {\n\t\treturn result, validation.NewError(\"consumption.UsageDetailsClient\", \"ListByManagementGroup\", err.Error())\n\t}\n\n\tresult.fn = client.listByManagementGroupNextResults\n\treq, err := client.ListByManagementGroupPreparer(ctx, managementGroupID, expand, filter, skiptoken, top, apply)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"consumption.UsageDetailsClient\", \"ListByManagementGroup\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListByManagementGroupSender(req)\n\tif err != nil {\n\t\tresult.udlr.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"consumption.UsageDetailsClient\", \"ListByManagementGroup\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.udlr, err = client.ListByManagementGroupResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"consumption.UsageDetailsClient\", \"ListByManagementGroup\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}", "func (client WorkloadNetworksClient) UpdateVMGroupResponder(resp *http.Response) (result WorkloadNetworkVMGroup, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client DataControllersClient) ListInGroupResponder(resp *http.Response) (result PageOfDataControllerResource, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client FirewallPolicyRuleGroupsClient) GetResponder(resp *http.Response) (result FirewallPolicyRuleGroup, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client UsageDetailsClient) ListByManagementGroupComplete(ctx context.Context, managementGroupID string, expand string, filter string, skiptoken string, top *int32, apply string) (result UsageDetailsListResultIterator, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/UsageDetailsClient.ListByManagementGroup\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.Response().Response.Response != nil {\n\t\t\t\tsc = result.page.Response().Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tresult.page, err = client.ListByManagementGroup(ctx, managementGroupID, expand, filter, skiptoken, top, apply)\n\treturn\n}", "func (me *TxsdOperationInformationSequenceAvailableResponseGroups) Walk() (err error) {\n\tif fn := WalkHandlers.TxsdOperationInformationSequenceAvailableResponseGroups; me != nil {\n\t\tif fn != nil {\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif err = me.XsdGoPkgHasElems_ResponseGroupsequenceCreateHITRequestschema_ResponseGroup_XsdtString_.Walk(); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\treturn\n\t\t}\n\t\tif fn != nil {\n\t\t\tif err = fn(me, false); 
xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func GetPolicyDefinitionAtManagementGroup(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *PolicyDefinitionAtManagementGroupState, opts ...pulumi.ResourceOption) (*PolicyDefinitionAtManagementGroup, error) {\n\tvar resource PolicyDefinitionAtManagementGroup\n\terr := ctx.ReadResource(\"azure-native:authorization/v20190101:PolicyDefinitionAtManagementGroup\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (a *IAMApiService) GetGroup(ctx context.Context, gid string) (IamGroup, *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = http.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue IamGroup\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/acs/api/v1/groups/{gid}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"gid\"+\"}\", fmt.Sprintf(\"%v\", gid), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v IamGroup\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 0 {\n\t\t\tvar v IamError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, 
newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}", "func (client IdentityClient) getGroup(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/groups/{groupId}\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response GetGroupResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (client *SyncGroupsClient) listByDatabaseHandleResponse(resp *http.Response) (SyncGroupsClientListByDatabaseResponse, error) {\n\tresult := SyncGroupsClientListByDatabaseResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SyncGroupListResult); err != nil {\n\t\treturn SyncGroupsClientListByDatabaseResponse{}, err\n\t}\n\treturn result, nil\n}", "func CreateDescribeScalingGroupsResponse() (response *DescribeScalingGroupsResponse) {\n\tresponse = &DescribeScalingGroupsResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (client IdentityClient) getDynamicGroup(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/dynamicGroups/{dynamicGroupId}\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response GetDynamicGroupResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (client UsageDetailsClient) ListByManagementGroupSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}", "func handleCreateGroup(c *Context, w http.ResponseWriter, r *http.Request) {\n\tcreateGroupRequest, err := model.NewCreateGroupRequestFromReader(r.Body)\n\tif err != nil {\n\t\tc.Logger.WithError(err).Error(\"failed to decode request\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tgroup := model.Group{\n\t\tName: createGroupRequest.Name,\n\t\tDescription: createGroupRequest.Description,\n\t\tVersion: createGroupRequest.Version,\n\t\tImage: createGroupRequest.Image,\n\t\tMaxRolling: createGroupRequest.MaxRolling,\n\t\tAPISecurityLock: createGroupRequest.APISecurityLock,\n\t\tMattermostEnv: createGroupRequest.MattermostEnv,\n\t}\n\n\tannotations, err := model.AnnotationsFromStringSlice(createGroupRequest.Annotations)\n\tif err != nil {\n\t\tc.Logger.WithError(err).Error(\"invalid annotations\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\terr = 
c.Store.CreateGroup(&group, annotations)\n\tif err != nil {\n\t\tc.Logger.WithError(err).Error(\"failed to create group\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tc.Supervisor.Do()\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\toutputJSON(c, w, group.ToDTO(annotations))\n}", "func (o BackendResponseOutput) Group() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BackendResponse) string { return v.Group }).(pulumi.StringOutput)\n}", "func handleGetGroupsStatus(c *Context, w http.ResponseWriter, r *http.Request) {\n\tfilter := &model.GroupFilter{\n\t\tPaging: model.AllPagesNotDeleted(),\n\t}\n\n\tgroups, err := c.Store.GetGroups(filter)\n\tif err != nil {\n\t\tc.Logger.WithError(err).Error(\"failed to query groups\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\tif groups == nil {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\n\tvar groupsStatus []model.GroupsStatus\n\n\tfor _, group := range groups {\n\t\tvar groupStatus model.GroupsStatus\n\t\tstatus, err := c.Store.GetGroupStatus(group.ID)\n\t\tif err != nil {\n\t\t\tc.Logger.WithError(err).Error(\"failed to query group status\")\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tif status == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tgroupStatus.ID = group.ID\n\t\tgroupStatus.Status = *status\n\n\t\tgroupsStatus = append(groupsStatus, groupStatus)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\toutputJSON(c, w, groupsStatus)\n}", "func (client IotHubResourceClient) CreateEventHubConsumerGroupResponder(resp *http.Response) (result EventHubConsumerGroupInfo, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (ctx *Context) SpecificGroupsHandler(w http.ResponseWriter, r *http.Request) {\n\n\tuid := getCurrentUser(w, r)\n\tif uid < 0 {\n\t\treturn\n\t}\n\n\t// Only support GET PATCH DELETE method\n\tif r.Method != \"GET\" && r.Method != \"PATCH\" && r.Method != \"DELETE\" && r.Method != \"PUT\" {\n\t\thttp.Error(w, errUnsuportMethod, http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\t// parse group id\n\turlID := path.Base(r.URL.Path)\n\tgid := getIDfromURL(w, r, urlID)\n\tif gid < 0 {\n\t\treturn\n\t}\n\n\t// read group data from db\n\tgroup, err := ctx.Store.GetGroupByID(gid)\n\tif !dbErrorHandle(w, \"Get group\", err) {\n\t\treturn\n\t}\n\n\t// GET request will return the group\n\tif r.Method == \"GET\" {\n\n\t\t//Get all meetings\n\t\tmeetings, err := ctx.Store.GetAllMeetingsOfGroup(gid)\n\t\tif !dbErrorHandle(w, \"Get all meetings\", err) {\n\t\t\treturn\n\t\t}\n\n\t\t//Get all members of the group\n\t\tmembers, err := ctx.Store.GetAllMembers(gid)\n\t\tif !dbErrorHandle(w, \"Get all members\", err) {\n\t\t\treturn\n\t\t}\n\n\t\t//Construct the return struct\n\t\tcompleteGroupInfo := model.GroupReturnBody{\n\t\t\tMeetings: meetings,\n\t\t\tGroupInfo: group,\n\t\t\tMembers: members,\n\t\t}\n\n\t\t// marshal current group into response body\n\t\tresponse := marshalRep(w, completeGroupInfo)\n\t\tif response == nil {\n\t\t\treturn\n\t\t}\n\n\t\t// write into response\n\t\trespondWithHeader(w, typeJSON, response, http.StatusOK)\n\t}\n\n\t// PUT request can update the groups' information, only creator can use this method\n\tif r.Method == \"PUT\" {\n\n\t\t// 
Check authorization\n\t\tif !isGroupCreator(group, uid, w) {\n\t\t\treturn\n\t\t}\n\n\t\t// Check content type\n\t\tif !isContentTypeJSON(w, r) {\n\t\t\treturn\n\t\t}\n\n\t\t// Get request body\n\t\tbody := getRequestBody(w, r)\n\t\tif body == nil {\n\t\t\treturn\n\t\t}\n\n\t\t// Marshal the body to json\n\t\tnewGroup := &model.Group{}\n\t\tif !unmarshalBody(w, body, newGroup) {\n\t\t\treturn\n\t\t}\n\n\t\t// Update information in database\n\t\terr := ctx.Store.UpdateGroup(newGroup)\n\t\tif !dbErrorHandle(w, \"Update group\", err) {\n\t\t\treturn\n\t\t}\n\n\t\t// TBD: get the newly updated group\n\t\tgroup, err = ctx.Store.GetGroupByID(group.GroupID)\n\t\tif !dbErrorHandle(w, \"Get updated group\", err) {\n\t\t\treturn\n\t\t}\n\n\t\t// marshal into body and response\n\t\tres := marshalRep(w, group)\n\t\tif res == nil {\n\t\t\treturn\n\t\t}\n\n\t\trespondWithHeader(w, typeJSON, res, http.StatusOK)\n\n\t}\n\n\t// PATCH generate an invitation link, and add the invitation email into group members\n\tif r.Method == \"PATCH\" {\n\t\t// TBD: authorization?\n\n\t\t// Get the invitation information form request\n\t\tbody := getRequestBody(w, r)\n\t\tif body == nil {\n\t\t\treturn\n\t\t}\n\n\t\tguest := &model.NewGuest{}\n\t\tif !unmarshalBody(w, body, guest) {\n\t\t\treturn\n\t\t}\n\n\t\t// TESTING: Insert into db\n\t\tguestID := model.GenerateRandomID()\n\t\t_, err := ctx.Store.InsertGuest(guestID, guest.Email, guest.DisplayName, gid, 0, uid) //Comment this\n\t\tif !dbErrorHandle(w, \"insert guest\", err) {\n\t\t\treturn\n\t\t}\n\n\t\t// Generate an invitation link with the email\n\t\tlink := fmt.Sprintf(\"%s/%s/guest/%s/%d/groups/%d\", r.Host, version, guest.Email, guestID, gid)\n\n\t\t// response with the link\n\t\trespondWithHeader(w, typeText, []byte(link), http.StatusCreated)\n\n\t}\n\n\t// Delete the current group, only creator can use this method\n\tif r.Method == \"DELETE\" {\n\n\t\t// Check authorization\n\t\tif !isGroupCreator(group, uid, w) {\n\t\t\treturn\n\t\t}\n\n\t\terr := ctx.Store.DeleteGroup(gid)\n\t\tif !dbErrorHandle(w, \"Delete group\", err) {\n\t\t\treturn\n\t\t}\n\n\t\trespondWithHeader(w, typeText, []byte(\"Delete success\"), http.StatusOK)\n\n\t}\n\n}", "func CreateDescribeMultiContainerGroupMetricResponse() (response *DescribeMultiContainerGroupMetricResponse) {\n\tresponse = &DescribeMultiContainerGroupMetricResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (fn GetAuthorizationAPIGroupHandlerFunc) Handle(params GetAuthorizationAPIGroupParams) middleware.Responder {\n\treturn fn(params)\n}", "func (client *GroupClient) getEntityTagHandleResponse(resp *http.Response) (GroupGetEntityTagResponse, error) {\n\tresult := GroupGetEntityTagResponse{RawResponse: resp}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif resp.StatusCode >= 200 && resp.StatusCode < 300 {\n\t\tresult.Success = true\n\t}\n\treturn result, nil\n}", "func (client *ManagementAssociationsClient) getHandleResponse(resp *http.Response) (ManagementAssociationsGetResponse, error) {\n\tresult := ManagementAssociationsGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagementAssociation); err != nil {\n\t\treturn ManagementAssociationsGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *PolicyDefinitionsClient) getAtManagementGroupCreateRequest(ctx context.Context, policyDefinitionName string, managementGroupID string, options 
*PolicyDefinitionsGetAtManagementGroupOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}\"\n\tif policyDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter policyDefinitionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{policyDefinitionName}\", url.PathEscape(policyDefinitionName))\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client IdentityClient) GetGroup(ctx context.Context, request GetGroupRequest) (response GetGroupResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.getGroup, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = GetGroupResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = GetGroupResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(GetGroupResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into GetGroupResponse\")\n\t}\n\treturn\n}", "func (me *XsdGoPkgHasElem_ResponseGroupInformation) Walk() (err error) {\n\tif fn := WalkHandlers.XsdGoPkgHasElem_ResponseGroupInformation; me != nil {\n\t\tif fn != nil {\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif err = me.ResponseGroupInformation.Walk(); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\treturn\n\t\t}\n\t\tif fn != nil {\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func getGroups(w http.ResponseWriter, r *http.Request) {\n InitResponse(&w)\n if r.Method == \"OPTIONS\" {return}\n\n allow, _ := HasPermission(&w, r, CoreGroupModuleID, CoreAccessRead)\n if !allow {\n w.WriteHeader(http.StatusUnauthorized)\n json.NewEncoder(w).Encode(&CoreMessage{\n Message: \"Not authorized!\",\n })\n return\n }\n\n groups := []CoreGroup{}\n\n err := Db.Model(&groups).Select()\n if err != nil {\n w.WriteHeader(http.StatusBadRequest)\n return\n }\n\n w.WriteHeader(http.StatusOK)\n json.NewEncoder(w).Encode(groups)\n}", "func (client WorkloadNetworksClient) ListVMGroupsResponder(resp *http.Response) (result WorkloadNetworkVMGroupsList, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: 
resp}\n\treturn\n}" ]
[ "0.66376185", "0.6585092", "0.64261913", "0.6391692", "0.6382366", "0.63312274", "0.63303494", "0.6312258", "0.63077897", "0.62958425", "0.6260882", "0.62083423", "0.6203065", "0.61537623", "0.6131575", "0.61313987", "0.61255515", "0.61254406", "0.6121473", "0.61212695", "0.6111537", "0.60946476", "0.6072034", "0.60640395", "0.60544366", "0.6052266", "0.6044018", "0.6042864", "0.60323465", "0.6027878", "0.60266733", "0.60036016", "0.60000545", "0.5993663", "0.5993226", "0.5991735", "0.59711504", "0.59291387", "0.59057415", "0.5898499", "0.581746", "0.5798762", "0.57923824", "0.5783489", "0.57679814", "0.5740101", "0.5726802", "0.5696891", "0.5691908", "0.5669448", "0.5630803", "0.5531167", "0.5508712", "0.54644644", "0.5434885", "0.5428255", "0.5393907", "0.53299135", "0.5281433", "0.52729976", "0.52676964", "0.5249187", "0.5194669", "0.5193852", "0.5190536", "0.51792485", "0.5164216", "0.51324975", "0.51098615", "0.50987285", "0.508898", "0.50768733", "0.5048127", "0.503675", "0.50314474", "0.5024127", "0.502003", "0.50144607", "0.50125736", "0.5011326", "0.5010653", "0.49954575", "0.49835038", "0.49748948", "0.4944377", "0.4935331", "0.4933027", "0.49144515", "0.49122733", "0.49088946", "0.4880824", "0.4849112", "0.4846951", "0.48291287", "0.4797712", "0.4781146", "0.47617078", "0.47611895", "0.47610843", "0.47415704" ]
0.7855107
0
getBuiltInCreateRequest creates the GetBuiltIn request.
func (client *PolicyDefinitionsClient) getBuiltInCreateRequest(ctx context.Context, policyDefinitionName string, options *PolicyDefinitionsGetBuiltInOptions) (*policy.Request, error) { urlPath := "/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}" if policyDefinitionName == "" { return nil, errors.New("parameter policyDefinitionName cannot be empty") } urlPath = strings.ReplaceAll(urlPath, "{policyDefinitionName}", url.PathEscape(policyDefinitionName)) req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath)) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("api-version", "2021-06-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header.Set("Accept", "application/json") return req, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (client *PolicyDefinitionsClient) listBuiltInCreateRequest(ctx context.Context, options *PolicyDefinitionsListBuiltInOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Authorization/policyDefinitions\"\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tunencodedParams := []string{req.Raw().URL.RawQuery}\n\tif options != nil && options.Filter != nil {\n\t\tunencodedParams = append(unencodedParams, \"$filter=\"+*options.Filter)\n\t}\n\treq.Raw().URL.RawQuery = strings.Join(unencodedParams, \"&\")\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *BuildServiceClient) getBuildCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, buildServiceName string, buildName string, options *BuildServiceClientGetBuildOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builds/{buildName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif buildServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildServiceName}\", url.PathEscape(buildServiceName))\n\tif buildName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildName}\", url.PathEscape(buildName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateGetOpenNLURequest() (request *GetOpenNLURequest) {\n\trequest = &GetOpenNLURequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetOpenNLU\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *BuildServiceClient) getSupportedBuildpackCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, buildServiceName string, buildpackName string, options *BuildServiceClientGetSupportedBuildpackOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/supportedBuildpacks/{buildpackName}\"\n\tif 
client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif buildServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildServiceName}\", url.PathEscape(buildServiceName))\n\tif buildpackName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildpackName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildpackName}\", url.PathEscape(buildpackName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *WebhooksClient) getCreateRequest(ctx context.Context, resourceGroupName string, registryName string, webhookName string, options *WebhooksClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif registryName == \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\tif webhookName == \"\" {\n\t\treturn nil, errors.New(\"parameter webhookName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{webhookName}\", url.PathEscape(webhookName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *SapMonitorsClient) getCreateRequest(ctx context.Context, resourceGroupName string, sapMonitorName string, options *SapMonitorsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HanaOnAzure/sapMonitors/{sapMonitorName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif 
resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif sapMonitorName == \"\" {\n\t\treturn nil, errors.New(\"parameter sapMonitorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sapMonitorName}\", url.PathEscape(sapMonitorName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-02-07-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *WorkspacesClient) getCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, options *WorkspacesGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ImplicitClient) getRequiredGlobalQueryCreateRequest(ctx context.Context, options *ImplicitClientGetRequiredGlobalQueryOptions) (*policy.Request, error) {\n\turlPath := \"/reqopt/global/required/query\"\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"required-global-query\", client.requiredGlobalQuery)\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *WebAppsClient) getCreateRequest(ctx context.Context, resourceGroupName string, name string, options *WebAppsGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", 
url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateGetManagedRuleRequest() (request *GetManagedRuleRequest) {\n\trequest = &GetManagedRuleRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Config\", \"2020-09-07\", \"GetManagedRule\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *RegistrationDefinitionsClient) getCreateRequest(ctx context.Context, scope string, registrationDefinitionID string, options *RegistrationDefinitionsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.ManagedServices/registrationDefinitions/{registrationDefinitionId}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\tif registrationDefinitionID == \"\" {\n\t\treturn nil, errors.New(\"parameter registrationDefinitionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registrationDefinitionId}\", url.PathEscape(registrationDefinitionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *AlertsClient) getCreateRequest(ctx context.Context, scope string, alertID string, options *AlertsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.Authorization/roleManagementAlerts/{alertId}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\turlPath = strings.ReplaceAll(urlPath, \"{alertId}\", alertID)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ConnectedEnvironmentsClient) getCreateRequest(ctx context.Context, resourceGroupName string, connectedEnvironmentName string, options *ConnectedEnvironmentsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.App/connectedEnvironments/{connectedEnvironmentName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif connectedEnvironmentName == \"\" {\n\t\treturn nil, errors.New(\"parameter connectedEnvironmentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{connectedEnvironmentName}\", url.PathEscape(connectedEnvironmentName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, 
runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *LocalRulestacksClient) getCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif localRulestackName == \"\" {\n\t\treturn nil, errors.New(\"parameter localRulestackName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{localRulestackName}\", url.PathEscape(localRulestackName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-29\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *MetricAlertsClient) getCreateRequest(ctx context.Context, resourceGroupName string, ruleName string, options *MetricAlertsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/metricAlerts/{ruleName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif ruleName == \"\" {\n\t\treturn nil, errors.New(\"parameter ruleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ruleName}\", url.PathEscape(ruleName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *AlertOperationClient) getCreateRequest(ctx context.Context, scope string, operationID string, options *AlertOperationClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.Authorization/roleManagementAlertOperations/{operationId}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\turlPath = strings.ReplaceAll(urlPath, \"{operationId}\", operationID)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *SQLVirtualMachinesClient) getCreateRequest(ctx context.Context, resourceGroupName string, sqlVirtualMachineName string, options *SQLVirtualMachinesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/{sqlVirtualMachineName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif sqlVirtualMachineName == \"\" {\n\t\treturn nil, errors.New(\"parameter sqlVirtualMachineName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sqlVirtualMachineName}\", url.PathEscape(sqlVirtualMachineName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treqQP.Set(\"api-version\", \"2017-03-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *GremlinResourcesClient) getGremlinDatabaseCreateRequest(ctx context.Context, resourceGroupName string, accountName string, databaseName string, options *GremlinResourcesClientGetGremlinDatabaseOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\tif databaseName == \"\" {\n\t\treturn nil, errors.New(\"parameter databaseName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{databaseName}\", url.PathEscape(databaseName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-10-15\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *PortalConfigClient) getCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, portalConfigID string, options *PortalConfigClientGetOptions) (*policy.Request, 
error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/portalconfigs/{portalConfigId}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif portalConfigID == \"\" {\n\t\treturn nil, errors.New(\"parameter portalConfigID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{portalConfigId}\", url.PathEscape(portalConfigID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *CustomAssessmentAutomationsClient) getCreateRequest(ctx context.Context, resourceGroupName string, customAssessmentAutomationName string, options *CustomAssessmentAutomationsGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Security/customAssessmentAutomations/{customAssessmentAutomationName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif customAssessmentAutomationName == \"\" {\n\t\treturn nil, errors.New(\"parameter customAssessmentAutomationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{customAssessmentAutomationName}\", url.PathEscape(customAssessmentAutomationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ActionsClient) getCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, ruleID string, actionID string, options *ActionsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/alertRules/{ruleId}/actions/{actionId}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" 
{\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\tif ruleID == \"\" {\n\t\treturn nil, errors.New(\"parameter ruleID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ruleId}\", url.PathEscape(ruleID))\n\tif actionID == \"\" {\n\t\treturn nil, errors.New(\"parameter actionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{actionId}\", url.PathEscape(actionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-10-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *MonitoringSettingsClient) getCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, options *MonitoringSettingsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *SchemaRegistryClient) getCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, schemaGroupName string, options *SchemaRegistryClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/schemagroups/{schemaGroupName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif schemaGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter schemaGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{schemaGroupName}\", url.PathEscape(schemaGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, 
errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-10-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *OutputsClient) getCreateRequest(ctx context.Context, resourceGroupName string, jobName string, outputName string, options *OutputsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/outputs/{outputName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif jobName == \"\" {\n\t\treturn nil, errors.New(\"parameter jobName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{jobName}\", url.PathEscape(jobName))\n\tif outputName == \"\" {\n\t\treturn nil, errors.New(\"parameter outputName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{outputName}\", url.PathEscape(outputName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *BuildServiceClient) getBuildResultCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, buildServiceName string, buildName string, buildResultName string, options *BuildServiceClientGetBuildResultOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builds/{buildName}/results/{buildResultName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif buildServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildServiceName}\", url.PathEscape(buildServiceName))\n\tif buildName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildName cannot be empty\")\n\t}\n\turlPath = 
strings.ReplaceAll(urlPath, \"{buildName}\", url.PathEscape(buildName))\n\tif buildResultName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildResultName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildResultName}\", url.PathEscape(buildResultName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *OutputsClient) getCreateRequest(ctx context.Context, resourceGroupName string, jobName string, outputName string, options *OutputsGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/outputs/{outputName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif jobName == \"\" {\n\t\treturn nil, errors.New(\"parameter jobName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{jobName}\", url.PathEscape(jobName))\n\tif outputName == \"\" {\n\t\treturn nil, errors.New(\"parameter outputName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{outputName}\", url.PathEscape(outputName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-04-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *BuildServiceClient) getBuildServiceCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, buildServiceName string, options *BuildServiceClientGetBuildServiceOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif buildServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildServiceName}\", url.PathEscape(buildServiceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *IPAllocationsClient) getCreateRequest(ctx context.Context, resourceGroupName string, ipAllocationName string, options *IPAllocationsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations/{ipAllocationName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif ipAllocationName == \"\" {\n\t\treturn nil, errors.New(\"parameter ipAllocationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ipAllocationName}\", url.PathEscape(ipAllocationName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateGetServiceInputMappingRequest() (request *GetServiceInputMappingRequest) {\n\trequest = &GetServiceInputMappingRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"industry-brain\", \"2018-07-12\", \"GetServiceInputMapping\", \"\", \"\")\n\treturn\n}", "func (c *Client) BuildGetRequest(ctx context.Context, v interface{}) (*http.Request, error) {\n\tvar (\n\t\tid string\n\t)\n\t{\n\t\tp, ok := v.(*warehouse.GetPayload)\n\t\tif !ok {\n\t\t\treturn nil, goahttp.ErrInvalidType(\"Warehouse\", \"Get\", \"*warehouse.GetPayload\", v)\n\t\t}\n\t\tid = p.ID\n\t}\n\tu := &url.URL{Scheme: c.scheme, Host: c.host, Path: GetWarehousePath(id)}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, goahttp.ErrInvalidURL(\"Warehouse\", \"Get\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}", "func (client *DscCompilationJobClient) getCreateRequest(ctx context.Context, resourceGroupName string, automationAccountName string, compilationJobName string, options *DscCompilationJobClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}/compilationjobs/{compilationJobName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif automationAccountName == \"\" {\n\t\treturn nil, errors.New(\"parameter automationAccountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{automationAccountName}\", url.PathEscape(automationAccountName))\n\tif compilationJobName == \"\" {\n\t\treturn 
nil, errors.New(\"parameter compilationJobName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{compilationJobName}\", url.PathEscape(compilationJobName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-01-13-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *MonitorsClient) getCreateRequest(ctx context.Context, resourceGroupName string, monitorName string, options *MonitorsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors/{monitorName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif monitorName == \"\" {\n\t\treturn nil, errors.New(\"parameter monitorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{monitorName}\", url.PathEscape(monitorName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *PolicyDefinitionsClient) getCreateRequest(ctx context.Context, policyDefinitionName string, options *PolicyDefinitionsGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}\"\n\tif policyDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter policyDefinitionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{policyDefinitionName}\", url.PathEscape(policyDefinitionName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *RoleDefinitionsClient) getCreateRequest(ctx context.Context, scope string, roleDefinitionID string, options *RoleDefinitionsGetOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.Authorization/roleDefinitions/{roleDefinitionId}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\tif roleDefinitionID == \"\" {\n\t\treturn nil, errors.New(\"parameter 
roleDefinitionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{roleDefinitionId}\", url.PathEscape(roleDefinitionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ClustersClient) getCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, options *ClustersGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/clusters/{clusterName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ManagedDatabasesClient) getCreateRequest(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string, options *ManagedDatabasesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif managedInstanceName == \"\" {\n\t\treturn nil, errors.New(\"parameter managedInstanceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedInstanceName}\", url.PathEscape(managedInstanceName))\n\tif databaseName == \"\" {\n\t\treturn nil, errors.New(\"parameter databaseName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{databaseName}\", url.PathEscape(databaseName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateGetRenderResultRequest() (request *GetRenderResultRequest) {\n\trequest = &GetRenderResultRequest{\n\t\tRpcRequest: 
&requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"ivpd\", \"2019-06-25\", \"GetRenderResult\", \"ivpd\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *AvailabilitySetsClient) getCreateRequest(ctx context.Context, resourceGroupName string, availabilitySetName string, options *AvailabilitySetsGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif availabilitySetName == \"\" {\n\t\treturn nil, errors.New(\"parameter availabilitySetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{availabilitySetName}\", url.PathEscape(availabilitySetName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *VirtualApplianceSKUsClient) getCreateRequest(ctx context.Context, skuName string, options *VirtualApplianceSKUsGetOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkVirtualApplianceSkus/{skuName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\turlPath = strings.ReplaceAll(urlPath, \"{skuName}\", url.PathEscape(skuName))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-07-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *FactoriesClient) getCreateRequest(ctx context.Context, resourceGroupName string, factoryName string, options *FactoriesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif factoryName == \"\" {\n\t\treturn nil, errors.New(\"parameter factoryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{factoryName}\", url.PathEscape(factoryName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", 
\"2018-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfNoneMatch != nil {\n\t\treq.Raw().Header[\"If-None-Match\"] = []string{*options.IfNoneMatch}\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *WebAppsClient) getWebJobCreateRequest(ctx context.Context, resourceGroupName string, name string, webJobName string, options *WebAppsGetWebJobOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/webjobs/{webJobName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif webJobName == \"\" {\n\t\treturn nil, errors.New(\"parameter webJobName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{webJobName}\", url.PathEscape(webJobName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateGetIndustryCommerceInfoRequest() (request *GetIndustryCommerceInfoRequest) {\n\trequest = &GetIndustryCommerceInfoRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"companyreg\", \"2020-10-22\", \"GetIndustryCommerceInfo\", \"companyreg\", \"openAPI\")\n\trequest.Method = requests.GET\n\treturn\n}", "func (client *VirtualMachinesClient) getCreateRequest(ctx context.Context, resourceGroupName string, virtualMachineName string, options *VirtualMachinesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{virtualMachineName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif virtualMachineName == \"\" {\n\t\treturn nil, errors.New(\"parameter virtualMachineName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{virtualMachineName}\", url.PathEscape(virtualMachineName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-10-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateGetArmsConsoleUrlRequest() (request 
*GetArmsConsoleUrlRequest) {\n\trequest = &GetArmsConsoleUrlRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"ARMS\", \"2019-08-08\", \"GetArmsConsoleUrl\", \"arms\", \"openAPI\")\n\trequest.Method = requests.GET\n\treturn\n}", "func (client *ManagedClustersClient) getCreateRequest(ctx context.Context, resourceGroupName string, resourceName string, options *ManagedClustersClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(resourceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *VirtualMachineScaleSetsClient) getCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif vmScaleSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter vmScaleSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vmScaleSetName}\", url.PathEscape(vmScaleSetName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", string(*options.Expand))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *APIClient) getCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, apiID string, options *APIClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = 
strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif apiID == \"\" {\n\t\treturn nil, errors.New(\"parameter apiID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{apiId}\", url.PathEscape(apiID))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *UserMetricsKeysClient) getCreateRequest(ctx context.Context, options *UserMetricsKeysClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/trafficManagerUserMetricsKeys/default\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *WebAppsClient) getTriggeredWebJobCreateRequest(ctx context.Context, resourceGroupName string, name string, webJobName string, options *WebAppsGetTriggeredWebJobOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/triggeredwebjobs/{webJobName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif webJobName == \"\" {\n\t\treturn nil, errors.New(\"parameter webJobName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{webJobName}\", url.PathEscape(webJobName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *TaskRunsClient) getCreateRequest(ctx context.Context, resourceGroupName string, registryName string, taskRunName string, options 
*TaskRunsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/taskRuns/{taskRunName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif registryName == \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\tif taskRunName == \"\" {\n\t\treturn nil, errors.New(\"parameter taskRunName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{taskRunName}\", url.PathEscape(taskRunName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *AnalysisResultsClient) getCreateRequest(ctx context.Context, resourceGroupName string, testBaseAccountName string, packageName string, testResultName string, analysisResultName AnalysisResultName, options *AnalysisResultsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TestBase/testBaseAccounts/{testBaseAccountName}/packages/{packageName}/testResults/{testResultName}/analysisResults/{analysisResultName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif testBaseAccountName == \"\" {\n\t\treturn nil, errors.New(\"parameter testBaseAccountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{testBaseAccountName}\", url.PathEscape(testBaseAccountName))\n\tif packageName == \"\" {\n\t\treturn nil, errors.New(\"parameter packageName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{packageName}\", url.PathEscape(packageName))\n\tif testResultName == \"\" {\n\t\treturn nil, errors.New(\"parameter testResultName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{testResultName}\", url.PathEscape(testResultName))\n\tif analysisResultName == \"\" {\n\t\treturn nil, errors.New(\"parameter analysisResultName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{analysisResultName}\", url.PathEscape(string(analysisResultName)))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-12-16-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = 
[]string{\"application/json\"}\n\treturn req, nil\n}", "func (client *Client) getEmbeddingsCreateRequest(ctx context.Context, body EmbeddingsOptions, options *GetEmbeddingsOptions) (*policy.Request, error) {\n\turlPath := \"embeddings\"\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, client.formatURL(urlPath, getDeploymentID(body)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-07-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif err := runtime.MarshalAsJSON(req, body); err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (client *InteractionsClient) getCreateRequest(ctx context.Context, resourceGroupName string, hubName string, interactionName string, options *InteractionsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CustomerInsights/hubs/{hubName}/interactions/{interactionName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif hubName == \"\" {\n\t\treturn nil, errors.New(\"parameter hubName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{hubName}\", url.PathEscape(hubName))\n\tif interactionName == \"\" {\n\t\treturn nil, errors.New(\"parameter interactionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{interactionName}\", url.PathEscape(interactionName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.LocaleCode != nil {\n\t\treqQP.Set(\"locale-code\", *options.LocaleCode)\n\t}\n\treqQP.Set(\"api-version\", \"2017-04-26\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *subscriptionClient) getCreateRequest(ctx context.Context, topicName string, subscriptionName string, options *SubscriptionGetOptions) (*policy.Request, error) {\n\turlPath := \"/{topicName}/subscriptions/{subscriptionName}\"\n\tif topicName == \"\" {\n\t\treturn nil, errors.New(\"parameter topicName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{topicName}\", url.PathEscape(topicName))\n\tif subscriptionName == \"\" {\n\t\treturn nil, errors.New(\"parameter subscriptionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionName}\", url.PathEscape(subscriptionName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Enrich != nil {\n\t\treqQP.Set(\"enrich\", strconv.FormatBool(*options.Enrich))\n\t}\n\tif client.apiVersion != nil {\n\t\treqQP.Set(\"api-version\", \"2017_04\")\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/xml, application/atom+xml\")\n\treturn req, nil\n}", "func 
(client *AlertProcessingRulesClient) getByNameCreateRequest(ctx context.Context, resourceGroupName string, alertProcessingRuleName string, options *AlertProcessingRulesClientGetByNameOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AlertsManagement/actionRules/{alertProcessingRuleName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif alertProcessingRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter alertProcessingRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{alertProcessingRuleName}\", url.PathEscape(alertProcessingRuleName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-08\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateGetWsCustomizedChO2ORequest() (request *GetWsCustomizedChO2ORequest) {\n\trequest = &GetWsCustomizedChO2ORequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetWsCustomizedChO2O\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *PipelinesClient) getCreateRequest(ctx context.Context, resourceGroupName string, factoryName string, pipelineName string, options *PipelinesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/pipelines/{pipelineName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif factoryName == \"\" {\n\t\treturn nil, errors.New(\"parameter factoryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{factoryName}\", url.PathEscape(factoryName))\n\tif pipelineName == \"\" {\n\t\treturn nil, errors.New(\"parameter pipelineName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{pipelineName}\", url.PathEscape(pipelineName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.IfNoneMatch != nil {\n\t\treq.Raw().Header[\"If-None-Match\"] = []string{*options.IfNoneMatch}\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *DefenderSettingsClient) getCreateRequest(ctx context.Context, options *DefenderSettingsClientGetOptions) 
(*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.IoTSecurity/defenderSettings/default\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *WebAppsClient) getContinuousWebJobCreateRequest(ctx context.Context, resourceGroupName string, name string, webJobName string, options *WebAppsGetContinuousWebJobOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/continuouswebjobs/{webJobName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif webJobName == \"\" {\n\t\treturn nil, errors.New(\"parameter webJobName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{webJobName}\", url.PathEscape(webJobName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *LiveOutputsClient) getCreateRequest(ctx context.Context, resourceGroupName string, accountName string, liveEventName string, liveOutputName string, options *LiveOutputsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}/liveEvents/{liveEventName}/liveOutputs/{liveOutputName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\tif liveEventName == \"\" {\n\t\treturn nil, errors.New(\"parameter liveEventName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{liveEventName}\", url.PathEscape(liveEventName))\n\tif liveOutputName == \"\" {\n\t\treturn nil, 
errors.New(\"parameter liveOutputName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{liveOutputName}\", url.PathEscape(liveOutputName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *VMInsightsClient) getOnboardingStatusCreateRequest(ctx context.Context, resourceURI string, options *VMInsightsGetOnboardingStatusOptions) (*azcore.Request, error) {\n\turlPath := \"/{resourceUri}/providers/Microsoft.Insights/vmInsightsOnboardingStatuses/default\"\n\tif resourceURI == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceURI cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceUri}\", resourceURI)\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2018-11-27-preview\")\n\treq.URL.RawQuery = reqQP.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *TablesClient) getCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, tableName string, options *TablesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/tables/{tableName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\tif tableName == \"\" {\n\t\treturn nil, errors.New(\"parameter tableName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{tableName}\", url.PathEscape(tableName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *BuildServiceClient) listBuildsCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, buildServiceName string, options *BuildServiceClientListBuildsOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builds\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif 
resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif buildServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildServiceName}\", url.PathEscape(buildServiceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ContainerAppsDiagnosticsClient) getRootCreateRequest(ctx context.Context, resourceGroupName string, containerAppName string, options *ContainerAppsDiagnosticsClientGetRootOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}/detectorProperties/rootApi/\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif containerAppName == \"\" {\n\t\treturn nil, errors.New(\"parameter containerAppName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{containerAppName}\", url.PathEscape(containerAppName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (c *Client) NewGetMemberRequest(ctx context.Context, path string) (*http.Request, error) {\n\tscheme := c.Scheme\n\tif scheme == \"\" {\n\t\tscheme = \"https\"\n\t}\n\tu := url.URL{Host: c.Host, Scheme: scheme, Path: path}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif c.JWTSigner != nil {\n\t\tif err := c.JWTSigner.Sign(req); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn req, nil\n}", "func (client *AFDOriginsClient) getCreateRequest(ctx context.Context, resourceGroupName string, profileName string, originGroupName string, originName string, options *AFDOriginsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/originGroups/{originGroupName}/origins/{originName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif profileName == \"\" {\n\t\treturn nil, 
errors.New(\"parameter profileName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{profileName}\", url.PathEscape(profileName))\n\tif originGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter originGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{originGroupName}\", url.PathEscape(originGroupName))\n\tif originName == \"\" {\n\t\treturn nil, errors.New(\"parameter originName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{originName}\", url.PathEscape(originName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *KpiClient) getCreateRequest(ctx context.Context, resourceGroupName string, hubName string, kpiName string, options *KpiClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CustomerInsights/hubs/{hubName}/kpi/{kpiName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif hubName == \"\" {\n\t\treturn nil, errors.New(\"parameter hubName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{hubName}\", url.PathEscape(hubName))\n\tif kpiName == \"\" {\n\t\treturn nil, errors.New(\"parameter kpiName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{kpiName}\", url.PathEscape(kpiName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-04-26\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *VirtualApplianceSitesClient) getCreateRequest(ctx context.Context, resourceGroupName string, networkVirtualApplianceName string, siteName string, options *VirtualApplianceSitesGetOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}/virtualApplianceSites/{siteName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{networkVirtualApplianceName}\", url.PathEscape(networkVirtualApplianceName))\n\turlPath = strings.ReplaceAll(urlPath, \"{siteName}\", url.PathEscape(siteName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, 
azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-07-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *SQLResourcesClient) getSQLTriggerCreateRequest(ctx context.Context, resourceGroupName string, accountName string, databaseName string, containerName string, triggerName string, options *SQLResourcesClientGetSQLTriggerOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/triggers/{triggerName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\tif databaseName == \"\" {\n\t\treturn nil, errors.New(\"parameter databaseName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{databaseName}\", url.PathEscape(databaseName))\n\tif containerName == \"\" {\n\t\treturn nil, errors.New(\"parameter containerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{containerName}\", url.PathEscape(containerName))\n\tif triggerName == \"\" {\n\t\treturn nil, errors.New(\"parameter triggerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{triggerName}\", url.PathEscape(triggerName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-03-15-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *DevicesClient) getCreateRequest(ctx context.Context, resourceGroupName string, deviceName string, options *DevicesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HybridNetwork/devices/{deviceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif deviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter deviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{deviceName}\", url.PathEscape(deviceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := 
req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *WorkspacePurgeClient) getPurgeStatusCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, purgeID string, options *WorkspacePurgeClientGetPurgeStatusOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/operations/{purgeId}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\tif purgeID == \"\" {\n\t\treturn nil, errors.New(\"parameter purgeID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{purgeId}\", url.PathEscape(purgeID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ManagedInstancesClient) getCreateRequest(ctx context.Context, resourceGroupName string, managedInstanceName string, options *ManagedInstancesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif managedInstanceName == \"\" {\n\t\treturn nil, errors.New(\"parameter managedInstanceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedInstanceName}\", url.PathEscape(managedInstanceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *IncidentsClient) getCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, incidentID string, options *IncidentsClientGetOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/incidents/{incidentId}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\tif incidentID == \"\" {\n\t\treturn nil, errors.New(\"parameter incidentID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{incidentId}\", url.PathEscape(incidentID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-05-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateGetMultiRateConfigRequest() (request *GetMultiRateConfigRequest) {\n\trequest = &GetMultiRateConfigRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"live\", \"2016-11-01\", \"GetMultiRateConfig\", \"live\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *VirtualMachineScaleSetVMRunCommandsClient) getCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, runCommandName string, options *VirtualMachineScaleSetVMRunCommandsGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands/{runCommandName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif vmScaleSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter vmScaleSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vmScaleSetName}\", url.PathEscape(vmScaleSetName))\n\tif instanceID == \"\" {\n\t\treturn nil, errors.New(\"parameter instanceID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{instanceId}\", url.PathEscape(instanceID))\n\tif runCommandName == \"\" {\n\t\treturn nil, errors.New(\"parameter runCommandName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{runCommandName}\", url.PathEscape(runCommandName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treqQP.Set(\"api-version\", 
\"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json, text/json\")\n\treturn req, nil\n}", "func (client *RoleDefinitionsClient) getByIDCreateRequest(ctx context.Context, roleID string, options *RoleDefinitionsGetByIDOptions) (*policy.Request, error) {\n\turlPath := \"/{roleId}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{roleId}\", roleID)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ImplicitClient) getOptionalGlobalQueryCreateRequest(ctx context.Context, options *ImplicitClientGetOptionalGlobalQueryOptions) (*policy.Request, error) {\n\turlPath := \"/reqopt/global/optional/query\"\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif client.optionalGlobalQuery != nil {\n\t\treqQP.Set(\"optional-global-query\", strconv.FormatInt(int64(*client.optionalGlobalQuery), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ImplicitClient) getRequiredGlobalPathCreateRequest(ctx context.Context, options *ImplicitClientGetRequiredGlobalPathOptions) (*policy.Request, error) {\n\turlPath := \"/reqopt/global/required/path/{required-global-path}\"\n\tif client.requiredGlobalPath == \"\" {\n\t\treturn nil, errors.New(\"parameter client.requiredGlobalPath cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{required-global-path}\", url.PathEscape(client.requiredGlobalPath))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateGetSnapshotSettingsRequest() (request *GetSnapshotSettingsRequest) {\n\trequest = &GetSnapshotSettingsRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"R-kvstore\", \"2015-01-01\", \"GetSnapshotSettings\", \"redisa\", \"openAPI\")\n\treturn\n}", "func (client *ApplicationTypeVersionsClient) getCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, version string, options *ApplicationTypeVersionsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applicationTypes/{applicationTypeName}/versions/{version}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\tif applicationTypeName == \"\" {\n\t\treturn 
nil, errors.New(\"parameter applicationTypeName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{applicationTypeName}\", url.PathEscape(applicationTypeName))\n\tif version == \"\" {\n\t\treturn nil, errors.New(\"parameter version cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{version}\", url.PathEscape(version))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ExpressRoutePortsLocationsClient) getCreateRequest(ctx context.Context, locationName string, options *ExpressRoutePortsLocationsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePortsLocations/{locationName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif locationName == \"\" {\n\t\treturn nil, errors.New(\"parameter locationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{locationName}\", url.PathEscape(locationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ReplicationsClient) getCreateRequest(ctx context.Context, resourceGroupName string, registryName string, replicationName string, options *ReplicationsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif registryName == \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\tif replicationName == \"\" {\n\t\treturn nil, errors.New(\"parameter replicationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{replicationName}\", url.PathEscape(replicationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ConsumerInvitationsClient) getCreateRequest(ctx context.Context, location string, invitationID string, options *ConsumerInvitationsClientGetOptions) (*policy.Request, error) {\n\turlPath := 
\"/providers/Microsoft.DataShare/locations/{location}/consumerInvitations/{invitationId}\"\n\tif location == \"\" {\n\t\treturn nil, errors.New(\"parameter location cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{location}\", url.PathEscape(location))\n\tif invitationID == \"\" {\n\t\treturn nil, errors.New(\"parameter invitationID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{invitationId}\", url.PathEscape(invitationID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-09-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *CompliancesClient) getCreateRequest(ctx context.Context, scope string, complianceName string, options *CompliancesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.Security/compliances/{complianceName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\tif complianceName == \"\" {\n\t\treturn nil, errors.New(\"parameter complianceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{complianceName}\", url.PathEscape(complianceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *WebAppsClient) getDeploymentCreateRequest(ctx context.Context, resourceGroupName string, name string, id string, options *WebAppsGetDeploymentOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/deployments/{id}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif id == \"\" {\n\t\treturn nil, errors.New(\"parameter id cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{id}\", url.PathEscape(id))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateGetApplicationListRequest() (request *GetApplicationListRequest) {\n\trequest = &GetApplicationListRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"mse\", \"2019-05-31\", \"GetApplicationList\", \"mse\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateGetFaceSearchUserRequest() (request *GetFaceSearchUserRequest) 
{\n\trequest = &GetFaceSearchUserRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"imm\", \"2017-09-06\", \"GetFaceSearchUser\", \"imm\", \"openAPI\")\n\treturn\n}", "func (client *DatabaseVulnerabilityAssessmentScansClient) getCreateRequest(ctx context.Context, resourceGroupName string, serverName string, databaseName string, vulnerabilityAssessmentName VulnerabilityAssessmentName, scanID string, options *DatabaseVulnerabilityAssessmentScansClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/vulnerabilityAssessments/{vulnerabilityAssessmentName}/scans/{scanId}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serverName == \"\" {\n\t\treturn nil, errors.New(\"parameter serverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serverName}\", url.PathEscape(serverName))\n\tif databaseName == \"\" {\n\t\treturn nil, errors.New(\"parameter databaseName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{databaseName}\", url.PathEscape(databaseName))\n\tif vulnerabilityAssessmentName == \"\" {\n\t\treturn nil, errors.New(\"parameter vulnerabilityAssessmentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vulnerabilityAssessmentName}\", url.PathEscape(string(vulnerabilityAssessmentName)))\n\tif scanID == \"\" {\n\t\treturn nil, errors.New(\"parameter scanID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{scanId}\", url.PathEscape(scanID))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ClustersClient) getCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, options *ClustersClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/serverGroupsv2/{clusterName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-11-08\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = 
[]string{\"application/json\"}\n\treturn req, nil\n}", "func (client *FirewallRulesClient) getCreateRequest(ctx context.Context, resourceGroupName string, serverName string, firewallRuleName string, options *FirewallRulesGetOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/servers/{serverName}/firewallRules/{firewallRuleName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serverName == \"\" {\n\t\treturn nil, errors.New(\"parameter serverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serverName}\", url.PathEscape(serverName))\n\tif firewallRuleName == \"\" {\n\t\treturn nil, errors.New(\"parameter firewallRuleName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{firewallRuleName}\", url.PathEscape(firewallRuleName))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2017-12-01\")\n\treq.URL.RawQuery = reqQP.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *AssociationsClient) getCreateRequest(ctx context.Context, scope string, associationName string, options *AssociationsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.CustomProviders/associations/{associationName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\tif associationName == \"\" {\n\t\treturn nil, errors.New(\"parameter associationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{associationName}\", url.PathEscape(associationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-09-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *TagRulesClient) getCreateRequest(ctx context.Context, resourceGroupName string, monitorName string, ruleSetName string, options *TagRulesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Dynatrace.Observability/monitors/{monitorName}/tagRules/{ruleSetName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif monitorName == \"\" {\n\t\treturn nil, errors.New(\"parameter monitorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{monitorName}\", 
url.PathEscape(monitorName))\n\tif ruleSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter ruleSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{ruleSetName}\", url.PathEscape(ruleSetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-27\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *WebAppsClient) getFunctionCreateRequest(ctx context.Context, resourceGroupName string, name string, functionName string, options *WebAppsGetFunctionOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/functions/{functionName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif functionName == \"\" {\n\t\treturn nil, errors.New(\"parameter functionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{functionName}\", url.PathEscape(functionName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ReplicationsClient) getCreateRequest(ctx context.Context, resourceGroupName string, registryName string, replicationName string, options *ReplicationsGetOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif registryName == \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\tif replicationName == \"\" {\n\t\treturn nil, errors.New(\"parameter replicationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{replicationName}\", url.PathEscape(replicationName))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := 
req.URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.URL.RawQuery = reqQP.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *DicomServicesClient) getCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, dicomServiceName string, options *DicomServicesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HealthcareApis/workspaces/{workspaceName}/dicomservices/{dicomServiceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\tif dicomServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter dicomServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{dicomServiceName}\", url.PathEscape(dicomServiceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *SQLResourcesClient) getSQLUserDefinedFunctionCreateRequest(ctx context.Context, resourceGroupName string, accountName string, databaseName string, containerName string, userDefinedFunctionName string, options *SQLResourcesClientGetSQLUserDefinedFunctionOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/userDefinedFunctions/{userDefinedFunctionName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\tif databaseName == \"\" {\n\t\treturn nil, errors.New(\"parameter databaseName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{databaseName}\", url.PathEscape(databaseName))\n\tif containerName == \"\" {\n\t\treturn nil, errors.New(\"parameter containerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{containerName}\", url.PathEscape(containerName))\n\tif userDefinedFunctionName == \"\" {\n\t\treturn nil, errors.New(\"parameter userDefinedFunctionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, 
\"{userDefinedFunctionName}\", url.PathEscape(userDefinedFunctionName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-03-15-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *VirtualNetworkLinksClient) getCreateRequest(ctx context.Context, resourceGroupName string, privateZoneName string, virtualNetworkLinkName string, options *VirtualNetworkLinksGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/virtualNetworkLinks/{virtualNetworkLinkName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif privateZoneName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateZoneName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateZoneName}\", url.PathEscape(privateZoneName))\n\tif virtualNetworkLinkName == \"\" {\n\t\treturn nil, errors.New(\"parameter virtualNetworkLinkName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{virtualNetworkLinkName}\", url.PathEscape(virtualNetworkLinkName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *DataCollectionEndpointsClient) getCreateRequest(ctx context.Context, resourceGroupName string, dataCollectionEndpointName string, options *DataCollectionEndpointsGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionEndpoints/{dataCollectionEndpointName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif dataCollectionEndpointName == \"\" {\n\t\treturn nil, errors.New(\"parameter dataCollectionEndpointName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{dataCollectionEndpointName}\", url.PathEscape(dataCollectionEndpointName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client 
*CloudServicesClient) getCreateRequest(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif cloudServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter cloudServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{cloudServiceName}\", url.PathEscape(cloudServiceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-04\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *NotebookWorkspacesClient) getCreateRequest(ctx context.Context, resourceGroupName string, accountName string, notebookWorkspaceName NotebookWorkspaceName, options *NotebookWorkspacesGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/notebookWorkspaces/{notebookWorkspaceName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\tif notebookWorkspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter notebookWorkspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{notebookWorkspaceName}\", url.PathEscape(string(notebookWorkspaceName)))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-10-15\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}" ]
[ "0.71907246", "0.6112511", "0.607343", "0.6039145", "0.6004508", "0.60000503", "0.5975623", "0.596344", "0.5930511", "0.5888355", "0.5842194", "0.58329374", "0.58214843", "0.58069664", "0.5786372", "0.57797956", "0.5777607", "0.5739794", "0.5726773", "0.5718954", "0.57172954", "0.570649", "0.57044214", "0.5697197", "0.56927323", "0.56807613", "0.5677242", "0.565976", "0.564308", "0.5641449", "0.5631516", "0.5622468", "0.562056", "0.5619841", "0.5601659", "0.55973166", "0.5595461", "0.5590116", "0.55893946", "0.55887157", "0.5585612", "0.5555662", "0.5535496", "0.5531304", "0.5485612", "0.5480837", "0.54791915", "0.54785955", "0.547043", "0.5470209", "0.5468979", "0.5467805", "0.5467192", "0.5466677", "0.5465907", "0.546145", "0.5455993", "0.54553443", "0.54517573", "0.5447496", "0.54420346", "0.543899", "0.54348", "0.5434444", "0.542853", "0.5424541", "0.5409712", "0.54044425", "0.5390053", "0.53895396", "0.53894883", "0.5376264", "0.5375195", "0.53707844", "0.5369067", "0.535666", "0.53552616", "0.5341876", "0.5339544", "0.5336449", "0.5330243", "0.53275496", "0.5325403", "0.5320522", "0.5320491", "0.53176135", "0.5314641", "0.5301148", "0.5293349", "0.52908015", "0.5286185", "0.5278572", "0.5270806", "0.5270629", "0.526229", "0.525296", "0.5252145", "0.52519625", "0.5242676", "0.5241544" ]
0.77959424
0
getBuiltInHandleResponse handles the GetBuiltIn response.
func (client *PolicyDefinitionsClient) getBuiltInHandleResponse(resp *http.Response) (PolicyDefinitionsGetBuiltInResponse, error) {
	result := PolicyDefinitionsGetBuiltInResponse{RawResponse: resp}
	if err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinition); err != nil {
		return PolicyDefinitionsGetBuiltInResponse{}, runtime.NewResponseError(err, resp)
	}
	return result, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (client *PolicyDefinitionsClient) listBuiltInHandleResponse(resp *http.Response) (PolicyDefinitionsListBuiltInResponse, error) {\n\tresult := PolicyDefinitionsListBuiltInResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinitionListResult); err != nil {\n\t\treturn PolicyDefinitionsListBuiltInResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *VirtualMachineScaleSetVMRunCommandsClient) getHandleResponse(resp *http.Response) (VirtualMachineScaleSetVMRunCommandsGetResponse, error) {\n\tresult := VirtualMachineScaleSetVMRunCommandsGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineRunCommand); err != nil {\n\t\treturn VirtualMachineScaleSetVMRunCommandsGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *LiveOutputsClient) getHandleResponse(resp *http.Response) (LiveOutputsClientGetResponse, error) {\n\tresult := LiveOutputsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.LiveOutput); err != nil {\n\t\treturn LiveOutputsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *RegistrationDefinitionsClient) getHandleResponse(resp *http.Response) (RegistrationDefinitionsClientGetResponse, error) {\n\tresult := RegistrationDefinitionsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RegistrationDefinition); err != nil {\n\t\treturn RegistrationDefinitionsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SchemaRegistryClient) getHandleResponse(resp *http.Response) (SchemaRegistryClientGetResponse, error) {\n\tresult := SchemaRegistryClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SchemaGroup); err != nil {\n\t\treturn SchemaRegistryClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *BuildServiceClient) getSupportedBuildpackHandleResponse(resp *http.Response) (BuildServiceClientGetSupportedBuildpackResponse, error) {\n\tresult := BuildServiceClientGetSupportedBuildpackResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SupportedBuildpackResource); err != nil {\n\t\treturn BuildServiceClientGetSupportedBuildpackResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WorkspacesClient) getHandleResponse(resp *http.Response) (WorkspacesGetResponse, error) {\n\tresult := WorkspacesGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Workspace); err != nil {\n\t\treturn WorkspacesGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *CustomAssessmentAutomationsClient) getHandleResponse(resp *http.Response) (CustomAssessmentAutomationsGetResponse, error) {\n\tresult := CustomAssessmentAutomationsGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CustomAssessmentAutomation); err != nil {\n\t\treturn CustomAssessmentAutomationsGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) getHandleResponse(resp *http.Response) (WebAppsGetResponse, error) {\n\tresult := WebAppsGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Site); err != nil {\n\t\treturn WebAppsGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *OutputsClient) getHandleResponse(resp *http.Response) (OutputsGetResponse, error) {\n\tresult := OutputsGetResponse{RawResponse: resp}\n\tif val := 
resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Output); err != nil {\n\t\treturn OutputsGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *VirtualMachineImageTemplatesClient) getRunOutputHandleResponse(resp *http.Response) (VirtualMachineImageTemplatesClientGetRunOutputResponse, error) {\n\tresult := VirtualMachineImageTemplatesClientGetRunOutputResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RunOutput); err != nil {\n\t\treturn VirtualMachineImageTemplatesClientGetRunOutputResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *PolicyDefinitionsClient) getHandleResponse(resp *http.Response) (PolicyDefinitionsGetResponse, error) {\n\tresult := PolicyDefinitionsGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinition); err != nil {\n\t\treturn PolicyDefinitionsGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *PipelinesClient) getHandleResponse(resp *http.Response) (PipelinesClientGetResponse, error) {\n\tresult := PipelinesClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PipelineResource); err != nil {\n\t\treturn PipelinesClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ReplicationsClient) getHandleResponse(resp *azcore.Response) (ReplicationResponse, error) {\n\tvar val *Replication\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn ReplicationResponse{}, err\n\t}\n\treturn ReplicationResponse{RawResponse: resp.Response, Replication: val}, nil\n}", "func (client *NotebookWorkspacesClient) getHandleResponse(resp *http.Response) (NotebookWorkspacesGetResponse, error) {\n\tresult := NotebookWorkspacesGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.NotebookWorkspace); err != nil {\n\t\treturn NotebookWorkspacesGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *VirtualApplianceSitesClient) getHandleResponse(resp *azcore.Response) (VirtualApplianceSiteResponse, error) {\n\tvar val *VirtualApplianceSite\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn VirtualApplianceSiteResponse{}, err\n\t}\n\treturn VirtualApplianceSiteResponse{RawResponse: resp.Response, VirtualApplianceSite: val}, nil\n}", "func (client *ApplicationTypeVersionsClient) getHandleResponse(resp *http.Response) (ApplicationTypeVersionsClientGetResponse, error) {\n\tresult := ApplicationTypeVersionsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ApplicationTypeVersionResource); err != nil {\n\t\treturn ApplicationTypeVersionsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *LocalRulestacksClient) getHandleResponse(resp *http.Response) (LocalRulestacksClientGetResponse, error) {\n\tresult := LocalRulestacksClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.LocalRulestackResource); err != nil {\n\t\treturn LocalRulestacksClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *RoleDefinitionsClient) getHandleResponse(resp *http.Response) (RoleDefinitionsGetResponse, error) {\n\tresult := RoleDefinitionsGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RoleDefinition); err != nil {\n\t\treturn RoleDefinitionsGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *SQLResourcesClient) 
getSQLUserDefinedFunctionHandleResponse(resp *http.Response) (SQLResourcesClientGetSQLUserDefinedFunctionResponse, error) {\n\tresult := SQLResourcesClientGetSQLUserDefinedFunctionResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SQLUserDefinedFunctionGetResults); err != nil {\n\t\treturn SQLResourcesClientGetSQLUserDefinedFunctionResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *GalleryImageVersionsClient) getHandleResponse(resp *azcore.Response) (GalleryImageVersionResponse, error) {\n\tvar val *GalleryImageVersion\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn GalleryImageVersionResponse{}, err\n\t}\n\treturn GalleryImageVersionResponse{RawResponse: resp.Response, GalleryImageVersion: val}, nil\n}", "func (client *AlertOperationClient) getHandleResponse(resp *http.Response) (AlertOperationClientGetResponse, error) {\n\tresult := AlertOperationClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AlertOperationResult); err != nil {\n\t\treturn AlertOperationClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *OutputsClient) getHandleResponse(resp *http.Response) (OutputsClientGetResponse, error) {\n\tresult := OutputsClientGetResponse{}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Output); err != nil {\n\t\treturn OutputsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *MonitorsClient) getHandleResponse(resp *http.Response) (MonitorsClientGetResponse, error) {\n\tresult := MonitorsClientGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.MonitorResource); err != nil {\n\t\treturn MonitorsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AvailabilitySetsClient) getHandleResponse(resp *http.Response) (AvailabilitySetsGetResponse, error) {\n\tresult := AvailabilitySetsGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AvailabilitySet); err != nil {\n\t\treturn AvailabilitySetsGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *VirtualMachineScaleSetsClient) getHandleResponse(resp *http.Response) (VirtualMachineScaleSetsGetResponse, error) {\n\tresult := VirtualMachineScaleSetsGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineScaleSet); err != nil {\n\t\treturn VirtualMachineScaleSetsGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ConnectedEnvironmentsClient) getHandleResponse(resp *http.Response) (ConnectedEnvironmentsClientGetResponse, error) {\n\tresult := ConnectedEnvironmentsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ConnectedEnvironment); err != nil {\n\t\treturn ConnectedEnvironmentsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *IntegrationRuntimeNodesClient) getHandleResponse(resp *http.Response) (IntegrationRuntimeNodesClientGetResponse, error) {\n\tresult := IntegrationRuntimeNodesClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SelfHostedIntegrationRuntimeNode); err != nil {\n\t\treturn IntegrationRuntimeNodesClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *InteractionsClient) getHandleResponse(resp *http.Response) (InteractionsClientGetResponse, error) {\n\tresult := InteractionsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.InteractionResourceFormat); 
err != nil {\n\t\treturn InteractionsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *MetricAlertsClient) getHandleResponse(resp *http.Response) (MetricAlertsClientGetResponse, error) {\n\tresult := MetricAlertsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.MetricAlertResource); err != nil {\n\t\treturn MetricAlertsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualApplianceSKUsClient) getHandleResponse(resp *azcore.Response) (NetworkVirtualApplianceSKUResponse, error) {\n\tvar val *NetworkVirtualApplianceSKU\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn NetworkVirtualApplianceSKUResponse{}, err\n\t}\n\treturn NetworkVirtualApplianceSKUResponse{RawResponse: resp.Response, NetworkVirtualApplianceSKU: val}, nil\n}", "func (client *KpiClient) getHandleResponse(resp *http.Response) (KpiClientGetResponse, error) {\n\tresult := KpiClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KpiResourceFormat); err != nil {\n\t\treturn KpiClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ScriptExecutionsClient) getHandleResponse(resp *http.Response) (ScriptExecutionsClientGetResponse, error) {\n\tresult := ScriptExecutionsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ScriptExecution); err != nil {\n\t\treturn ScriptExecutionsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *MachineExtensionsClient) getHandleResponse(resp *http.Response) (MachineExtensionsClientGetResponse, error) {\n\tresult := MachineExtensionsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.MachineExtension); err != nil {\n\t\treturn MachineExtensionsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SapMonitorsClient) getHandleResponse(resp *http.Response) (SapMonitorsClientGetResponse, error) {\n\tresult := SapMonitorsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SapMonitor); err != nil {\n\t\treturn SapMonitorsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AFDOriginsClient) getHandleResponse(resp *http.Response) (AFDOriginsClientGetResponse, error) {\n\tresult := AFDOriginsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AFDOrigin); err != nil {\n\t\treturn AFDOriginsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AnalysisResultsClient) getHandleResponse(resp *http.Response) (AnalysisResultsClientGetResponse, error) {\n\tresult := AnalysisResultsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AnalysisResultSingletonResource); err != nil {\n\t\treturn AnalysisResultsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) getTriggeredWebJobHandleResponse(resp *http.Response) (WebAppsGetTriggeredWebJobResponse, error) {\n\tresult := WebAppsGetTriggeredWebJobResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.TriggeredWebJob); err != nil {\n\t\treturn WebAppsGetTriggeredWebJobResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *SQLVirtualMachinesClient) getHandleResponse(resp *http.Response) (SQLVirtualMachinesClientGetResponse, error) {\n\tresult := SQLVirtualMachinesClientGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SQLVirtualMachine); err != nil {\n\t\treturn SQLVirtualMachinesClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client 
*DatabaseVulnerabilityAssessmentScansClient) getHandleResponse(resp *http.Response) (DatabaseVulnerabilityAssessmentScansClientGetResponse, error) {\n\tresult := DatabaseVulnerabilityAssessmentScansClientGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VulnerabilityAssessmentScanRecord); err != nil {\n\t\treturn DatabaseVulnerabilityAssessmentScansClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *TriggersClient) getHandleResponse(resp *http.Response) (TriggersClientGetResponse, error) {\n\tresult := TriggersClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result); err != nil {\n\t\treturn TriggersClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebhooksClient) getHandleResponse(resp *http.Response) (WebhooksClientGetResponse, error) {\n\tresult := WebhooksClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Webhook); err != nil {\n\t\treturn WebhooksClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) getProcessHandleResponse(resp *http.Response) (WebAppsGetProcessResponse, error) {\n\tresult := WebAppsGetProcessResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ProcessInfo); err != nil {\n\t\treturn WebAppsGetProcessResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *VirtualMachineScaleSetVMRunCommandsClient) listHandleResponse(resp *http.Response) (VirtualMachineScaleSetVMRunCommandsListResponse, error) {\n\tresult := VirtualMachineScaleSetVMRunCommandsListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineRunCommandsListResult); err != nil {\n\t\treturn VirtualMachineScaleSetVMRunCommandsListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *AlertsClient) getHandleResponse(resp *http.Response) (AlertsClientGetResponse, error) {\n\tresult := AlertsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Alert); err != nil {\n\t\treturn AlertsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) getFunctionHandleResponse(resp *http.Response) (WebAppsGetFunctionResponse, error) {\n\tresult := WebAppsGetFunctionResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.FunctionEnvelope); err != nil {\n\t\treturn WebAppsGetFunctionResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *SQLResourcesClient) getSQLTriggerHandleResponse(resp *http.Response) (SQLResourcesClientGetSQLTriggerResponse, error) {\n\tresult := SQLResourcesClientGetSQLTriggerResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SQLTriggerGetResults); err != nil {\n\t\treturn SQLResourcesClientGetSQLTriggerResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualMachinesClient) getHandleResponse(resp *http.Response) (VirtualMachinesClientGetResponse, error) {\n\tresult := VirtualMachinesClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachine); err != nil {\n\t\treturn VirtualMachinesClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ManagedDatabasesClient) getHandleResponse(resp *http.Response) (ManagedDatabasesClientGetResponse, error) {\n\tresult := ManagedDatabasesClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedDatabase); err != nil {\n\t\treturn ManagedDatabasesClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", 
"func (client *BuildServiceClient) getBuildHandleResponse(resp *http.Response) (BuildServiceClientGetBuildResponse, error) {\n\tresult := BuildServiceClientGetBuildResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Build); err != nil {\n\t\treturn BuildServiceClientGetBuildResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualNetworkLinksClient) getHandleResponse(resp *http.Response) (VirtualNetworkLinksGetResponse, error) {\n\tresult := VirtualNetworkLinksGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VirtualNetworkLink); err != nil {\n\t\treturn VirtualNetworkLinksGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ContainerAppsDiagnosticsClient) getDetectorHandleResponse(resp *http.Response) (ContainerAppsDiagnosticsClientGetDetectorResponse, error) {\n\tresult := ContainerAppsDiagnosticsClientGetDetectorResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Diagnostics); err != nil {\n\t\treturn ContainerAppsDiagnosticsClientGetDetectorResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *subscriptionClient) getHandleResponse(resp *http.Response) (SubscriptionGetResponse, error) {\n\tresult := SubscriptionGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsXML(resp, &result.Object); err != nil {\n\t\treturn SubscriptionGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *JobExecutionsClient) getHandleResponse(resp *http.Response) (JobExecutionsClientGetResponse, error) {\n\tresult := JobExecutionsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.JobExecution); err != nil {\n\t\treturn JobExecutionsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *RecordSetsClient) getHandleResponse(resp *azcore.Response) (RecordSetResponse, error) {\n\tvar val *RecordSet\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn RecordSetResponse{}, err\n\t}\n\treturn RecordSetResponse{RawResponse: resp.Response, RecordSet: val}, nil\n}", "func (client *VendorNetworkFunctionsClient) getHandleResponse(resp *http.Response) (VendorNetworkFunctionsClientGetResponse, error) {\n\tresult := VendorNetworkFunctionsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VendorNetworkFunction); err != nil {\n\t\treturn VendorNetworkFunctionsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DscCompilationJobClient) getHandleResponse(resp *http.Response) (DscCompilationJobClientGetResponse, error) {\n\tresult := DscCompilationJobClientGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DscCompilationJob); err != nil {\n\t\treturn DscCompilationJobClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *PermissionBindingsClient) getHandleResponse(resp *http.Response) (PermissionBindingsClientGetResponse, error) {\n\tresult := PermissionBindingsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PermissionBinding); err != nil {\n\t\treturn PermissionBindingsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AgentPoolsClient) getHandleResponse(resp *http.Response) (AgentPoolsClientGetResponse, error) {\n\tresult := AgentPoolsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AgentPool); err != nil {\n\t\treturn AgentPoolsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AgentsClient) getHandleResponse(resp *http.Response) (AgentsClientGetResponse, error) 
{\n\tresult := AgentsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Agent); err != nil {\n\t\treturn AgentsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ReplicationsClient) getHandleResponse(resp *http.Response) (ReplicationsClientGetResponse, error) {\n\tresult := ReplicationsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Replication); err != nil {\n\t\treturn ReplicationsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *CompliancesClient) getHandleResponse(resp *http.Response) (CompliancesClientGetResponse, error) {\n\tresult := CompliancesClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Compliance); err != nil {\n\t\treturn CompliancesClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ActionsClient) getHandleResponse(resp *http.Response) (ActionsClientGetResponse, error) {\n\tresult := ActionsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ActionResponse); err != nil {\n\t\treturn ActionsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WorkflowsClient) getHandleResponse(resp *http.Response) (WorkflowsClientGetResponse, error) {\n\tresult := WorkflowsClientGetResponse{}\n\tif val := resp.Header.Get(\"x-ms-request-id\"); val != \"\" {\n\t\tresult.XMSRequestID = &val\n\t}\n\tif val := resp.Header.Get(\"x-ms-correlation-request-id\"); val != \"\" {\n\t\tresult.XMSCorrelationRequestID = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Workflow); err != nil {\n\t\treturn WorkflowsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ManagedClustersClient) getHandleResponse(resp *http.Response) (ManagedClustersClientGetResponse, error) {\n\tresult := ManagedClustersClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedCluster); err != nil {\n\t\treturn ManagedClustersClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) listHandleResponse(resp *http.Response) (WebAppsListResponse, error) {\n\tresult := WebAppsListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.WebAppCollection); err != nil {\n\t\treturn WebAppsListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *VirtualMachineImageTemplatesClient) getHandleResponse(resp *http.Response) (VirtualMachineImageTemplatesClientGetResponse, error) {\n\tresult := VirtualMachineImageTemplatesClientGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ImageTemplate); err != nil {\n\t\treturn VirtualMachineImageTemplatesClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *IPAllocationsClient) getHandleResponse(resp *http.Response) (IPAllocationsClientGetResponse, error) {\n\tresult := IPAllocationsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.IPAllocation); err != nil {\n\t\treturn IPAllocationsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *LiveOutputsClient) listHandleResponse(resp *http.Response) (LiveOutputsClientListResponse, error) {\n\tresult := LiveOutputsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.LiveOutputListResult); err != nil {\n\t\treturn LiveOutputsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ReplicationvCentersClient) getHandleResponse(resp *http.Response) (ReplicationvCentersClientGetResponse, error) {\n\tresult := 
ReplicationvCentersClientGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VCenter); err != nil {\n\t\treturn ReplicationvCentersClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client JobClient) GetOutputResponder(resp *http.Response) (result String, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result.Value),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *CustomDomainsClient) getHandleResponse(resp *http.Response) (CustomDomainsGetResponse, error) {\n\tresult := CustomDomainsGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CustomDomainResource); err != nil {\n\t\treturn CustomDomainsGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *PortalConfigClient) getHandleResponse(resp *http.Response) (PortalConfigClientGetResponse, error) {\n\tresult := PortalConfigClientGetResponse{}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PortalConfigContract); err != nil {\n\t\treturn PortalConfigClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *GremlinResourcesClient) getGremlinDatabaseHandleResponse(resp *http.Response) (GremlinResourcesClientGetGremlinDatabaseResponse, error) {\n\tresult := GremlinResourcesClientGetGremlinDatabaseResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GremlinDatabaseGetResults); err != nil {\n\t\treturn GremlinResourcesClientGetGremlinDatabaseResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *TaskRunsClient) getHandleResponse(resp *http.Response) (TaskRunsClientGetResponse, error) {\n\tresult := TaskRunsClientGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.TaskRun); err != nil {\n\t\treturn TaskRunsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualMachineImagesEdgeZoneClient) getHandleResponse(resp *http.Response) (VirtualMachineImagesEdgeZoneClientGetResponse, error) {\n\tresult := VirtualMachineImagesEdgeZoneClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineImage); err != nil {\n\t\treturn VirtualMachineImagesEdgeZoneClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SourceControlConfigurationsClient) getHandleResponse(resp *http.Response) (SourceControlConfigurationsClientGetResponse, error) {\n\tresult := SourceControlConfigurationsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SourceControlConfiguration); err != nil {\n\t\treturn SourceControlConfigurationsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ExpressRoutePortsLocationsClient) getHandleResponse(resp *http.Response) (ExpressRoutePortsLocationsClientGetResponse, error) {\n\tresult := ExpressRoutePortsLocationsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ExpressRoutePortsLocation); err != nil {\n\t\treturn ExpressRoutePortsLocationsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ManagedInstancesClient) getHandleResponse(resp *http.Response) (ManagedInstancesClientGetResponse, error) {\n\tresult := ManagedInstancesClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedInstance); err != nil {\n\t\treturn ManagedInstancesClientGetResponse{}, 
err\n\t}\n\treturn result, nil\n}", "func (client *FirewallRulesClient) getHandleResponse(resp *azcore.Response) (FirewallRulesGetResponse, error) {\n\tresult := FirewallRulesGetResponse{RawResponse: resp.Response}\n\tif err := resp.UnmarshalAsJSON(&result.FirewallRule); err != nil {\n\t\treturn FirewallRulesGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ManagementAssociationsClient) getHandleResponse(resp *http.Response) (ManagementAssociationsGetResponse, error) {\n\tresult := ManagementAssociationsGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagementAssociation); err != nil {\n\t\treturn ManagementAssociationsGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) getProcessModuleHandleResponse(resp *http.Response) (WebAppsGetProcessModuleResponse, error) {\n\tresult := WebAppsGetProcessModuleResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ProcessModuleInfo); err != nil {\n\t\treturn WebAppsGetProcessModuleResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) getTriggeredWebJobHistoryHandleResponse(resp *http.Response) (WebAppsGetTriggeredWebJobHistoryResponse, error) {\n\tresult := WebAppsGetTriggeredWebJobHistoryResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.TriggeredJobHistory); err != nil {\n\t\treturn WebAppsGetTriggeredWebJobHistoryResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *GuestAgentsClient) getHandleResponse(resp *http.Response) (GuestAgentsClientGetResponse, error) {\n\tresult := GuestAgentsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GuestAgent); err != nil {\n\t\treturn GuestAgentsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *RouteTablesClient) getHandleResponse(resp *http.Response) (RouteTablesGetResponse, error) {\n\tresult := RouteTablesGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RouteTable); err != nil {\n\t\treturn RouteTablesGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *MonitoringSettingsClient) getHandleResponse(resp *http.Response) (MonitoringSettingsClientGetResponse, error) {\n\tresult := MonitoringSettingsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.MonitoringSettingResource); err != nil {\n\t\treturn MonitoringSettingsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) getWebJobHandleResponse(resp *http.Response) (WebAppsGetWebJobResponse, error) {\n\tresult := WebAppsGetWebJobResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.WebJob); err != nil {\n\t\treturn WebAppsGetWebJobResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) getTriggeredWebJobSlotHandleResponse(resp *http.Response) (WebAppsGetTriggeredWebJobSlotResponse, error) {\n\tresult := WebAppsGetTriggeredWebJobSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.TriggeredWebJob); err != nil {\n\t\treturn WebAppsGetTriggeredWebJobSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) getConfigurationHandleResponse(resp *http.Response) (WebAppsGetConfigurationResponse, error) {\n\tresult := WebAppsGetConfigurationResponse{RawResponse: resp}\n\tif err := 
runtime.UnmarshalAsJSON(resp, &result.SiteConfigResource); err != nil {\n\t\treturn WebAppsGetConfigurationResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *VirtualRoutersClient) getHandleResponse(resp *http.Response) (VirtualRoutersClientGetResponse, error) {\n\tresult := VirtualRoutersClientGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VirtualRouter); err != nil {\n\t\treturn VirtualRoutersClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SQLResourcesClient) getSQLStoredProcedureHandleResponse(resp *http.Response) (SQLResourcesClientGetSQLStoredProcedureResponse, error) {\n\tresult := SQLResourcesClientGetSQLStoredProcedureResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SQLStoredProcedureGetResults); err != nil {\n\t\treturn SQLResourcesClientGetSQLStoredProcedureResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *PolicyDefinitionsClient) getAtManagementGroupHandleResponse(resp *http.Response) (PolicyDefinitionsGetAtManagementGroupResponse, error) {\n\tresult := PolicyDefinitionsGetAtManagementGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinition); err != nil {\n\t\treturn PolicyDefinitionsGetAtManagementGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *BuildServiceClient) getBuildServiceHandleResponse(resp *http.Response) (BuildServiceClientGetBuildServiceResponse, error) {\n\tresult := BuildServiceClientGetBuildServiceResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.BuildService); err != nil {\n\t\treturn BuildServiceClientGetBuildServiceResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *LongTermRetentionManagedInstanceBackupsClient) getHandleResponse(resp *http.Response) (LongTermRetentionManagedInstanceBackupsGetResponse, error) {\n\tresult := LongTermRetentionManagedInstanceBackupsGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedInstanceLongTermRetentionBackup); err != nil {\n\t\treturn LongTermRetentionManagedInstanceBackupsGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ContainerAppsDiagnosticsClient) getRootHandleResponse(resp *http.Response) (ContainerAppsDiagnosticsClientGetRootResponse, error) {\n\tresult := ContainerAppsDiagnosticsClientGetRootResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ContainerApp); err != nil {\n\t\treturn ContainerAppsDiagnosticsClientGetRootResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ReplicationProtectionContainersClient) getHandleResponse(resp *http.Response) (ReplicationProtectionContainersClientGetResponse, error) {\n\tresult := ReplicationProtectionContainersClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ProtectionContainer); err != nil {\n\t\treturn ReplicationProtectionContainersClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *Client) getEmbeddingsHandleResponse(resp *http.Response) (GetEmbeddingsResponse, error) {\n\tresult := GetEmbeddingsResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Embeddings); err != nil {\n\t\treturn GetEmbeddingsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) getHostNameBindingHandleResponse(resp *http.Response) (WebAppsGetHostNameBindingResponse, error) {\n\tresult := WebAppsGetHostNameBindingResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, 
&result.HostNameBinding); err != nil {\n\t\treturn WebAppsGetHostNameBindingResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *RegistrationDefinitionsClient) listHandleResponse(resp *http.Response) (RegistrationDefinitionsClientListResponse, error) {\n\tresult := RegistrationDefinitionsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RegistrationDefinitionList); err != nil {\n\t\treturn RegistrationDefinitionsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *BuildServiceClient) getBuildResultHandleResponse(resp *http.Response) (BuildServiceClientGetBuildResultResponse, error) {\n\tresult := BuildServiceClientGetBuildResultResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.BuildResult); err != nil {\n\t\treturn BuildServiceClientGetBuildResultResponse{}, err\n\t}\n\treturn result, nil\n}" ]
[ "0.76707584", "0.621644", "0.617529", "0.6060703", "0.60193557", "0.59729016", "0.5909901", "0.5874821", "0.5868759", "0.5813757", "0.57856905", "0.5783472", "0.57720095", "0.5766566", "0.57617915", "0.5760955", "0.57606834", "0.5749354", "0.57492363", "0.572956", "0.5728529", "0.57213163", "0.5705168", "0.5702858", "0.5676205", "0.56486666", "0.5639825", "0.56319714", "0.5629528", "0.5627667", "0.5620363", "0.56170624", "0.5611811", "0.56114924", "0.5607766", "0.5603515", "0.5591774", "0.5588677", "0.55854315", "0.5584036", "0.5565719", "0.5543942", "0.55434936", "0.5543315", "0.5540855", "0.55389535", "0.55364454", "0.55318147", "0.552672", "0.5523335", "0.5513076", "0.5505308", "0.5503693", "0.5496195", "0.54932356", "0.5492931", "0.5489195", "0.548413", "0.5480101", "0.54537946", "0.5440487", "0.5429066", "0.54271716", "0.54218656", "0.54168737", "0.54119784", "0.5411533", "0.538122", "0.53765464", "0.5376111", "0.53740203", "0.53725266", "0.5372324", "0.53686017", "0.53662", "0.5359172", "0.53589237", "0.5357592", "0.5349699", "0.53490585", "0.5342232", "0.533927", "0.53366274", "0.5333562", "0.5332224", "0.5328307", "0.5326817", "0.53263557", "0.53177565", "0.5305247", "0.53029966", "0.52972555", "0.52954364", "0.52915794", "0.52814573", "0.5277765", "0.52759963", "0.52755433", "0.5273516", "0.52734905" ]
0.8222317
0
listCreateRequest creates the List request.
func (client *PolicyDefinitionsClient) listCreateRequest(ctx context.Context, options *PolicyDefinitionsListOptions) (*policy.Request, error) {
	urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions"
	if client.subscriptionID == "" {
		return nil, errors.New("parameter client.subscriptionID cannot be empty")
	}
	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
	req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("api-version", "2021-06-01")
	if options != nil && options.Top != nil {
		reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	unencodedParams := []string{req.Raw().URL.RawQuery}
	if options != nil && options.Filter != nil {
		unencodedParams = append(unencodedParams, "$filter="+*options.Filter)
	}
	req.Raw().URL.RawQuery = strings.Join(unencodedParams, "&")
	req.Raw().Header.Set("Accept", "application/json")
	return req, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewListRequest() *todopb.ListRequest {\n\tmessage := &todopb.ListRequest{}\n\treturn message\n}", "func (client *KustoOperationsClient) listCreateRequest(ctx context.Context, options *KustoOperationsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Synapse/kustooperations\"\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ManagedClustersClient) listCreateRequest(ctx context.Context, options *ManagedClustersClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedClusters\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *RoleDefinitionsClient) listCreateRequest(ctx context.Context, scope string, options *RoleDefinitionsListOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.Authorization/roleDefinitions\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\treqQP.Set(\"api-version\", \"2018-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *OperationsClient) listCreateRequest(ctx context.Context, options *OperationsListOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.RecoveryServices/operations\"\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *CloudServicesClient) listAllCreateRequest(ctx context.Context, options *CloudServicesClientListAllOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Compute/cloudServices\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-04\")\n\treq.Raw().URL.RawQuery = 
reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *RegistrationDefinitionsClient) listCreateRequest(ctx context.Context, scope string, options *RegistrationDefinitionsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.ManagedServices/registrationDefinitions\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-01-preview\")\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *CloudServicesClient) listCreateRequest(ctx context.Context, resourceGroupName string, options *CloudServicesClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-04\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ManagedInstancesClient) listCreateRequest(ctx context.Context, options *ManagedInstancesClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Sql/managedInstances\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ChildAvailabilityStatusesClient) listCreateRequest(ctx context.Context, resourceURI string, options *ChildAvailabilityStatusesClientListOptions) (*policy.Request, error) {\n\turlPath := \"/{resourceUri}/providers/Microsoft.ResourceHealth/childAvailabilityStatuses\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceUri}\", resourceURI)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-07-01\")\n\tif options != nil && options.Filter != 
nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *IPAllocationsClient) listCreateRequest(ctx context.Context, options *IPAllocationsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/IpAllocations\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *VirtualMachinesClient) listCreateRequest(ctx context.Context, options *VirtualMachinesClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-10-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *AvailabilitySetsClient) listCreateRequest(ctx context.Context, resourceGroupName string, options *AvailabilitySetsListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ExpressRoutePortsLocationsClient) listCreateRequest(ctx context.Context, options *ExpressRoutePortsLocationsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePortsLocations\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", 
url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateList(request *api.Request, title string) *CreateListRequest {\n\trequest.Add(\"title\", title)\n\n\treturn &CreateListRequest{\n\t\trequest: request,\n\t}\n}", "func (client *ClustersClient) listCreateRequest(ctx context.Context, options *ClustersClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.DBforPostgreSQL/serverGroupsv2\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-11-08\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *FactoriesClient) listCreateRequest(ctx context.Context, options *FactoriesClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.DataFactory/factories\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *SQLVirtualMachinesClient) listCreateRequest(ctx context.Context, options *SQLVirtualMachinesClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-03-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ClustersClient) listCreateRequest(ctx context.Context, options *ClustersListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.OperationalInsights/clusters\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := 
req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateGetApplicationListRequest() (request *GetApplicationListRequest) {\n\trequest = &GetApplicationListRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"mse\", \"2019-05-31\", \"GetApplicationList\", \"mse\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *FileServicesClient) listCreateRequest(ctx context.Context, resourceGroupName string, accountName string, options *FileServicesListOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2019-06-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func NewListRequest() *rolespb.ListRequest {\n\tmessage := &rolespb.ListRequest{}\n\treturn message\n}", "func (client *ActivityLogsClient) listCreateRequest(ctx context.Context, filter string, options *ActivityLogsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Insights/eventtypes/management/values\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2015-04-01\")\n\treqQP.Set(\"$filter\", filter)\n\tif options != nil && options.Select != nil {\n\t\treqQP.Set(\"$select\", *options.Select)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ServersClient) listCreateRequest(ctx context.Context, options *ServersClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Sql/servers\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ContainerGroupsClient) 
listCreateRequest(ctx context.Context, options *ContainerGroupsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.ContainerInstance/containerGroups\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *MonitorsClient) listCreateRequest(ctx context.Context, options *MonitorsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Datadog/monitors\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *AssociationsClient) listAllCreateRequest(ctx context.Context, scope string, options *AssociationsClientListAllOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.CustomProviders/associations\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-09-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *VirtualApplianceSKUsClient) listCreateRequest(ctx context.Context, options *VirtualApplianceSKUsListOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkVirtualApplianceSkus\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-07-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *RouteTablesClient) listCreateRequest(ctx context.Context, resourceGroupName string, options *RouteTablesListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn 
nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateListCasesRequest() (request *ListCasesRequest) {\n\trequest = &ListCasesRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"CCC\", \"2020-07-01\", \"ListCases\", \"CCC\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *CertificateOrdersClient) listCreateRequest(ctx context.Context, options *CertificateOrdersClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.CertificateRegistration/certificateOrders\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *AccountsClient) listCreateRequest(ctx context.Context, options *AccountsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.CognitiveServices/accounts\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateListEndpointsRequest() (request *ListEndpointsRequest) {\n\trequest = &ListEndpointsRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Location\", \"2015-06-12\", \"ListEndpoints\", \"location\", \"openAPI\")\n\treturn\n}", "func (client *RouteTablesClient) listAllCreateRequest(ctx context.Context, options *RouteTablesListAllOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeTables\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func 
(client *VirtualApplianceSitesClient) listCreateRequest(ctx context.Context, resourceGroupName string, networkVirtualApplianceName string, options *VirtualApplianceSitesListOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}/virtualApplianceSites\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{networkVirtualApplianceName}\", url.PathEscape(networkVirtualApplianceName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-07-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ApplyUpdatesClient) listCreateRequest(ctx context.Context, options *ApplyUpdatesClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Maintenance/applyUpdates\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *WebAppsClient) listCreateRequest(ctx context.Context, options *WebAppsListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Web/sites\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *CompliancesClient) listCreateRequest(ctx context.Context, scope string, options *CompliancesClientListOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.Security/compliances\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *RecommendationsClient) listCreateRequest(ctx context.Context, options *RecommendationsListOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/providers/Microsoft.Web/recommendations\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Featured != nil {\n\t\treqQP.Set(\"featured\", strconv.FormatBool(*options.Featured))\n\t}\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tunencodedParams := []string{req.Raw().URL.RawQuery}\n\tif options != nil && options.Filter != nil {\n\t\tunencodedParams = append(unencodedParams, \"$filter=\"+*options.Filter)\n\t}\n\treq.Raw().URL.RawQuery = strings.Join(unencodedParams, \"&\")\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *RecommendationsClient) listCreateRequest(ctx context.Context, options *RecommendationsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Web/recommendations\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Featured != nil {\n\t\treqQP.Set(\"featured\", strconv.FormatBool(*options.Featured))\n\t}\n\treqQP.Set(\"api-version\", \"2021-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tunencodedParams := []string{req.Raw().URL.RawQuery}\n\tif options != nil && options.Filter != nil {\n\t\tunencodedParams = append(unencodedParams, \"$filter=\"+*options.Filter)\n\t}\n\treq.Raw().URL.RawQuery = strings.Join(unencodedParams, \"&\")\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *VirtualRoutersClient) listCreateRequest(ctx context.Context, options *VirtualRoutersClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualRouters\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *VirtualMachineScaleSetsClient) listCreateRequest(ctx context.Context, resourceGroupName string, options *VirtualMachineScaleSetsListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif 
client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func createList(w io.Writer, r *http.Request) error {\n\tc := appengine.NewContext(r)\n\n\t// Check that the user is logged in.\n\t// - user.Current : http://golang.org/s/users#Current\n\n\t// Decode a list from the request body.\n\tlist := List{}\n\terr := json.NewDecoder(r.Body).Decode(&list)\n\tif err != nil {\n\t\treturn appErrorf(http.StatusBadRequest, \"decode list: %v\", err)\n\t}\n\n\tif list.Name == \"\" {\n\t\treturn appErrorf(http.StatusBadRequest, \"missing list name\")\n\t}\n\n\t// Set the Creator field of list as the email of the current user.\n\n\t// Put the List in the datastore.\n\tkey := datastore.NewIncompleteKey(c, listKind, nil)\n\tkey, err = datastore.Put(c, key, &list)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"create list: %v\", err)\n\t}\n\n\t// Update the encoded key and encode the list.\n\tlist.ID = key.Encode()\n\treturn json.NewEncoder(w).Encode(list)\n}", "func (client *CapacitiesClient) listCreateRequest(ctx context.Context, options *CapacitiesClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.PowerBIDedicated/capacities\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-01-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateListFileSystemsRequest() (request *ListFileSystemsRequest) {\n\trequest = &ListFileSystemsRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"DFS\", \"2018-06-20\", \"ListFileSystems\", \"alidfs\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *ReplicationsClient) listCreateRequest(ctx context.Context, resourceGroupName string, registryName string, options *ReplicationsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif registryName == \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, 
runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ApplicationClient) listOperationsCreateRequest(ctx context.Context, options *ApplicationClientListOperationsOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Solutions/operations\"\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *AgentsClient) listCreateRequest(ctx context.Context, resourceGroupName string, storageMoverName string, options *AgentsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorageMover/storageMovers/{storageMoverName}/agents\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif storageMoverName == \"\" {\n\t\treturn nil, errors.New(\"parameter storageMoverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{storageMoverName}\", url.PathEscape(storageMoverName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-07-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *RestorableDatabaseAccountsClient) listCreateRequest(ctx context.Context, options *RestorableDatabaseAccountsListOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.DocumentDB/restorableDatabaseAccounts\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-15\")\n\treq.URL.RawQuery = reqQP.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (c *Client) BuildListRequest(ctx context.Context, v interface{}) (*http.Request, error) {\n\tu := &url.URL{Scheme: c.scheme, Host: c.host, Path: ListResourcePath()}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, goahttp.ErrInvalidURL(\"resource\", \"List\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}", "func (client 
*TaskRunsClient) listCreateRequest(ctx context.Context, resourceGroupName string, registryName string, options *TaskRunsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/taskRuns\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif registryName == \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateListApplicationLogsRequest() (request *ListApplicationLogsRequest) {\n\trequest = &ListApplicationLogsRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"companyreg\", \"2019-05-08\", \"ListApplicationLogs\", \"companyreg\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *ReservationsDetailsClient) listCreateRequest(ctx context.Context, scope string, options *ReservationsDetailsListOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.Consumption/reservationDetails\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.StartDate != nil {\n\t\treqQP.Set(\"startDate\", *options.StartDate)\n\t}\n\tif options != nil && options.EndDate != nil {\n\t\treqQP.Set(\"endDate\", *options.EndDate)\n\t}\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\tif options != nil && options.ReservationID != nil {\n\t\treqQP.Set(\"reservationId\", *options.ReservationID)\n\t}\n\tif options != nil && options.ReservationOrderID != nil {\n\t\treqQP.Set(\"reservationOrderId\", *options.ReservationOrderID)\n\t}\n\treqQP.Set(\"api-version\", \"2021-10-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *VirtualNetworkLinksClient) listCreateRequest(ctx context.Context, resourceGroupName string, privateZoneName string, options *VirtualNetworkLinksListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/virtualNetworkLinks\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif privateZoneName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateZoneName cannot be 
empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateZoneName}\", url.PathEscape(privateZoneName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\treqQP.Set(\"api-version\", \"2020-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateListBucketsRequest() (request *ListBucketsRequest) {\n\trequest = &ListBucketsRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"vs\", \"2018-12-12\", \"ListBuckets\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateListServerGroupsRequest() (request *ListServerGroupsRequest) {\n\trequest = &ListServerGroupsRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Alb\", \"2020-06-16\", \"ListServerGroups\", \"alb\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *VirtualMachineScaleSetsClient) listAllCreateRequest(ctx context.Context, options *VirtualMachineScaleSetsListAllOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachineScaleSets\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *DiskEncryptionSetsClient) listCreateRequest(ctx context.Context, options *DiskEncryptionSetsListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskEncryptionSets\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *TagRulesClient) listCreateRequest(ctx context.Context, resourceGroupName string, monitorName string, options *TagRulesClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Dynatrace.Observability/monitors/{monitorName}/tagRules\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", 
url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif monitorName == \"\" {\n\t\treturn nil, errors.New(\"parameter monitorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{monitorName}\", url.PathEscape(monitorName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-27\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *VirtualMachineImageTemplatesClient) listCreateRequest(ctx context.Context, options *VirtualMachineImageTemplatesClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.VirtualMachineImages/imageTemplates\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-10-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *VirtualNetworkTapsClient) listAllCreateRequest(ctx context.Context, options *VirtualNetworkTapsListAllOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualNetworkTaps\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-07-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *Client) listCreateRequest(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, resourceType string, resourceName string, options *ClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}/providers/Microsoft.Resources/changes\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif resourceProviderNamespace == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceProviderNamespace cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceProviderNamespace}\", url.PathEscape(resourceProviderNamespace))\n\tif resourceType == \"\" {\n\t\treturn nil, 
errors.New(\"parameter resourceType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceType}\", url.PathEscape(resourceType))\n\tif resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(resourceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-05-01\")\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(*options.Top, 10))\n\t}\n\tif options != nil && options.SkipToken != nil {\n\t\treqQP.Set(\"$skipToken\", *options.SkipToken)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *FactoriesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *FactoriesClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateUpdateOrderListRequest() (request *UpdateOrderListRequest) {\n\trequest = &UpdateOrderListRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"CSB\", \"2017-11-18\", \"UpdateOrderList\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *SapMonitorsClient) listCreateRequest(ctx context.Context, options *SapMonitorsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.HanaOnAzure/sapMonitors\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-02-07-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ChargesClient) listCreateRequest(ctx context.Context, scope string, options *ChargesClientListOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.Consumption/charges\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, 
runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-10-01\")\n\tif options != nil && options.StartDate != nil {\n\t\treqQP.Set(\"startDate\", *options.StartDate)\n\t}\n\tif options != nil && options.EndDate != nil {\n\t\treqQP.Set(\"endDate\", *options.EndDate)\n\t}\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\tif options != nil && options.Apply != nil {\n\t\treqQP.Set(\"$apply\", *options.Apply)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateListResourceTagsRequest() (request *ListResourceTagsRequest) {\n\trequest = &ListResourceTagsRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"OutboundBot\", \"2019-12-26\", \"ListResourceTags\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateListAlertMessagesRequest() (request *ListAlertMessagesRequest) {\n\trequest = &ListAlertMessagesRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"dataworks-public\", \"2020-05-18\", \"ListAlertMessages\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *ManagedClustersClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ManagedClustersClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateListExperimentGroupsRequest() (request *ListExperimentGroupsRequest) {\n\trequest = &ListExperimentGroupsRequest{\n\t\tRoaRequest: &requests.RoaRequest{},\n\t}\n\trequest.InitWithApiInfo(\"PaiRecService\", \"2022-12-13\", \"ListExperimentGroups\", \"/api/v1/experimentgroups\", \"\", \"\")\n\trequest.Method = requests.GET\n\treturn\n}", "func (c *Client) BuildListRequest(ctx context.Context, v any) (*http.Request, error) {\n\tu := &url.URL{Scheme: c.scheme, Host: c.host, Path: ListStoragePath()}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, goahttp.ErrInvalidURL(\"storage\", \"list\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}", "func (client *CustomDomainsClient) listCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, appName string, options *CustomDomainsListOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/domains\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif appName == \"\" {\n\t\treturn nil, errors.New(\"parameter appName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{appName}\", url.PathEscape(appName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-09-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *IncidentsClient) listCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, options *IncidentsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/incidents\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-05-01-preview\")\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\tif options != nil && options.Orderby != nil {\n\t\treqQP.Set(\"$orderby\", *options.Orderby)\n\t}\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\tif options != nil && options.SkipToken != nil {\n\t\treqQP.Set(\"$skipToken\", *options.SkipToken)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *PublicIPAddressesClient) listCreateRequest(ctx context.Context, resourceGroupName string, options *PublicIPAddressesClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be 
empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (c *Client) BuildListRequest(ctx context.Context, v interface{}) (*http.Request, error) {\n\tu := &url.URL{Scheme: c.scheme, Host: c.host, Path: ListRecipePath()}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, goahttp.ErrInvalidURL(\"recipe\", \"list\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}", "func (client *SubscriptionClient) listCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, options *SubscriptionClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/subscriptions\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\tif options != nil && options.Skip != nil {\n\t\treqQP.Set(\"$skip\", strconv.FormatInt(int64(*options.Skip), 10))\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (temp *Template) ListRequest() (ListCommand, error) {\n\treq := &ListTemplates{\n\t\tName: temp.Name,\n\t\tAccount: temp.Account,\n\t\tDomainID: temp.DomainID,\n\t\tID: temp.ID,\n\t\tProjectID: temp.ProjectID,\n\t\tZoneID: temp.ZoneID,\n\t\tHypervisor: temp.Hypervisor,\n\t\t//TODO Tags\n\t}\n\tif temp.IsFeatured {\n\t\treq.TemplateFilter = \"featured\"\n\t}\n\tif temp.Removed != \"\" {\n\t\t*req.ShowRemoved = true\n\t}\n\n\treturn req, nil\n}", "func (client *PublicIPAddressesClient) listAllCreateRequest(ctx context.Context, options *PublicIPAddressesClientListAllOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPAddresses\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, 
errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateListServiceSourceRequest() (request *ListServiceSourceRequest) {\n\trequest = &ListServiceSourceRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"mse\", \"2019-05-31\", \"ListServiceSource\", \"mse\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *VirtualMachinesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *VirtualMachinesClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-10-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateListManagedPrivateSpacesRequest() (request *ListManagedPrivateSpacesRequest) {\n\trequest = &ListManagedPrivateSpacesRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"SmartHosting\", \"2020-08-01\", \"ListManagedPrivateSpaces\", \"smarthosting\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateListDevicesImagesRequest() (request *ListDevicesImagesRequest) {\n\trequest = &ListDevicesImagesRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"reid\", \"2019-09-28\", \"ListDevicesImages\", \"1.1.8.2\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *ReplicationvCentersClient) listCreateRequest(ctx context.Context, options *ReplicationvCentersClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationvCenters\"\n\tif client.resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(client.resourceName))\n\tif client.resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(client.resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be 
empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *DefenderSettingsClient) listCreateRequest(ctx context.Context, options *DefenderSettingsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.IoTSecurity/defenderSettings\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateListEndpointsRequest() (request *ListEndpointsRequest) {\n\trequest = &ListEndpointsRequest{\n\t\tRoaRequest: &requests.RoaRequest{},\n\t}\n\trequest.InitWithApiInfo(\"PAIElasticDatasetAccelerator\", \"2022-08-01\", \"ListEndpoints\", \"/api/v1/endpoints\", \"datasetacc\", \"openAPI\")\n\trequest.Method = requests.GET\n\treturn\n}", "func (client *ReplicationsClient) listCreateRequest(ctx context.Context, resourceGroupName string, registryName string, options *ReplicationsListOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif registryName == \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.URL.RawQuery = reqQP.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *AlertsClient) listForScopeCreateRequest(ctx context.Context, scope string, options *AlertsClientListForScopeOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.Authorization/roleManagementAlerts\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", 
\"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *SQLVirtualMachinesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *SQLVirtualMachinesClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-03-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *VideosClient) listCreateRequest(ctx context.Context, resourceGroupName string, accountName string, options *VideosClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/videos\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-11-01-preview\")\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *WorkspacesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *WorkspacesListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, 
runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *DevicesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *DevicesClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HybridNetwork/devices\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *IPAllocationsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *IPAllocationsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ManagedInstancesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ManagedInstancesClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Expand != nil 
{\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *CassandraClustersClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *CassandraClustersClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-03-15-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (c *Client) BuildListRequest(ctx context.Context, v interface{}) (*http.Request, error) {\n\tu := &url.URL{Scheme: c.scheme, Host: c.host, Path: ListLogPath()}\n\treq, err := http.NewRequest(\"POST\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, goahttp.ErrInvalidURL(\"log\", \"list\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}", "func (client *AccountsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *AccountsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CognitiveServices/accounts\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ConnectedEnvironmentsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ConnectedEnvironmentsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/connectedEnvironments\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn 
nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (c *Client) BuildListRequest(ctx context.Context, v interface{}) (*http.Request, error) {\n\tu := &url.URL{Scheme: c.scheme, Host: c.host, Path: ListRecorderPath()}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, goahttp.ErrInvalidURL(\"recorder\", \"list\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}", "func NewServiceInfoRequest_List(s *capnp.Segment, sz int32) (ServiceInfoRequest_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz)\n\treturn ServiceInfoRequest_List{l}, err\n}", "func (client *WebhooksClient) listCreateRequest(ctx context.Context, resourceGroupName string, registryName string, options *WebhooksClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif registryName == \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}" ]
[ "0.78801435", "0.75768954", "0.75035113", "0.74889725", "0.7465826", "0.74646765", "0.7455195", "0.743538", "0.74343836", "0.7423846", "0.73989666", "0.7356956", "0.73457086", "0.73375124", "0.73339075", "0.7325205", "0.73243415", "0.7319237", "0.7268763", "0.7210874", "0.7205136", "0.7180776", "0.71643037", "0.7158634", "0.71564543", "0.71554345", "0.7147391", "0.71456146", "0.7136202", "0.7119943", "0.7112119", "0.71101385", "0.7089599", "0.7030039", "0.702362", "0.7018698", "0.7011222", "0.7002993", "0.6996092", "0.698899", "0.69870347", "0.69757056", "0.6965319", "0.6949791", "0.6948139", "0.6929731", "0.692754", "0.69158065", "0.6912015", "0.69031286", "0.69030076", "0.68820304", "0.68646914", "0.6863448", "0.6861379", "0.6859636", "0.68561226", "0.6845171", "0.6838913", "0.6837434", "0.68262935", "0.68233174", "0.68205935", "0.6816438", "0.68150043", "0.68107414", "0.6792686", "0.6781865", "0.67802685", "0.6771401", "0.6771039", "0.6769458", "0.6768558", "0.6764701", "0.67533904", "0.67459613", "0.674128", "0.67409647", "0.67382056", "0.67378134", "0.673277", "0.6732179", "0.67305964", "0.6707539", "0.6696761", "0.6696369", "0.6695703", "0.6694347", "0.66942436", "0.66910523", "0.6690709", "0.6681842", "0.6681126", "0.6676278", "0.6670059", "0.66681385", "0.66552925", "0.6647682", "0.6633289", "0.6624306" ]
0.7192149
21
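Editorial note: the listCreateRequest variants collected in the negatives above all share one request-builder shape — path parameters are substituted with url.PathEscape, an api-version query value is set, and the Accept header asks for JSON. The sketch below reproduces that shape with the Go standard library only; the host, resource path, and api-version are placeholder values rather than a real Azure endpoint, and the SDK's runtime/policy pipeline is deliberately left out.

// A minimal, stdlib-only sketch of the request-builder pattern shown above.
// The endpoint, resource path, and api-version are hypothetical placeholders.
package main

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

func buildListRequest(ctx context.Context, host, subscriptionID string) (*http.Request, error) {
	if subscriptionID == "" {
		return nil, errors.New("parameter subscriptionID cannot be empty")
	}
	urlPath := "/subscriptions/{subscriptionId}/providers/Example.Provider/widgets" // hypothetical resource type
	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(subscriptionID))
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, host+urlPath, nil)
	if err != nil {
		return nil, err
	}
	q := req.URL.Query()
	q.Set("api-version", "2021-01-01") // placeholder API version
	req.URL.RawQuery = q.Encode()
	req.Header.Set("Accept", "application/json")
	return req, nil
}

func main() {
	req, err := buildListRequest(context.Background(), "https://management.example.invalid", "00000000-0000-0000-0000-000000000000")
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Method, req.URL.String())
}

Optional parameters (for example $filter or $top in the snippets above) follow the same pattern: check the options struct for nil before setting the corresponding query value.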
listHandleResponse handles the List response.
func (client *PolicyDefinitionsClient) listHandleResponse(resp *http.Response) (PolicyDefinitionsListResponse, error) {
	result := PolicyDefinitionsListResponse{RawResponse: resp}
	if err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinitionListResult); err != nil {
		return PolicyDefinitionsListResponse{}, runtime.NewResponseError(err, resp)
	}
	return result, nil
}
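Editorial note: a quick way to exercise a handler of this shape is to hand it a canned *http.Response. The sketch below does that for a stripped-down equivalent using only the standard library; ListResult and its fields are illustrative stand-ins, not the SDK's generated types, and the real handler's runtime.UnmarshalAsJSON helper is replaced with plain encoding/json.

// A self-contained sketch of the list-response-handler shape above, exercised
// with a canned *http.Response. The ListResult type is a hypothetical stand-in.
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strings"
)

type ListResult struct {
	Value    []string `json:"value"`
	NextLink string   `json:"nextLink"`
}

// listHandleResponse mirrors the pattern above: decode the JSON body into the
// typed result and propagate any unmarshalling error.
func listHandleResponse(resp *http.Response) (ListResult, error) {
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ListResult{}, err
	}
	var result ListResult
	if err := json.Unmarshal(body, &result); err != nil {
		return ListResult{}, err
	}
	return result, nil
}

func main() {
	resp := &http.Response{
		StatusCode: http.StatusOK,
		Body:       io.NopCloser(strings.NewReader(`{"value":["a","b"],"nextLink":""}`)),
	}
	result, err := listHandleResponse(resp)
	fmt.Println(result.Value, err)
}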
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (client *LiveOutputsClient) listHandleResponse(resp *http.Response) (LiveOutputsClientListResponse, error) {\n\tresult := LiveOutputsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.LiveOutputListResult); err != nil {\n\t\treturn LiveOutputsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ReplicationsClient) listHandleResponse(resp *azcore.Response) (ReplicationListResultResponse, error) {\n\tvar val *ReplicationListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn ReplicationListResultResponse{}, err\n\t}\n\treturn ReplicationListResultResponse{RawResponse: resp.Response, ReplicationListResult: val}, nil\n}", "func (client *ServersClient) listHandleResponse(resp *http.Response) (ServersClientListResponse, error) {\n\tresult := ServersClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ServerListResult); err != nil {\n\t\treturn ServersClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *Client) listHandleResponse(resp *http.Response) (ClientListResponse, error) {\n\tresult := ClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ChangeResourceListResult); err != nil {\n\t\treturn ClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *OperationsClient) listHandleResponse(resp *http.Response) (OperationsListResponse, error) {\n\tresult := OperationsListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ClientDiscoveryResponse); err != nil {\n\t\treturn OperationsListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *AnalysisResultsClient) listHandleResponse(resp *http.Response) (AnalysisResultsClientListResponse, error) {\n\tresult := AnalysisResultsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AnalysisResultListResult); err != nil {\n\t\treturn AnalysisResultsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AccountsClient) listHandleResponse(resp *http.Response) (AccountsClientListResponse, error) {\n\tresult := AccountsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AccountListResult); err != nil {\n\t\treturn AccountsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *RecordSetsClient) listHandleResponse(resp *azcore.Response) (RecordSetListResultResponse, error) {\n\tvar val *RecordSetListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn RecordSetListResultResponse{}, err\n\t}\n\treturn RecordSetListResultResponse{RawResponse: resp.Response, RecordSetListResult: val}, nil\n}", "func (client *ManagedInstancesClient) listHandleResponse(resp *http.Response) (ManagedInstancesClientListResponse, error) {\n\tresult := ManagedInstancesClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedInstanceListResult); err != nil {\n\t\treturn ManagedInstancesClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VideosClient) listHandleResponse(resp *http.Response) (VideosClientListResponse, error) {\n\tresult := VideosClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VideoEntityCollection); err != nil {\n\t\treturn VideosClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) listHandleResponse(resp *http.Response) (WebAppsListResponse, error) {\n\tresult := WebAppsListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.WebAppCollection); err 
!= nil {\n\t\treturn WebAppsListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *SQLVirtualMachinesClient) listHandleResponse(resp *http.Response) (SQLVirtualMachinesClientListResponse, error) {\n\tresult := SQLVirtualMachinesClientListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ListResult); err != nil {\n\t\treturn SQLVirtualMachinesClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *FileServicesClient) listHandleResponse(resp *azcore.Response) (FileServiceItemsResponse, error) {\n\tvar val *FileServiceItems\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn FileServiceItemsResponse{}, err\n\t}\n\treturn FileServiceItemsResponse{RawResponse: resp.Response, FileServiceItems: val}, nil\n}", "func (client *VirtualApplianceSKUsClient) listHandleResponse(resp *azcore.Response) (NetworkVirtualApplianceSKUListResultResponse, error) {\n\tvar val *NetworkVirtualApplianceSKUListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn NetworkVirtualApplianceSKUListResultResponse{}, err\n\t}\n\treturn NetworkVirtualApplianceSKUListResultResponse{RawResponse: resp.Response, NetworkVirtualApplianceSKUListResult: val}, nil\n}", "func (client *RecommendationsClient) listHandleResponse(resp *http.Response) (RecommendationsListResponse, error) {\n\tresult := RecommendationsListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RecommendationCollection); err != nil {\n\t\treturn RecommendationsListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *KustoOperationsClient) listHandleResponse(resp *http.Response) (KustoOperationsClientListResponse, error) {\n\tresult := KustoOperationsClientListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.OperationListResult); err != nil {\n\t\treturn KustoOperationsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ClustersClient) listHandleResponse(resp *http.Response) (ClustersListResponse, error) {\n\tresult := ClustersListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ClusterListResult); err != nil {\n\t\treturn ClustersListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebhooksClient) listHandleResponse(resp *http.Response) (WebhooksClientListResponse, error) {\n\tresult := WebhooksClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.WebhookListResult); err != nil {\n\t\treturn WebhooksClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ClustersClient) listHandleResponse(resp *http.Response) (ClustersClientListResponse, error) {\n\tresult := ClustersClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ClusterListResult); err != nil {\n\t\treturn ClustersClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *CloudServicesClient) listHandleResponse(resp *http.Response) (CloudServicesClientListResponse, error) {\n\tresult := CloudServicesClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CloudServiceListResult); err != nil {\n\t\treturn CloudServicesClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *RecommendationsClient) listHandleResponse(resp *http.Response) (RecommendationsClientListResponse, error) {\n\tresult := RecommendationsClientListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, 
&result.RecommendationCollection); err != nil {\n\t\treturn RecommendationsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ContainerGroupsClient) listHandleResponse(resp *http.Response) (ContainerGroupsClientListResponse, error) {\n\tresult := ContainerGroupsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ContainerGroupListResult); err != nil {\n\t\treturn ContainerGroupsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *CustomDomainsClient) listHandleResponse(resp *http.Response) (CustomDomainsListResponse, error) {\n\tresult := CustomDomainsListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CustomDomainResourceCollection); err != nil {\n\t\treturn CustomDomainsListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *RouteTablesClient) listHandleResponse(resp *http.Response) (RouteTablesListResponse, error) {\n\tresult := RouteTablesListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RouteTableListResult); err != nil {\n\t\treturn RouteTablesListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *RoleDefinitionsClient) listHandleResponse(resp *http.Response) (RoleDefinitionsListResponse, error) {\n\tresult := RoleDefinitionsListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RoleDefinitionListResult); err != nil {\n\t\treturn RoleDefinitionsListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ManagedClustersClient) listHandleResponse(resp *http.Response) (ManagedClustersClientListResponse, error) {\n\tresult := ManagedClustersClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedClusterListResult); err != nil {\n\t\treturn ManagedClustersClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualApplianceSitesClient) listHandleResponse(resp *azcore.Response) (NetworkVirtualApplianceSiteListResultResponse, error) {\n\tvar val *NetworkVirtualApplianceSiteListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn NetworkVirtualApplianceSiteListResultResponse{}, err\n\t}\n\treturn NetworkVirtualApplianceSiteListResultResponse{RawResponse: resp.Response, NetworkVirtualApplianceSiteListResult: val}, nil\n}", "func (client *ScriptExecutionsClient) listHandleResponse(resp *http.Response) (ScriptExecutionsClientListResponse, error) {\n\tresult := ScriptExecutionsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ScriptExecutionsList); err != nil {\n\t\treturn ScriptExecutionsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *MonitorsClient) listHandleResponse(resp *http.Response) (MonitorsClientListResponse, error) {\n\tresult := MonitorsClientListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.MonitorResourceListResponse); err != nil {\n\t\treturn MonitorsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *IncidentsClient) listHandleResponse(resp *http.Response) (IncidentsClientListResponse, error) {\n\tresult := IncidentsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.IncidentList); err != nil {\n\t\treturn IncidentsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualNetworkLinksClient) listHandleResponse(resp *http.Response) (VirtualNetworkLinksListResponse, error) {\n\tresult := 
VirtualNetworkLinksListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VirtualNetworkLinkListResult); err != nil {\n\t\treturn VirtualNetworkLinksListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ActivityLogsClient) listHandleResponse(resp *http.Response) (ActivityLogsClientListResponse, error) {\n\tresult := ActivityLogsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.EventDataCollection); err != nil {\n\t\treturn ActivityLogsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ReplicationvCentersClient) listHandleResponse(resp *http.Response) (ReplicationvCentersClientListResponse, error) {\n\tresult := ReplicationvCentersClientListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VCenterCollection); err != nil {\n\t\treturn ReplicationvCentersClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ExpressRoutePortsLocationsClient) listHandleResponse(resp *http.Response) (ExpressRoutePortsLocationsClientListResponse, error) {\n\tresult := ExpressRoutePortsLocationsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ExpressRoutePortsLocationListResult); err != nil {\n\t\treturn ExpressRoutePortsLocationsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ChildAvailabilityStatusesClient) listHandleResponse(resp *http.Response) (ChildAvailabilityStatusesClientListResponse, error) {\n\tresult := ChildAvailabilityStatusesClientListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AvailabilityStatusListResult); err != nil {\n\t\treturn ChildAvailabilityStatusesClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *TagRulesClient) listHandleResponse(resp *http.Response) (TagRulesClientListResponse, error) {\n\tresult := TagRulesClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.TagRuleListResult); err != nil {\n\t\treturn TagRulesClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ReplicationsClient) listHandleResponse(resp *http.Response) (ReplicationsClientListResponse, error) {\n\tresult := ReplicationsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ReplicationListResult); err != nil {\n\t\treturn ReplicationsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *FactoriesClient) listHandleResponse(resp *http.Response) (FactoriesClientListResponse, error) {\n\tresult := FactoriesClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.FactoryListResponse); err != nil {\n\t\treturn FactoriesClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *RegistrationDefinitionsClient) listHandleResponse(resp *http.Response) (RegistrationDefinitionsClientListResponse, error) {\n\tresult := RegistrationDefinitionsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RegistrationDefinitionList); err != nil {\n\t\treturn RegistrationDefinitionsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SubscriptionClient) listHandleResponse(resp *http.Response) (SubscriptionClientListResponse, error) {\n\tresult := SubscriptionClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SubscriptionCollection); err != nil {\n\t\treturn SubscriptionClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *TaskRunsClient) listHandleResponse(resp *http.Response) 
(TaskRunsClientListResponse, error) {\n\tresult := TaskRunsClientListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.TaskRunListResult); err != nil {\n\t\treturn TaskRunsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AgentsClient) listHandleResponse(resp *http.Response) (AgentsClientListResponse, error) {\n\tresult := AgentsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AgentList); err != nil {\n\t\treturn AgentsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ReservationsDetailsClient) listHandleResponse(resp *http.Response) (ReservationsDetailsListResponse, error) {\n\tresult := ReservationsDetailsListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ReservationDetailsListResult); err != nil {\n\t\treturn ReservationsDetailsListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *VirtualRoutersClient) listHandleResponse(resp *http.Response) (VirtualRoutersClientListResponse, error) {\n\tresult := VirtualRoutersClientListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VirtualRouterListResult); err != nil {\n\t\treturn VirtualRoutersClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ReplicationProtectionContainersClient) listHandleResponse(resp *http.Response) (ReplicationProtectionContainersClientListResponse, error) {\n\tresult := ReplicationProtectionContainersClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ProtectionContainerCollection); err != nil {\n\t\treturn ReplicationProtectionContainersClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AvailabilitySetsClient) listHandleResponse(resp *http.Response) (AvailabilitySetsListResponse, error) {\n\tresult := AvailabilitySetsListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AvailabilitySetListResult); err != nil {\n\t\treturn AvailabilitySetsListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *FirewallRulesClient) listHandleResponse(resp *http.Response) (FirewallRulesClientListResponse, error) {\n\tresult := FirewallRulesClientListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.FirewallRuleListResult); err != nil {\n\t\treturn FirewallRulesClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *CloudServiceRoleInstancesClient) listHandleResponse(resp *azcore.Response) (RoleInstanceListResultResponse, error) {\n\tvar val *RoleInstanceListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn RoleInstanceListResultResponse{}, err\n\t}\n\treturn RoleInstanceListResultResponse{RawResponse: resp.Response, RoleInstanceListResult: val}, nil\n}", "func (client *WebhooksClient) listEventsHandleResponse(resp *http.Response) (WebhooksClientListEventsResponse, error) {\n\tresult := WebhooksClientListEventsResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.EventListResult); err != nil {\n\t\treturn WebhooksClientListEventsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualMachinesClient) listHandleResponse(resp *http.Response) (VirtualMachinesClientListResponse, error) {\n\tresult := VirtualMachinesClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachinesList); err != nil {\n\t\treturn VirtualMachinesClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client 
*IPAllocationsClient) listHandleResponse(resp *http.Response) (IPAllocationsClientListResponse, error) {\n\tresult := IPAllocationsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.IPAllocationListResult); err != nil {\n\t\treturn IPAllocationsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VendorNetworkFunctionsClient) listHandleResponse(resp *http.Response) (VendorNetworkFunctionsClientListResponse, error) {\n\tresult := VendorNetworkFunctionsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VendorNetworkFunctionListResult); err != nil {\n\t\treturn VendorNetworkFunctionsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ApplyUpdatesClient) listHandleResponse(resp *http.Response) (ApplyUpdatesClientListResponse, error) {\n\tresult := ApplyUpdatesClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ListApplyUpdate); err != nil {\n\t\treturn ApplyUpdatesClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *APIClient) listByTagsHandleResponse(resp *http.Response) (APIClientListByTagsResponse, error) {\n\tresult := APIClientListByTagsResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.TagResourceCollection); err != nil {\n\t\treturn APIClientListByTagsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *MachineExtensionsClient) listHandleResponse(resp *http.Response) (MachineExtensionsClientListResponse, error) {\n\tresult := MachineExtensionsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.MachineExtensionsListResult); err != nil {\n\t\treturn MachineExtensionsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *LocalRulestacksClient) listAppIDsHandleResponse(resp *http.Response) (LocalRulestacksClientListAppIDsResponse, error) {\n\tresult := LocalRulestacksClientListAppIDsResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ListAppIDResponse); err != nil {\n\t\treturn LocalRulestacksClientListAppIDsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ChargesClient) listHandleResponse(resp *http.Response) (ChargesClientListResponse, error) {\n\tresult := ChargesClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ChargesListResult); err != nil {\n\t\treturn ChargesClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *CertificateOrdersClient) listHandleResponse(resp *http.Response) (CertificateOrdersClientListResponse, error) {\n\tresult := CertificateOrdersClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CertificateOrderCollection); err != nil {\n\t\treturn CertificateOrdersClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualMachineImageTemplatesClient) listHandleResponse(resp *http.Response) (VirtualMachineImageTemplatesClientListResponse, error) {\n\tresult := VirtualMachineImageTemplatesClientListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ImageTemplateListResult); err != nil {\n\t\treturn VirtualMachineImageTemplatesClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *CloudServicesClient) listAllHandleResponse(resp *http.Response) (CloudServicesClientListAllResponse, error) {\n\tresult := CloudServicesClientListAllResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CloudServiceListResult); err != nil {\n\t\treturn CloudServicesClientListAllResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client 
*ApplicationTypeVersionsClient) listHandleResponse(resp *http.Response) (ApplicationTypeVersionsClientListResponse, error) {\n\tresult := ApplicationTypeVersionsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ApplicationTypeVersionResourceList); err != nil {\n\t\treturn ApplicationTypeVersionsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *APIClient) listByServiceHandleResponse(resp *http.Response) (APIClientListByServiceResponse, error) {\n\tresult := APIClientListByServiceResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.APICollection); err != nil {\n\t\treturn APIClientListByServiceResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualNetworkTapsClient) listAllHandleResponse(resp *azcore.Response) (VirtualNetworkTapListResultResponse, error) {\n\tvar val *VirtualNetworkTapListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn VirtualNetworkTapListResultResponse{}, err\n\t}\n\treturn VirtualNetworkTapListResultResponse{RawResponse: resp.Response, VirtualNetworkTapListResult: val}, nil\n}", "func (client *RouteTablesClient) listAllHandleResponse(resp *http.Response) (RouteTablesListAllResponse, error) {\n\tresult := RouteTablesListAllResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RouteTableListResult); err != nil {\n\t\treturn RouteTablesListAllResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client Client) ListResponder(resp *http.Response) (result ListResult, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *GroupClient) listByServiceHandleResponse(resp *http.Response) (GroupListByServiceResponse, error) {\n\tresult := GroupListByServiceResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GroupCollection); err != nil {\n\t\treturn GroupListByServiceResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *PublicIPAddressesClient) listHandleResponse(resp *http.Response) (PublicIPAddressesClientListResponse, error) {\n\tresult := PublicIPAddressesClientListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PublicIPAddressListResult); err != nil {\n\t\treturn PublicIPAddressesClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ApplicationClient) listOperationsHandleResponse(resp *http.Response) (ApplicationClientListOperationsResponse, error) {\n\tresult := ApplicationClientListOperationsResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.OperationListResult); err != nil {\n\t\treturn ApplicationClientListOperationsResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *SapMonitorsClient) listHandleResponse(resp *http.Response) (SapMonitorsClientListResponse, error) {\n\tresult := SapMonitorsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SapMonitorListResult); err != nil {\n\t\treturn SapMonitorsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ManagedDatabasesClient) listByInstanceHandleResponse(resp *http.Response) (ManagedDatabasesClientListByInstanceResponse, error) {\n\tresult := ManagedDatabasesClientListByInstanceResponse{}\n\tif err := 
runtime.UnmarshalAsJSON(resp, &result.ManagedDatabaseListResult); err != nil {\n\t\treturn ManagedDatabasesClientListByInstanceResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DefenderSettingsClient) listHandleResponse(resp *http.Response) (DefenderSettingsClientListResponse, error) {\n\tresult := DefenderSettingsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DefenderSettingsList); err != nil {\n\t\treturn DefenderSettingsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *CapacitiesClient) listHandleResponse(resp *http.Response) (CapacitiesClientListResponse, error) {\n\tresult := CapacitiesClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DedicatedCapacities); err != nil {\n\t\treturn CapacitiesClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AssociationsClient) listAllHandleResponse(resp *http.Response) (AssociationsClientListAllResponse, error) {\n\tresult := AssociationsClientListAllResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AssociationsList); err != nil {\n\t\treturn AssociationsClientListAllResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *CompliancesClient) listHandleResponse(resp *http.Response) (CompliancesClientListResponse, error) {\n\tresult := CompliancesClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ComplianceList); err != nil {\n\t\treturn CompliancesClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualMachineScaleSetVMRunCommandsClient) listHandleResponse(resp *http.Response) (VirtualMachineScaleSetVMRunCommandsListResponse, error) {\n\tresult := VirtualMachineScaleSetVMRunCommandsListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineRunCommandsListResult); err != nil {\n\t\treturn VirtualMachineScaleSetVMRunCommandsListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *PrivateDNSZoneGroupsClient) listHandleResponse(resp *http.Response) (PrivateDNSZoneGroupsClientListResponse, error) {\n\tresult := PrivateDNSZoneGroupsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PrivateDNSZoneGroupListResult); err != nil {\n\t\treturn PrivateDNSZoneGroupsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SyncGroupsClient) listByDatabaseHandleResponse(resp *http.Response) (SyncGroupsClientListByDatabaseResponse, error) {\n\tresult := SyncGroupsClientListByDatabaseResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SyncGroupListResult); err != nil {\n\t\treturn SyncGroupsClientListByDatabaseResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SyncGroupsClient) listLogsHandleResponse(resp *http.Response) (SyncGroupsClientListLogsResponse, error) {\n\tresult := SyncGroupsClientListLogsResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SyncGroupLogListResult); err != nil {\n\t\treturn SyncGroupsClientListLogsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *RestorableDatabaseAccountsClient) listHandleResponse(resp *azcore.Response) (RestorableDatabaseAccountsListResponse, error) {\n\tresult := RestorableDatabaseAccountsListResponse{RawResponse: resp.Response}\n\tif err := resp.UnmarshalAsJSON(&result.RestorableDatabaseAccountsListResult); err != nil {\n\t\treturn RestorableDatabaseAccountsListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ServersClient) listKeysHandleResponse(resp 
*http.Response) (ServersClientListKeysResponse, error) {\n\tresult := ServersClientListKeysResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ServerKeys); err != nil {\n\t\treturn ServersClientListKeysResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AccountsClient) listKeysHandleResponse(resp *http.Response) (AccountsClientListKeysResponse, error) {\n\tresult := AccountsClientListKeysResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.APIKeys); err != nil {\n\t\treturn AccountsClientListKeysResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DiskEncryptionSetsClient) listHandleResponse(resp *http.Response) (DiskEncryptionSetsListResponse, error) {\n\tresult := DiskEncryptionSetsListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DiskEncryptionSetList); err != nil {\n\t\treturn DiskEncryptionSetsListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (s *PhotoStreamServer) ListHandler(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(\"received list request\")\n\n\timageDir := s.RootDir + \"/assets/images\"\n\tfiles, err := ioutil.ReadDir(imageDir)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\timageListResponse := &ImageListResponse{Images: make([]*ImageFileInfo, len(files))}\n\tfor i, f := range files {\n\t\tinfo := &ImageFileInfo{\n\t\t\tFilename: f.Name(),\n\t\t\tCreatedAt: fmt.Sprintf(\"%v\", f.ModTime()),\n\t\t}\n\t\timageListResponse.Images[i] = info\n\t}\n\n\tres, err := json.Marshal(imageListResponse)\n\tif err != nil {\n\t\tfmt.Println(\"problem marshalling json list response:\", err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\tw.Write(res)\n}", "func responseList (w http.ResponseWriter, response *model.Response, invoices *model.InvoicesResponse) {\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tvar err error\n\tif response.Code < 0{\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\terr = json.NewEncoder(w).Encode(&response)\n\t}else{\n\t\tw.WriteHeader(http.StatusOK)\n\t\terr = json.NewEncoder(w).Encode(&invoices)\n\t}\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func (client *OutputsClient) listByStreamingJobHandleResponse(resp *http.Response) (OutputsClientListByStreamingJobResponse, error) {\n\tresult := OutputsClientListByStreamingJobResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.OutputListResult); err != nil {\n\t\treturn OutputsClientListByStreamingJobResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *FirewallRulesClient) listByServerHandleResponse(resp *azcore.Response) (FirewallRulesListByServerResponse, error) {\n\tresult := FirewallRulesListByServerResponse{RawResponse: resp.Response}\n\tif err := resp.UnmarshalAsJSON(&result.FirewallRuleListResult); err != nil {\n\t\treturn FirewallRulesListByServerResponse{}, err\n\t}\n\treturn result, nil\n}", "func (rm *RequestManager) ListHandler(w http.ResponseWriter, r *http.Request) {\n\tperson := &rm.Person;\n\tif r.Method != \"GET\" {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n\n\tlogger.log(\"GET /list \" + r.URL.Path)\n\n\tdata, err := person.GetList(w)\n\tif(err == nil) {\n\t\tjson.NewEncoder(w).Encode(&data)\n\t}\n}", "func handleList(w http.ResponseWriter, r *http.Request) {\n\tappid := requestheader.GetAppID(r)\n\n\tlist, errCode, err := 
GetSwitches(appid)\n\tif errCode != ApiError.SUCCESS {\n\t\tutil.WriteJSON(w, util.GenRetObj(errCode, err))\n\t} else {\n\t\tutil.WriteJSON(w, util.GenRetObj(errCode, list))\n\t}\n}", "func (client *HCRPAssignmentsClient) listHandleResponse(resp *http.Response) (HCRPAssignmentsClientListResponse, error) {\n\tresult := HCRPAssignmentsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AssignmentList); err != nil {\n\t\treturn HCRPAssignmentsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ManagedInstancesClient) listByInstancePoolHandleResponse(resp *http.Response) (ManagedInstancesClientListByInstancePoolResponse, error) {\n\tresult := ManagedInstancesClientListByInstancePoolResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedInstanceListResult); err != nil {\n\t\treturn ManagedInstancesClientListByInstancePoolResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AccountsClient) listModelsHandleResponse(resp *http.Response) (AccountsClientListModelsResponse, error) {\n\tresult := AccountsClientListModelsResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AccountModelListResult); err != nil {\n\t\treturn AccountsClientListModelsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *PermissionBindingsClient) listByNamespaceHandleResponse(resp *http.Response) (PermissionBindingsClientListByNamespaceResponse, error) {\n\tresult := PermissionBindingsClientListByNamespaceResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PermissionBindingsListResult); err != nil {\n\t\treturn PermissionBindingsClientListByNamespaceResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *TablesClient) listByWorkspaceHandleResponse(resp *http.Response) (TablesClientListByWorkspaceResponse, error) {\n\tresult := TablesClientListByWorkspaceResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.TablesListResult); err != nil {\n\t\treturn TablesClientListByWorkspaceResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) listMetadataHandleResponse(resp *http.Response) (WebAppsListMetadataResponse, error) {\n\tresult := WebAppsListMetadataResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.StringDictionary); err != nil {\n\t\treturn WebAppsListMetadataResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *OutputsClient) listByStreamingJobHandleResponse(resp *http.Response) (OutputsListByStreamingJobResponse, error) {\n\tresult := OutputsListByStreamingJobResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.OutputListResult); err != nil {\n\t\treturn OutputsListByStreamingJobResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *SourceControlConfigurationsClient) listHandleResponse(resp *http.Response) (SourceControlConfigurationsClientListResponse, error) {\n\tresult := SourceControlConfigurationsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SourceControlConfigurationList); err != nil {\n\t\treturn SourceControlConfigurationsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SubscriptionClient) listSecretsHandleResponse(resp *http.Response) (SubscriptionClientListSecretsResponse, error) {\n\tresult := SubscriptionClientListSecretsResponse{}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SubscriptionKeysContract); err != nil 
{\n\t\treturn SubscriptionClientListSecretsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ActionsClient) listByAlertRuleHandleResponse(resp *http.Response) (ActionsClientListByAlertRuleResponse, error) {\n\tresult := ActionsClientListByAlertRuleResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ActionsList); err != nil {\n\t\treturn ActionsClientListByAlertRuleResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AlertsClient) listForScopeHandleResponse(resp *http.Response) (AlertsClientListForScopeResponse, error) {\n\tresult := AlertsClientListForScopeResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AlertListResult); err != nil {\n\t\treturn AlertsClientListForScopeResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DicomServicesClient) listByWorkspaceHandleResponse(resp *http.Response) (DicomServicesClientListByWorkspaceResponse, error) {\n\tresult := DicomServicesClientListByWorkspaceResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DicomServiceCollection); err != nil {\n\t\treturn DicomServicesClientListByWorkspaceResponse{}, err\n\t}\n\treturn result, nil\n}" ]
[ "0.79782814", "0.7962708", "0.7869895", "0.7809686", "0.78079766", "0.78077203", "0.77588916", "0.77508783", "0.77483225", "0.7747638", "0.77462274", "0.77179635", "0.7717475", "0.7716036", "0.7704544", "0.76879525", "0.7673159", "0.7667837", "0.76593477", "0.76441395", "0.7637867", "0.76247036", "0.7614247", "0.76020986", "0.76015484", "0.7599422", "0.7594455", "0.7578927", "0.7576379", "0.75758564", "0.7570361", "0.75650907", "0.7547433", "0.75433415", "0.75336164", "0.75252855", "0.7524681", "0.7521814", "0.7500349", "0.7499498", "0.74799377", "0.7477431", "0.74701613", "0.7461991", "0.7461769", "0.7460206", "0.74398", "0.7423116", "0.74122834", "0.7392066", "0.7385218", "0.7374215", "0.73656446", "0.73442644", "0.7321537", "0.7321217", "0.7309049", "0.73084944", "0.7294372", "0.72838706", "0.72752506", "0.7274869", "0.72488296", "0.7238843", "0.7215353", "0.7207295", "0.7198499", "0.71969986", "0.71967465", "0.7166902", "0.71581686", "0.7141322", "0.7137527", "0.7128704", "0.7127052", "0.71218365", "0.7117778", "0.71119505", "0.7098396", "0.7076281", "0.70757526", "0.7068196", "0.70548785", "0.7044633", "0.70407975", "0.70363784", "0.70283645", "0.7014192", "0.70079285", "0.7006392", "0.7001563", "0.6999787", "0.6998262", "0.6993137", "0.69862133", "0.69847745", "0.6984745", "0.694123", "0.6934184", "0.69211674" ]
0.7666352
18
listBuiltInCreateRequest creates the ListBuiltIn request.
func (client *PolicyDefinitionsClient) listBuiltInCreateRequest(ctx context.Context, options *PolicyDefinitionsListBuiltInOptions) (*policy.Request, error) {
	urlPath := "/providers/Microsoft.Authorization/policyDefinitions"
	req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("api-version", "2021-06-01")
	if options != nil && options.Top != nil {
		reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	unencodedParams := []string{req.Raw().URL.RawQuery}
	if options != nil && options.Filter != nil {
		unencodedParams = append(unencodedParams, "$filter="+*options.Filter)
	}
	req.Raw().URL.RawQuery = strings.Join(unencodedParams, "&")
	req.Raw().Header.Set("Accept", "application/json")
	return req, nil
}
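The record's document above is the azcore-generated builder; the sketch below reproduces the same URL and query construction with only the Go standard library, which makes the notable detail easy to see in isolation: $top and api-version are URL-encoded, while $filter is appended to the raw query string unencoded. This is a minimal illustrative sketch, not the SDK's own code; the management endpoint and the sample filter value in main are assumptions for demonstration only.

package main

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strconv"
	"strings"
)

// buildListBuiltInRequest mirrors the pattern of the generated builder using only
// the standard library: join the base endpoint with the fixed path, encode the
// well-known query parameters, then append $filter without encoding.
func buildListBuiltInRequest(ctx context.Context, endpoint string, top *int32, filter *string) (*http.Request, error) {
	u, err := url.Parse(strings.TrimSuffix(endpoint, "/") + "/providers/Microsoft.Authorization/policyDefinitions")
	if err != nil {
		return nil, err
	}
	q := u.Query()
	q.Set("api-version", "2021-06-01")
	if top != nil {
		q.Set("$top", strconv.FormatInt(int64(*top), 10))
	}
	u.RawQuery = q.Encode()
	if filter != nil {
		// $filter is appended to the already-encoded query string as-is,
		// matching the unencodedParams handling in the generated code.
		u.RawQuery = strings.Join([]string{u.RawQuery, "$filter=" + *filter}, "&")
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Accept", "application/json")
	return req, nil
}

func main() {
	// Endpoint and filter below are hypothetical example values.
	top := int32(10)
	filter := "policyType eq 'BuiltIn'"
	req, err := buildListBuiltInRequest(context.Background(), "https://management.azure.com", &top, &filter)
	if err != nil {
		panic(err)
	}
	fmt.Println(req.URL.String())
}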
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (client *SapMonitorsClient) listCreateRequest(ctx context.Context, options *SapMonitorsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.HanaOnAzure/sapMonitors\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-02-07-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *PolicyDefinitionsClient) getBuiltInCreateRequest(ctx context.Context, policyDefinitionName string, options *PolicyDefinitionsGetBuiltInOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}\"\n\tif policyDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter policyDefinitionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{policyDefinitionName}\", url.PathEscape(policyDefinitionName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *BuildServiceClient) listBuildsCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, buildServiceName string, options *BuildServiceClientListBuildsOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builds\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif buildServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildServiceName}\", url.PathEscape(buildServiceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateGetApplicationListRequest() (request *GetApplicationListRequest) {\n\trequest = &GetApplicationListRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"mse\", \"2019-05-31\", 
\"GetApplicationList\", \"mse\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *RoleDefinitionsClient) listCreateRequest(ctx context.Context, scope string, options *RoleDefinitionsListOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.Authorization/roleDefinitions\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\treqQP.Set(\"api-version\", \"2018-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *MonitorsClient) listCreateRequest(ctx context.Context, options *MonitorsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Datadog/monitors\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (c *Client) BuildListRequest(ctx context.Context, v interface{}) (*http.Request, error) {\n\tu := &url.URL{Scheme: c.scheme, Host: c.host, Path: ListWarehousePath()}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, goahttp.ErrInvalidURL(\"Warehouse\", \"List\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}", "func (client *ConnectedEnvironmentsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ConnectedEnvironmentsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/connectedEnvironments\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *RegistrationDefinitionsClient) listCreateRequest(ctx context.Context, scope string, options *RegistrationDefinitionsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.ManagedServices/registrationDefinitions\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\treq, err := 
runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-01-preview\")\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *WebAppsClient) listCreateRequest(ctx context.Context, options *WebAppsListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Web/sites\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *AvailabilitySetsClient) listCreateRequest(ctx context.Context, resourceGroupName string, options *AvailabilitySetsListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ChildAvailabilityStatusesClient) listCreateRequest(ctx context.Context, resourceURI string, options *ChildAvailabilityStatusesClientListOptions) (*policy.Request, error) {\n\turlPath := \"/{resourceUri}/providers/Microsoft.ResourceHealth/childAvailabilityStatuses\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceUri}\", resourceURI)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-07-01\")\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *IPAllocationsClient) listCreateRequest(ctx context.Context, options *IPAllocationsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/IpAllocations\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID 
cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *SQLVirtualMachinesClient) listCreateRequest(ctx context.Context, options *SQLVirtualMachinesClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-03-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *KustoOperationsClient) listCreateRequest(ctx context.Context, options *KustoOperationsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Synapse/kustooperations\"\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *BuildServiceClient) listSupportedBuildpacksCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, buildServiceName string, options *BuildServiceClientListSupportedBuildpacksOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/supportedBuildpacks\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif buildServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildServiceName}\", url.PathEscape(buildServiceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, 
nil\n}", "func (c *Client) BuildListRequest(ctx context.Context, v interface{}) (*http.Request, error) {\n\tu := &url.URL{Scheme: c.scheme, Host: c.host, Path: ListResourcePath()}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, goahttp.ErrInvalidURL(\"resource\", \"List\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}", "func (client *WebhooksClient) listCreateRequest(ctx context.Context, resourceGroupName string, registryName string, options *WebhooksClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif registryName == \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *VirtualMachineScaleSetsClient) listAllCreateRequest(ctx context.Context, options *VirtualMachineScaleSetsListAllOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachineScaleSets\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *VirtualApplianceSKUsClient) listCreateRequest(ctx context.Context, options *VirtualApplianceSKUsListOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkVirtualApplianceSkus\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-07-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *VirtualMachinesClient) listCreateRequest(ctx context.Context, options *VirtualMachinesClientListOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-10-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *BuildServiceClient) listBuildServicesCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, options *BuildServiceClientListBuildServicesOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *LocalRulestacksClient) listAppIDsCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientListAppIDsOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/listAppIds\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif localRulestackName == \"\" {\n\t\treturn nil, errors.New(\"parameter localRulestackName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{localRulestackName}\", url.PathEscape(localRulestackName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-29\")\n\tif options != nil && options.AppIDVersion != nil {\n\t\treqQP.Set(\"appIdVersion\", *options.AppIDVersion)\n\t}\n\tif options != nil && options.AppPrefix != nil {\n\t\treqQP.Set(\"appPrefix\", 
*options.AppPrefix)\n\t}\n\tif options != nil && options.Skip != nil {\n\t\treqQP.Set(\"skip\", *options.Skip)\n\t}\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *FactoriesClient) listCreateRequest(ctx context.Context, options *FactoriesClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.DataFactory/factories\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func NewListRequest() *rolespb.ListRequest {\n\tmessage := &rolespb.ListRequest{}\n\treturn message\n}", "func (c *Client) BuildListRequest(ctx context.Context, v any) (*http.Request, error) {\n\tu := &url.URL{Scheme: c.scheme, Host: c.host, Path: ListStoragePath()}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, goahttp.ErrInvalidURL(\"storage\", \"list\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}", "func (client *ManagedClustersClient) listCreateRequest(ctx context.Context, options *ManagedClustersClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedClusters\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ExpressRoutePortsLocationsClient) listCreateRequest(ctx context.Context, options *ExpressRoutePortsLocationsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePortsLocations\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *PolicyDefinitionsClient) listCreateRequest(ctx context.Context, options *PolicyDefinitionsListOptions) (*policy.Request, error) 
{\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tunencodedParams := []string{req.Raw().URL.RawQuery}\n\tif options != nil && options.Filter != nil {\n\t\tunencodedParams = append(unencodedParams, \"$filter=\"+*options.Filter)\n\t}\n\treq.Raw().URL.RawQuery = strings.Join(unencodedParams, \"&\")\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *LocalRulestacksClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *LocalRulestacksClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-29\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ClustersClient) listCreateRequest(ctx context.Context, options *ClustersListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.OperationalInsights/clusters\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func NewListRequest() *todopb.ListRequest {\n\tmessage := &todopb.ListRequest{}\n\treturn message\n}", "func (client *CloudServicesClient) listAllCreateRequest(ctx context.Context, options *CloudServicesClientListAllOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Compute/cloudServices\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", 
url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-04\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *SQLResourcesClient) listSQLRoleDefinitionsCreateRequest(ctx context.Context, resourceGroupName string, accountName string, options *SQLResourcesClientListSQLRoleDefinitionsOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlRoleDefinitions\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-03-15-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *VirtualApplianceSitesClient) listCreateRequest(ctx context.Context, resourceGroupName string, networkVirtualApplianceName string, options *VirtualApplianceSitesListOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}/virtualApplianceSites\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{networkVirtualApplianceName}\", url.PathEscape(networkVirtualApplianceName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-07-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *OperationsClient) listCreateRequest(ctx context.Context, options *OperationsListOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.RecoveryServices/operations\"\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (c *Client) BuildListRequest(ctx context.Context, v interface{}) (*http.Request, error) {\n\tu := &url.URL{Scheme: 
c.scheme, Host: c.host, Path: ListRecipePath()}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, goahttp.ErrInvalidURL(\"recipe\", \"list\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}", "func (client *WebAppsClient) listWebJobsCreateRequest(ctx context.Context, resourceGroupName string, name string, options *WebAppsListWebJobsOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/webjobs\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (c *Client) BuildListRequest(ctx context.Context, v interface{}) (*http.Request, error) {\n\tu := &url.URL{Scheme: c.scheme, Host: c.host, Path: ListRecorderPath()}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, goahttp.ErrInvalidURL(\"recorder\", \"list\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}", "func (client *WorkspacesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *WorkspacesListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *AlertsClient) listForScopeCreateRequest(ctx context.Context, scope string, options *AlertsClientListForScopeOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.Authorization/roleManagementAlerts\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := 
req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *MonitorsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *MonitorsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (c *Client) BuildVMListRequest(ctx context.Context, v interface{}) (*http.Request, error) {\n\tu := &url.URL{Scheme: c.scheme, Host: c.host, Path: VMListSpinRegistryPath()}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, goahttp.ErrInvalidURL(\"spin-registry\", \"vm_list\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}", "func (client *ManagedInstancesClient) listCreateRequest(ctx context.Context, options *ManagedInstancesClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Sql/managedInstances\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ReplicationsClient) listCreateRequest(ctx context.Context, resourceGroupName string, registryName string, options *ReplicationsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif registryName == \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\treq, 
err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *AssociationsClient) listAllCreateRequest(ctx context.Context, scope string, options *AssociationsClientListAllOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.CustomProviders/associations\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-09-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ApplicationClient) listOperationsCreateRequest(ctx context.Context, options *ApplicationClientListOperationsOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Solutions/operations\"\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *RouteTablesClient) listAllCreateRequest(ctx context.Context, options *RouteTablesListAllOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeTables\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (c *Client) NewListRolesRequest(ctx context.Context, path string, resourceType *string) (*http.Request, error) {\n\tscheme := c.Scheme\n\tif scheme == \"\" {\n\t\tscheme = \"http\"\n\t}\n\tu := url.URL{Host: c.Host, Scheme: scheme, Path: path}\n\tvalues := u.Query()\n\tif resourceType != nil {\n\t\tvalues.Set(\"resource_type\", *resourceType)\n\t}\n\tu.RawQuery = values.Encode()\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif c.JWTSigner != nil {\n\t\tc.JWTSigner.Sign(req)\n\t}\n\treturn req, nil\n}", "func (client *FactoriesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *FactoriesClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" 
{\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *SQLVirtualMachinesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *SQLVirtualMachinesClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-03-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *BuildServiceClient) listSupportedStacksCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, buildServiceName string, options *BuildServiceClientListSupportedStacksOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/supportedStacks\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif buildServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildServiceName}\", url.PathEscape(buildServiceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateListMcubeNebulaResourcesRequest() (request *ListMcubeNebulaResourcesRequest) {\n\trequest = &ListMcubeNebulaResourcesRequest{\n\t\tRpcRequest: 
&requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"mPaaS\", \"2020-10-28\", \"ListMcubeNebulaResources\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *GremlinResourcesClient) listGremlinDatabasesCreateRequest(ctx context.Context, resourceGroupName string, accountName string, options *GremlinResourcesClientListGremlinDatabasesOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-10-15\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *DicomServicesClient) listByWorkspaceCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, options *DicomServicesClientListByWorkspaceOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HealthcareApis/workspaces/{workspaceName}/dicomservices\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateGetRegionListRequest() (request *GetRegionListRequest) {\n\trequest = &GetRegionListRequest{\n\t\tRoaRequest: &requests.RoaRequest{},\n\t}\n\trequest.InitWithApiInfo(\"cr\", \"2016-06-07\", \"GetRegionList\", \"/regions\", \"cr\", \"openAPI\")\n\trequest.Method = requests.GET\n\treturn\n}", "func (client *IPAllocationsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *IPAllocationsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations\"\n\tif 
resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (rm *resourceManager) newListRequestPayload(\n\tr *resource,\n) (*svcsdk.DescribeReplicationGroupsInput, error) {\n\tres := &svcsdk.DescribeReplicationGroupsInput{}\n\n\tif r.ko.Spec.ReplicationGroupID != nil {\n\t\tres.SetReplicationGroupId(*r.ko.Spec.ReplicationGroupID)\n\t}\n\n\treturn res, nil\n}", "func (client *VirtualMachineScaleSetsClient) listCreateRequest(ctx context.Context, resourceGroupName string, options *VirtualMachineScaleSetsListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *CustomDomainsClient) listCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, appName string, options *CustomDomainsListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/domains\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif appName == \"\" {\n\t\treturn nil, errors.New(\"parameter appName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{appName}\", url.PathEscape(appName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := 
req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-09-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateListAvailableFileSystemTypesRequest() (request *ListAvailableFileSystemTypesRequest) {\n\trequest = &ListAvailableFileSystemTypesRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"EHPC\", \"2018-04-12\", \"ListAvailableFileSystemTypes\", \"ehs\", \"openAPI\")\n\treturn\n}", "func (client *TaskRunsClient) listCreateRequest(ctx context.Context, resourceGroupName string, registryName string, options *TaskRunsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/taskRuns\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif registryName == \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ReplicationsClient) listCreateRequest(ctx context.Context, resourceGroupName string, registryName string, options *ReplicationsListOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif registryName == \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.URL.RawQuery = reqQP.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *MonitorsClient) listLinkedResourcesCreateRequest(ctx context.Context, resourceGroupName string, monitorName string, options *MonitorsClientListLinkedResourcesOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors/{monitorName}/listLinkedResources\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif monitorName == \"\" {\n\t\treturn nil, errors.New(\"parameter monitorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{monitorName}\", url.PathEscape(monitorName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (c *Client) BuildListRequest(ctx context.Context, v interface{}) (*http.Request, error) {\n\tu := &url.URL{Scheme: c.scheme, Host: c.host, Path: ListBlogPath()}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, goahttp.ErrInvalidURL(\"blog\", \"list\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}", "func CreateListAlertMessagesRequest() (request *ListAlertMessagesRequest) {\n\trequest = &ListAlertMessagesRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"dataworks-public\", \"2020-05-18\", \"ListAlertMessages\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *RestorableDatabaseAccountsClient) listByLocationCreateRequest(ctx context.Context, location string, options *RestorableDatabaseAccountsListByLocationOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.DocumentDB/locations/{location}/restorableDatabaseAccounts\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif location == \"\" {\n\t\treturn nil, errors.New(\"parameter location cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{location}\", url.PathEscape(location))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-15\")\n\treq.URL.RawQuery = reqQP.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *WebAppsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *WebAppsListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter 
client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.IncludeSlots != nil {\n\t\treqQP.Set(\"includeSlots\", strconv.FormatBool(*options.IncludeSlots))\n\t}\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ConnectedEnvironmentsClient) listBySubscriptionCreateRequest(ctx context.Context, options *ConnectedEnvironmentsClientListBySubscriptionOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.App/connectedEnvironments\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *VirtualMachinesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *VirtualMachinesClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-10-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *LocalRulestacksClient) listFirewallsCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientListFirewallsOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/listFirewalls\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif localRulestackName == \"\" {\n\t\treturn nil, 
errors.New(\"parameter localRulestackName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{localRulestackName}\", url.PathEscape(localRulestackName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-29\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *VirtualRoutersClient) listCreateRequest(ctx context.Context, options *VirtualRoutersClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualRouters\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *LocalRulestacksClient) listBySubscriptionCreateRequest(ctx context.Context, options *LocalRulestacksClientListBySubscriptionOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-29\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (rm *resourceManager) newListRequestPayload(\n\tr *resource,\n) (*svcsdk.DescribeReservedInstancesListingsInput, error) {\n\tres := &svcsdk.DescribeReservedInstancesListingsInput{}\n\n\tif r.ko.Spec.ReservedInstancesID != nil {\n\t\tres.SetReservedInstancesId(*r.ko.Spec.ReservedInstancesID)\n\t}\n\n\treturn res, nil\n}", "func (client *TablesClient) listByWorkspaceCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, options *TablesClientListByWorkspaceOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/tables\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", 
url.PathEscape(workspaceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateListApplicationLogsRequest() (request *ListApplicationLogsRequest) {\n\trequest = &ListApplicationLogsRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"companyreg\", \"2019-05-08\", \"ListApplicationLogs\", \"companyreg\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *WorkspacesClient) listBySubscriptionCreateRequest(ctx context.Context, options *WorkspacesListBySubscriptionOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Databricks/workspaces\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (c *Client) ListBuiltInIntents(ctx context.Context, params *ListBuiltInIntentsInput, optFns ...func(*Options)) (*ListBuiltInIntentsOutput, error) {\n\tif params == nil {\n\t\tparams = &ListBuiltInIntentsInput{}\n\t}\n\n\tresult, metadata, err := c.invokeOperation(ctx, \"ListBuiltInIntents\", params, optFns, c.addOperationListBuiltInIntentsMiddlewares)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tout := result.(*ListBuiltInIntentsOutput)\n\tout.ResultMetadata = metadata\n\treturn out, nil\n}", "func (client *VirtualMachineScaleSetsClient) listByLocationCreateRequest(ctx context.Context, location string, options *VirtualMachineScaleSetsListByLocationOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/virtualMachineScaleSets\"\n\tif location == \"\" {\n\t\treturn nil, errors.New(\"parameter location cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{location}\", url.PathEscape(location))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *MonitorsClient) listMonitoredResourcesCreateRequest(ctx context.Context, resourceGroupName string, monitorName string, options *MonitorsClientListMonitoredResourcesOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors/{monitorName}/listMonitoredResources\"\n\tif client.subscriptionID == \"\" 
{\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif monitorName == \"\" {\n\t\treturn nil, errors.New(\"parameter monitorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{monitorName}\", url.PathEscape(monitorName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *AlertProcessingRulesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *AlertProcessingRulesClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AlertsManagement/actionRules\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-08\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (c *Client) BuildListRequest(ctx context.Context, v interface{}) (*http.Request, error) {\n\tu := &url.URL{Scheme: c.scheme, Host: c.host, Path: ListLogPath()}\n\treq, err := http.NewRequest(\"POST\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, goahttp.ErrInvalidURL(\"log\", \"list\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}", "func (client *RestorableDatabaseAccountsClient) listCreateRequest(ctx context.Context, options *RestorableDatabaseAccountsListOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.DocumentDB/restorableDatabaseAccounts\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-15\")\n\treq.URL.RawQuery = reqQP.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ActivityLogsClient) listCreateRequest(ctx context.Context, filter string, options *ActivityLogsClientListOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/providers/Microsoft.Insights/eventtypes/management/values\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2015-04-01\")\n\treqQP.Set(\"$filter\", filter)\n\tif options != nil && options.Select != nil {\n\t\treqQP.Set(\"$select\", *options.Select)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateGetNamespaceListRequest() (request *GetNamespaceListRequest) {\n\trequest = &GetNamespaceListRequest{\n\t\tRoaRequest: &requests.RoaRequest{},\n\t}\n\trequest.InitWithApiInfo(\"cr\", \"2016-06-07\", \"GetNamespaceList\", \"/namespace\", \"cr\", \"openAPI\")\n\trequest.Method = requests.GET\n\treturn\n}", "func (client *CloudServicesClient) listCreateRequest(ctx context.Context, resourceGroupName string, options *CloudServicesClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-04\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ApplicationTypeVersionsClient) listCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, options *ApplicationTypeVersionsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applicationTypes/{applicationTypeName}/versions\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\tif applicationTypeName == \"\" {\n\t\treturn nil, errors.New(\"parameter applicationTypeName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{applicationTypeName}\", url.PathEscape(applicationTypeName))\n\treq, err := 
runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ManagedClustersClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ManagedClustersClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *DevicesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *DevicesClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HybridNetwork/devices\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *TagRulesClient) listCreateRequest(ctx context.Context, resourceGroupName string, monitorName string, options *TagRulesClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Dynatrace.Observability/monitors/{monitorName}/tagRules\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif monitorName == \"\" {\n\t\treturn nil, errors.New(\"parameter monitorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{monitorName}\", 
url.PathEscape(monitorName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-27\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *WebAppsClient) listNetworkFeaturesCreateRequest(ctx context.Context, resourceGroupName string, name string, view string, options *WebAppsListNetworkFeaturesOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/networkFeatures/{view}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif view == \"\" {\n\t\treturn nil, errors.New(\"parameter view cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{view}\", url.PathEscape(view))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *CompliancesClient) listCreateRequest(ctx context.Context, scope string, options *CompliancesClientListOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.Security/compliances\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *WebAppsClient) listFunctionsCreateRequest(ctx context.Context, resourceGroupName string, name string, options *WebAppsListFunctionsOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/functions\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != 
nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *BuildServiceClient) listBuildResultsCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, buildServiceName string, buildName string, options *BuildServiceClientListBuildResultsOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builds/{buildName}/results\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif buildServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildServiceName}\", url.PathEscape(buildServiceName))\n\tif buildName == \"\" {\n\t\treturn nil, errors.New(\"parameter buildName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{buildName}\", url.PathEscape(buildName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *MetricAlertsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *MetricAlertsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/metricAlerts\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ApplyUpdatesClient) listCreateRequest(ctx context.Context, options *ApplyUpdatesClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Maintenance/applyUpdates\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter 
client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *LiveOutputsClient) listCreateRequest(ctx context.Context, resourceGroupName string, accountName string, liveEventName string, options *LiveOutputsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}/liveEvents/{liveEventName}/liveOutputs\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\tif liveEventName == \"\" {\n\t\treturn nil, errors.New(\"parameter liveEventName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{liveEventName}\", url.PathEscape(liveEventName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ServersClient) listCreateRequest(ctx context.Context, options *ServersClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Sql/servers\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ClustersClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ClustersListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/clusters\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", 
url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateListServerGroupsRequest() (request *ListServerGroupsRequest) {\n\trequest = &ListServerGroupsRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Alb\", \"2020-06-16\", \"ListServerGroups\", \"alb\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}" ]
[ "0.60232586", "0.5982126", "0.5869841", "0.5857119", "0.5838637", "0.5705415", "0.56493896", "0.56372064", "0.5619246", "0.5618151", "0.56148225", "0.56142336", "0.5599064", "0.55896854", "0.5574594", "0.55553883", "0.55527395", "0.5521917", "0.5519978", "0.5480475", "0.547694", "0.54728234", "0.5456693", "0.5449977", "0.542701", "0.542654", "0.5426327", "0.54206973", "0.5406934", "0.54034716", "0.5399393", "0.5385655", "0.53846866", "0.536695", "0.53659964", "0.5347355", "0.5346649", "0.5342399", "0.5341838", "0.53331274", "0.5316923", "0.53146154", "0.53143156", "0.5313506", "0.5310865", "0.5304088", "0.5298454", "0.5296025", "0.5289526", "0.5285597", "0.5282052", "0.5281836", "0.52784795", "0.52771777", "0.52771527", "0.5270854", "0.5268224", "0.52659416", "0.52594286", "0.5250464", "0.52498704", "0.5246777", "0.5245892", "0.5242977", "0.5207698", "0.5206456", "0.5200932", "0.51877636", "0.5186897", "0.51800054", "0.5179383", "0.51679116", "0.5164312", "0.5163834", "0.5162378", "0.51564443", "0.51450634", "0.5143636", "0.514067", "0.51404834", "0.5131331", "0.51296234", "0.51206964", "0.5111664", "0.5108215", "0.5094949", "0.5092192", "0.50891334", "0.5086448", "0.508325", "0.5079826", "0.50781304", "0.50716966", "0.5071199", "0.5061336", "0.5060229", "0.5058643", "0.5053994", "0.50525653", "0.5050618" ]
0.7813031
0
listBuiltInHandleResponse handles the ListBuiltIn response.
func (client *PolicyDefinitionsClient) listBuiltInHandleResponse(resp *http.Response) (PolicyDefinitionsListBuiltInResponse, error) {
	result := PolicyDefinitionsListBuiltInResponse{RawResponse: resp}
	if err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinitionListResult); err != nil {
		return PolicyDefinitionsListBuiltInResponse{}, runtime.NewResponseError(err, resp)
	}
	return result, nil
}
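For illustration only: a minimal standard-library sketch of the same handle-response pattern, decoding the JSON body into a typed result and returning a decode failure instead of a partial result. The policyItem/policyList types and the sample body are placeholders rather than the SDK's actual PolicyDefinitionListResult model, and plain encoding/json stands in for the azcore runtime helpers used above.

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strings"
)

// policyItem and policyList are illustrative stand-ins for the SDK's models.
type policyItem struct {
	Name string `json:"name"`
}

type policyList struct {
	Value    []policyItem `json:"value"`
	NextLink string       `json:"nextLink"`
}

// handleListResponse mirrors the generated handler: decode the response body
// into the typed result and surface any decoding error to the caller.
func handleListResponse(resp *http.Response) (policyList, error) {
	var result policyList
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		return policyList{}, fmt.Errorf("unmarshal list response: %w", err)
	}
	return result, nil
}

func main() {
	// Simulated response body; a real caller would pass the HTTP response
	// returned by the paired createRequest/pipeline call.
	body := `{"value":[{"name":"audit-vm-managed-disks"}],"nextLink":""}`
	resp := &http.Response{Body: io.NopCloser(strings.NewReader(body))}
	result, err := handleListResponse(resp)
	fmt.Println(result.Value, err)
}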
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (client *PolicyDefinitionsClient) getBuiltInHandleResponse(resp *http.Response) (PolicyDefinitionsGetBuiltInResponse, error) {\n\tresult := PolicyDefinitionsGetBuiltInResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinition); err != nil {\n\t\treturn PolicyDefinitionsGetBuiltInResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) listHandleResponse(resp *http.Response) (WebAppsListResponse, error) {\n\tresult := WebAppsListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.WebAppCollection); err != nil {\n\t\treturn WebAppsListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ReplicationsClient) listHandleResponse(resp *azcore.Response) (ReplicationListResultResponse, error) {\n\tvar val *ReplicationListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn ReplicationListResultResponse{}, err\n\t}\n\treturn ReplicationListResultResponse{RawResponse: resp.Response, ReplicationListResult: val}, nil\n}", "func (client *LiveOutputsClient) listHandleResponse(resp *http.Response) (LiveOutputsClientListResponse, error) {\n\tresult := LiveOutputsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.LiveOutputListResult); err != nil {\n\t\treturn LiveOutputsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualMachineScaleSetVMRunCommandsClient) listHandleResponse(resp *http.Response) (VirtualMachineScaleSetVMRunCommandsListResponse, error) {\n\tresult := VirtualMachineScaleSetVMRunCommandsListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineRunCommandsListResult); err != nil {\n\t\treturn VirtualMachineScaleSetVMRunCommandsListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *RoleDefinitionsClient) listHandleResponse(resp *http.Response) (RoleDefinitionsListResponse, error) {\n\tresult := RoleDefinitionsListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RoleDefinitionListResult); err != nil {\n\t\treturn RoleDefinitionsListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *VirtualApplianceSKUsClient) listHandleResponse(resp *azcore.Response) (NetworkVirtualApplianceSKUListResultResponse, error) {\n\tvar val *NetworkVirtualApplianceSKUListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn NetworkVirtualApplianceSKUListResultResponse{}, err\n\t}\n\treturn NetworkVirtualApplianceSKUListResultResponse{RawResponse: resp.Response, NetworkVirtualApplianceSKUListResult: val}, nil\n}", "func (client *RegistrationDefinitionsClient) listHandleResponse(resp *http.Response) (RegistrationDefinitionsClientListResponse, error) {\n\tresult := RegistrationDefinitionsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RegistrationDefinitionList); err != nil {\n\t\treturn RegistrationDefinitionsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AvailabilitySetsClient) listHandleResponse(resp *http.Response) (AvailabilitySetsListResponse, error) {\n\tresult := AvailabilitySetsListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AvailabilitySetListResult); err != nil {\n\t\treturn AvailabilitySetsListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *VirtualApplianceSitesClient) 
listHandleResponse(resp *azcore.Response) (NetworkVirtualApplianceSiteListResultResponse, error) {\n\tvar val *NetworkVirtualApplianceSiteListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn NetworkVirtualApplianceSiteListResultResponse{}, err\n\t}\n\treturn NetworkVirtualApplianceSiteListResultResponse{RawResponse: resp.Response, NetworkVirtualApplianceSiteListResult: val}, nil\n}", "func (client *AnalysisResultsClient) listHandleResponse(resp *http.Response) (AnalysisResultsClientListResponse, error) {\n\tresult := AnalysisResultsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AnalysisResultListResult); err != nil {\n\t\treturn AnalysisResultsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SQLVirtualMachinesClient) listHandleResponse(resp *http.Response) (SQLVirtualMachinesClientListResponse, error) {\n\tresult := SQLVirtualMachinesClientListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ListResult); err != nil {\n\t\treturn SQLVirtualMachinesClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *MonitorsClient) listHandleResponse(resp *http.Response) (MonitorsClientListResponse, error) {\n\tresult := MonitorsClientListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.MonitorResourceListResponse); err != nil {\n\t\treturn MonitorsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ApplicationTypeVersionsClient) listHandleResponse(resp *http.Response) (ApplicationTypeVersionsClientListResponse, error) {\n\tresult := ApplicationTypeVersionsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ApplicationTypeVersionResourceList); err != nil {\n\t\treturn ApplicationTypeVersionsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *KustoOperationsClient) listHandleResponse(resp *http.Response) (KustoOperationsClientListResponse, error) {\n\tresult := KustoOperationsClientListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.OperationListResult); err != nil {\n\t\treturn KustoOperationsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *PolicyDefinitionsClient) listHandleResponse(resp *http.Response) (PolicyDefinitionsListResponse, error) {\n\tresult := PolicyDefinitionsListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinitionListResult); err != nil {\n\t\treturn PolicyDefinitionsListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebhooksClient) listHandleResponse(resp *http.Response) (WebhooksClientListResponse, error) {\n\tresult := WebhooksClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.WebhookListResult); err != nil {\n\t\treturn WebhooksClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *RecordSetsClient) listHandleResponse(resp *azcore.Response) (RecordSetListResultResponse, error) {\n\tvar val *RecordSetListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn RecordSetListResultResponse{}, err\n\t}\n\treturn RecordSetListResultResponse{RawResponse: resp.Response, RecordSetListResult: val}, nil\n}", "func (client *ChildAvailabilityStatusesClient) listHandleResponse(resp *http.Response) (ChildAvailabilityStatusesClientListResponse, error) {\n\tresult := ChildAvailabilityStatusesClientListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, 
&result.AvailabilityStatusListResult); err != nil {\n\t\treturn ChildAvailabilityStatusesClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ReplicationsClient) listHandleResponse(resp *http.Response) (ReplicationsClientListResponse, error) {\n\tresult := ReplicationsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ReplicationListResult); err != nil {\n\t\treturn ReplicationsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *LocalRulestacksClient) listAppIDsHandleResponse(resp *http.Response) (LocalRulestacksClientListAppIDsResponse, error) {\n\tresult := LocalRulestacksClientListAppIDsResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ListAppIDResponse); err != nil {\n\t\treturn LocalRulestacksClientListAppIDsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *MachineExtensionsClient) listHandleResponse(resp *http.Response) (MachineExtensionsClientListResponse, error) {\n\tresult := MachineExtensionsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.MachineExtensionsListResult); err != nil {\n\t\treturn MachineExtensionsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *CustomDomainsClient) listHandleResponse(resp *http.Response) (CustomDomainsListResponse, error) {\n\tresult := CustomDomainsListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CustomDomainResourceCollection); err != nil {\n\t\treturn CustomDomainsListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *BuildServiceClient) listBuildsHandleResponse(resp *http.Response) (BuildServiceClientListBuildsResponse, error) {\n\tresult := BuildServiceClientListBuildsResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.BuildCollection); err != nil {\n\t\treturn BuildServiceClientListBuildsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SapMonitorsClient) listHandleResponse(resp *http.Response) (SapMonitorsClientListResponse, error) {\n\tresult := SapMonitorsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SapMonitorListResult); err != nil {\n\t\treturn SapMonitorsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualMachineScaleSetsClient) listHandleResponse(resp *http.Response) (VirtualMachineScaleSetsListResponse, error) {\n\tresult := VirtualMachineScaleSetsListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineScaleSetListResult); err != nil {\n\t\treturn VirtualMachineScaleSetsListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *IPAllocationsClient) listHandleResponse(resp *http.Response) (IPAllocationsClientListResponse, error) {\n\tresult := IPAllocationsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.IPAllocationListResult); err != nil {\n\t\treturn IPAllocationsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualMachinesClient) listHandleResponse(resp *http.Response) (VirtualMachinesClientListResponse, error) {\n\tresult := VirtualMachinesClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachinesList); err != nil {\n\t\treturn VirtualMachinesClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) listWebJobsHandleResponse(resp *http.Response) (WebAppsListWebJobsResponse, error) {\n\tresult := WebAppsListWebJobsResponse{RawResponse: resp}\n\tif err := 
runtime.UnmarshalAsJSON(resp, &result.WebJobCollection); err != nil {\n\t\treturn WebAppsListWebJobsResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ManagedClustersClient) listHandleResponse(resp *http.Response) (ManagedClustersClientListResponse, error) {\n\tresult := ManagedClustersClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedClusterListResult); err != nil {\n\t\treturn ManagedClustersClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ManagedInstancesClient) listHandleResponse(resp *http.Response) (ManagedInstancesClientListResponse, error) {\n\tresult := ManagedInstancesClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedInstanceListResult); err != nil {\n\t\treturn ManagedInstancesClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *BuildServiceClient) listSupportedBuildpacksHandleResponse(resp *http.Response) (BuildServiceClientListSupportedBuildpacksResponse, error) {\n\tresult := BuildServiceClientListSupportedBuildpacksResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SupportedBuildpacksCollection); err != nil {\n\t\treturn BuildServiceClientListSupportedBuildpacksResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualNetworkLinksClient) listHandleResponse(resp *http.Response) (VirtualNetworkLinksListResponse, error) {\n\tresult := VirtualNetworkLinksListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VirtualNetworkLinkListResult); err != nil {\n\t\treturn VirtualNetworkLinksListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) listByResourceGroupHandleResponse(resp *http.Response) (WebAppsListByResourceGroupResponse, error) {\n\tresult := WebAppsListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.WebAppCollection); err != nil {\n\t\treturn WebAppsListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *VirtualMachineScaleSetsClient) listAllHandleResponse(resp *http.Response) (VirtualMachineScaleSetsListAllResponse, error) {\n\tresult := VirtualMachineScaleSetsListAllResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineScaleSetListWithLinkResult); err != nil {\n\t\treturn VirtualMachineScaleSetsListAllResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *SQLResourcesClient) listSQLUserDefinedFunctionsHandleResponse(resp *http.Response) (SQLResourcesClientListSQLUserDefinedFunctionsResponse, error) {\n\tresult := SQLResourcesClientListSQLUserDefinedFunctionsResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SQLUserDefinedFunctionListResult); err != nil {\n\t\treturn SQLResourcesClientListSQLUserDefinedFunctionsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) listProcessesHandleResponse(resp *http.Response) (WebAppsListProcessesResponse, error) {\n\tresult := WebAppsListProcessesResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ProcessInfoCollection); err != nil {\n\t\treturn WebAppsListProcessesResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ExpressRoutePortsLocationsClient) listHandleResponse(resp *http.Response) (ExpressRoutePortsLocationsClientListResponse, error) {\n\tresult := 
ExpressRoutePortsLocationsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ExpressRoutePortsLocationListResult); err != nil {\n\t\treturn ExpressRoutePortsLocationsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *TaskRunsClient) listHandleResponse(resp *http.Response) (TaskRunsClientListResponse, error) {\n\tresult := TaskRunsClientListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.TaskRunListResult); err != nil {\n\t\treturn TaskRunsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AgentsClient) listHandleResponse(resp *http.Response) (AgentsClientListResponse, error) {\n\tresult := AgentsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AgentList); err != nil {\n\t\treturn AgentsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *OperationsClient) listHandleResponse(resp *http.Response) (OperationsListResponse, error) {\n\tresult := OperationsListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ClientDiscoveryResponse); err != nil {\n\t\treturn OperationsListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *OutputsClient) listByStreamingJobHandleResponse(resp *http.Response) (OutputsClientListByStreamingJobResponse, error) {\n\tresult := OutputsClientListByStreamingJobResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.OutputListResult); err != nil {\n\t\treturn OutputsClientListByStreamingJobResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *FirewallRulesClient) listHandleResponse(resp *http.Response) (FirewallRulesClientListResponse, error) {\n\tresult := FirewallRulesClientListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.FirewallRuleListResult); err != nil {\n\t\treturn FirewallRulesClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ReplicationProtectionContainersClient) listHandleResponse(resp *http.Response) (ReplicationProtectionContainersClientListResponse, error) {\n\tresult := ReplicationProtectionContainersClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ProtectionContainerCollection); err != nil {\n\t\treturn ReplicationProtectionContainersClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ServersClient) listHandleResponse(resp *http.Response) (ServersClientListResponse, error) {\n\tresult := ServersClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ServerListResult); err != nil {\n\t\treturn ServersClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ScriptExecutionsClient) listHandleResponse(resp *http.Response) (ScriptExecutionsClientListResponse, error) {\n\tresult := ScriptExecutionsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ScriptExecutionsList); err != nil {\n\t\treturn ScriptExecutionsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *OutputsClient) listByStreamingJobHandleResponse(resp *http.Response) (OutputsListByStreamingJobResponse, error) {\n\tresult := OutputsListByStreamingJobResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.OutputListResult); err != nil {\n\t\treturn OutputsListByStreamingJobResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *VendorNetworkFunctionsClient) listHandleResponse(resp *http.Response) (VendorNetworkFunctionsClientListResponse, error) 
{\n\tresult := VendorNetworkFunctionsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VendorNetworkFunctionListResult); err != nil {\n\t\treturn VendorNetworkFunctionsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ClustersClient) listHandleResponse(resp *http.Response) (ClustersClientListResponse, error) {\n\tresult := ClustersClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ClusterListResult); err != nil {\n\t\treturn ClustersClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *CloudServiceRoleInstancesClient) listHandleResponse(resp *azcore.Response) (RoleInstanceListResultResponse, error) {\n\tvar val *RoleInstanceListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn RoleInstanceListResultResponse{}, err\n\t}\n\treturn RoleInstanceListResultResponse{RawResponse: resp.Response, RoleInstanceListResult: val}, nil\n}", "func (client *ApplicationClient) listOperationsHandleResponse(resp *http.Response) (ApplicationClientListOperationsResponse, error) {\n\tresult := ApplicationClientListOperationsResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.OperationListResult); err != nil {\n\t\treturn ApplicationClientListOperationsResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ClustersClient) listHandleResponse(resp *http.Response) (ClustersListResponse, error) {\n\tresult := ClustersListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ClusterListResult); err != nil {\n\t\treturn ClustersListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *SubscriptionClient) listHandleResponse(resp *http.Response) (SubscriptionClientListResponse, error) {\n\tresult := SubscriptionClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SubscriptionCollection); err != nil {\n\t\treturn SubscriptionClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ContainerGroupsClient) listHandleResponse(resp *http.Response) (ContainerGroupsClientListResponse, error) {\n\tresult := ContainerGroupsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ContainerGroupListResult); err != nil {\n\t\treturn ContainerGroupsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *Client) listHandleResponse(resp *http.Response) (ClientListResponse, error) {\n\tresult := ClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ChangeResourceListResult); err != nil {\n\t\treturn ClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *FactoriesClient) listHandleResponse(resp *http.Response) (FactoriesClientListResponse, error) {\n\tresult := FactoriesClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.FactoryListResponse); err != nil {\n\t\treturn FactoriesClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ReplicationvCentersClient) listHandleResponse(resp *http.Response) (ReplicationvCentersClientListResponse, error) {\n\tresult := ReplicationvCentersClientListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VCenterCollection); err != nil {\n\t\treturn ReplicationvCentersClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *RouteTablesClient) listHandleResponse(resp *http.Response) (RouteTablesListResponse, error) {\n\tresult := RouteTablesListResponse{RawResponse: resp}\n\tif err := 
runtime.UnmarshalAsJSON(resp, &result.RouteTableListResult); err != nil {\n\t\treturn RouteTablesListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) listTriggeredWebJobsHandleResponse(resp *http.Response) (WebAppsListTriggeredWebJobsResponse, error) {\n\tresult := WebAppsListTriggeredWebJobsResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.TriggeredWebJobCollection); err != nil {\n\t\treturn WebAppsListTriggeredWebJobsResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *VirtualRoutersClient) listHandleResponse(resp *http.Response) (VirtualRoutersClientListResponse, error) {\n\tresult := VirtualRoutersClientListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VirtualRouterListResult); err != nil {\n\t\treturn VirtualRoutersClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *CustomAssessmentAutomationsClient) listByResourceGroupHandleResponse(resp *http.Response) (CustomAssessmentAutomationsListByResourceGroupResponse, error) {\n\tresult := CustomAssessmentAutomationsListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CustomAssessmentAutomationsListResult); err != nil {\n\t\treturn CustomAssessmentAutomationsListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *VirtualMachineImageTemplatesClient) listRunOutputsHandleResponse(resp *http.Response) (VirtualMachineImageTemplatesClientListRunOutputsResponse, error) {\n\tresult := VirtualMachineImageTemplatesClientListRunOutputsResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RunOutputCollection); err != nil {\n\t\treturn VirtualMachineImageTemplatesClientListRunOutputsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) listFunctionsHandleResponse(resp *http.Response) (WebAppsListFunctionsResponse, error) {\n\tresult := WebAppsListFunctionsResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.FunctionEnvelopeCollection); err != nil {\n\t\treturn WebAppsListFunctionsResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *BuildServiceClient) listSupportedStacksHandleResponse(resp *http.Response) (BuildServiceClientListSupportedStacksResponse, error) {\n\tresult := BuildServiceClientListSupportedStacksResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SupportedStacksCollection); err != nil {\n\t\treturn BuildServiceClientListSupportedStacksResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ApplyUpdatesClient) listHandleResponse(resp *http.Response) (ApplyUpdatesClientListResponse, error) {\n\tresult := ApplyUpdatesClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ListApplyUpdate); err != nil {\n\t\treturn ApplyUpdatesClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *JobExecutionsClient) listByAgentHandleResponse(resp *http.Response) (JobExecutionsClientListByAgentResponse, error) {\n\tresult := JobExecutionsClientListByAgentResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.JobExecutionListResult); err != nil {\n\t\treturn JobExecutionsClientListByAgentResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SQLVirtualMachinesClient) listByResourceGroupHandleResponse(resp *http.Response) (SQLVirtualMachinesClientListByResourceGroupResponse, error) {\n\tresult := 
SQLVirtualMachinesClientListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ListResult); err != nil {\n\t\treturn SQLVirtualMachinesClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) listProcessModulesHandleResponse(resp *http.Response) (WebAppsListProcessModulesResponse, error) {\n\tresult := WebAppsListProcessModulesResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ProcessModuleInfoCollection); err != nil {\n\t\treturn WebAppsListProcessModulesResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *TagRulesClient) listHandleResponse(resp *http.Response) (TagRulesClientListResponse, error) {\n\tresult := TagRulesClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.TagRuleListResult); err != nil {\n\t\treturn TagRulesClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *RouteTablesClient) listAllHandleResponse(resp *http.Response) (RouteTablesListAllResponse, error) {\n\tresult := RouteTablesListAllResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RouteTableListResult); err != nil {\n\t\treturn RouteTablesListAllResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *MonitorsClient) listByResourceGroupHandleResponse(resp *http.Response) (MonitorsClientListByResourceGroupResponse, error) {\n\tresult := MonitorsClientListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.MonitorResourceListResponse); err != nil {\n\t\treturn MonitorsClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ServersClient) listByResourceGroupHandleResponse(resp *http.Response) (ServersClientListByResourceGroupResponse, error) {\n\tresult := ServersClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ServerListResult); err != nil {\n\t\treturn ServersClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ActivityLogsClient) listHandleResponse(resp *http.Response) (ActivityLogsClientListResponse, error) {\n\tresult := ActivityLogsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.EventDataCollection); err != nil {\n\t\treturn ActivityLogsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualMachineImagesEdgeZoneClient) listHandleResponse(resp *http.Response) (VirtualMachineImagesEdgeZoneClientListResponse, error) {\n\tresult := VirtualMachineImagesEdgeZoneClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineImageResourceArray); err != nil {\n\t\treturn VirtualMachineImagesEdgeZoneClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SQLResourcesClient) listSQLRoleDefinitionsHandleResponse(resp *http.Response) (SQLResourcesClientListSQLRoleDefinitionsResponse, error) {\n\tresult := SQLResourcesClientListSQLRoleDefinitionsResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SQLRoleDefinitionListResult); err != nil {\n\t\treturn SQLResourcesClientListSQLRoleDefinitionsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *RecordSetsClient) listByTypeHandleResponse(resp *azcore.Response) (RecordSetListResultResponse, error) {\n\tvar val *RecordSetListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn RecordSetListResultResponse{}, err\n\t}\n\treturn 
RecordSetListResultResponse{RawResponse: resp.Response, RecordSetListResult: val}, nil\n}", "func (client *CompliancesClient) listHandleResponse(resp *http.Response) (CompliancesClientListResponse, error) {\n\tresult := CompliancesClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ComplianceList); err != nil {\n\t\treturn CompliancesClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *RecommendationsClient) listHandleResponse(resp *http.Response) (RecommendationsListResponse, error) {\n\tresult := RecommendationsListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RecommendationCollection); err != nil {\n\t\treturn RecommendationsListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *VirtualMachineImageTemplatesClient) listHandleResponse(resp *http.Response) (VirtualMachineImageTemplatesClientListResponse, error) {\n\tresult := VirtualMachineImageTemplatesClientListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ImageTemplateListResult); err != nil {\n\t\treturn VirtualMachineImageTemplatesClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DevicesClient) listByResourceGroupHandleResponse(resp *http.Response) (DevicesClientListByResourceGroupResponse, error) {\n\tresult := DevicesClientListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DeviceListResult); err != nil {\n\t\treturn DevicesClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DefenderSettingsClient) listHandleResponse(resp *http.Response) (DefenderSettingsClientListResponse, error) {\n\tresult := DefenderSettingsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DefenderSettingsList); err != nil {\n\t\treturn DefenderSettingsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *RedisClient) listByResourceGroupHandleResponse(resp *http.Response) (RedisListByResourceGroupResponse, error) {\n\tresult := RedisListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RedisListResult); err != nil {\n\t\treturn RedisListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) listWebJobsSlotHandleResponse(resp *http.Response) (WebAppsListWebJobsSlotResponse, error) {\n\tresult := WebAppsListWebJobsSlotResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.WebJobCollection); err != nil {\n\t\treturn WebAppsListWebJobsSlotResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) listMetadataHandleResponse(resp *http.Response) (WebAppsListMetadataResponse, error) {\n\tresult := WebAppsListMetadataResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.StringDictionary); err != nil {\n\t\treturn WebAppsListMetadataResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *CloudServicesClient) listHandleResponse(resp *http.Response) (CloudServicesClientListResponse, error) {\n\tresult := CloudServicesClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CloudServiceListResult); err != nil {\n\t\treturn CloudServicesClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ManagedClustersClient) listByResourceGroupHandleResponse(resp *http.Response) 
(ManagedClustersClientListByResourceGroupResponse, error) {\n\tresult := ManagedClustersClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedClusterListResult); err != nil {\n\t\treturn ManagedClustersClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualNetworkTapsClient) listAllHandleResponse(resp *azcore.Response) (VirtualNetworkTapListResultResponse, error) {\n\tvar val *VirtualNetworkTapListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn VirtualNetworkTapListResultResponse{}, err\n\t}\n\treturn VirtualNetworkTapListResultResponse{RawResponse: resp.Response, VirtualNetworkTapListResult: val}, nil\n}", "func (client *BuildServiceClient) listBuildResultsHandleResponse(resp *http.Response) (BuildServiceClientListBuildResultsResponse, error) {\n\tresult := BuildServiceClientListBuildResultsResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.BuildResultCollection); err != nil {\n\t\treturn BuildServiceClientListBuildResultsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ClustersClient) listByResourceGroupHandleResponse(resp *http.Response) (ClustersClientListByResourceGroupResponse, error) {\n\tresult := ClustersClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ClusterListResult); err != nil {\n\t\treturn ClustersClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WorkspacesClient) listByResourceGroupHandleResponse(resp *http.Response) (WorkspacesListByResourceGroupResponse, error) {\n\tresult := WorkspacesListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.WorkspaceListResult); err != nil {\n\t\treturn WorkspacesListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *TablesClient) listByWorkspaceHandleResponse(resp *http.Response) (TablesClientListByWorkspaceResponse, error) {\n\tresult := TablesClientListByWorkspaceResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.TablesListResult); err != nil {\n\t\treturn TablesClientListByWorkspaceResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *FactoriesClient) listByResourceGroupHandleResponse(resp *http.Response) (FactoriesClientListByResourceGroupResponse, error) {\n\tresult := FactoriesClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.FactoryListResponse); err != nil {\n\t\treturn FactoriesClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ClustersClient) listByResourceGroupHandleResponse(resp *http.Response) (ClustersListByResourceGroupResponse, error) {\n\tresult := ClustersListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ClusterListResult); err != nil {\n\t\treturn ClustersListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *AccountsClient) listHandleResponse(resp *http.Response) (AccountsClientListResponse, error) {\n\tresult := AccountsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AccountListResult); err != nil {\n\t\treturn AccountsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ManagedInstancesClient) listByResourceGroupHandleResponse(resp *http.Response) (ManagedInstancesClientListByResourceGroupResponse, error) {\n\tresult := 
ManagedInstancesClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedInstanceListResult); err != nil {\n\t\treturn ManagedInstancesClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *PolicyDefinitionsClient) listByManagementGroupHandleResponse(resp *http.Response) (PolicyDefinitionsListByManagementGroupResponse, error) {\n\tresult := PolicyDefinitionsListByManagementGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinitionListResult); err != nil {\n\t\treturn PolicyDefinitionsListByManagementGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *RecommendationsClient) listHandleResponse(resp *http.Response) (RecommendationsClientListResponse, error) {\n\tresult := RecommendationsClientListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RecommendationCollection); err != nil {\n\t\treturn RecommendationsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *BuildServiceClient) listBuildServicesHandleResponse(resp *http.Response) (BuildServiceClientListBuildServicesResponse, error) {\n\tresult := BuildServiceClientListBuildServicesResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.BuildServiceCollection); err != nil {\n\t\treturn BuildServiceClientListBuildServicesResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *FileServicesClient) listHandleResponse(resp *azcore.Response) (FileServiceItemsResponse, error) {\n\tvar val *FileServiceItems\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn FileServiceItemsResponse{}, err\n\t}\n\treturn FileServiceItemsResponse{RawResponse: resp.Response, FileServiceItems: val}, nil\n}", "func (client *IPAllocationsClient) listByResourceGroupHandleResponse(resp *http.Response) (IPAllocationsClientListByResourceGroupResponse, error) {\n\tresult := IPAllocationsClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.IPAllocationListResult); err != nil {\n\t\treturn IPAllocationsClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}" ]
[ "0.7082903", "0.65406626", "0.6517782", "0.6450229", "0.6407186", "0.6368368", "0.6320369", "0.626896", "0.62285346", "0.6223435", "0.6213455", "0.62095153", "0.61994046", "0.617806", "0.6167983", "0.6167837", "0.6165637", "0.6164217", "0.6158206", "0.61385083", "0.6090056", "0.6078462", "0.6072821", "0.60237056", "0.6017464", "0.6001595", "0.5975038", "0.5960289", "0.59422505", "0.59361863", "0.592639", "0.592028", "0.5916019", "0.59119767", "0.5893246", "0.5877771", "0.5869159", "0.5868357", "0.5867682", "0.58650106", "0.58632493", "0.58578235", "0.5857489", "0.5828229", "0.58272356", "0.5816861", "0.58163357", "0.5815506", "0.5812888", "0.5812069", "0.58032197", "0.58030283", "0.57948226", "0.5785845", "0.577627", "0.57685775", "0.5765737", "0.57613456", "0.57539445", "0.574249", "0.573961", "0.57341635", "0.5728227", "0.5726798", "0.57266325", "0.57235986", "0.57187265", "0.57162124", "0.56988835", "0.56934255", "0.5691408", "0.56862575", "0.568147", "0.5678593", "0.5673003", "0.5664103", "0.5662639", "0.56430733", "0.5640552", "0.563833", "0.5637361", "0.5636317", "0.56338286", "0.5631075", "0.56310207", "0.5628609", "0.56202894", "0.56132126", "0.56074286", "0.5604131", "0.5598924", "0.5598117", "0.5591202", "0.5587291", "0.5585898", "0.5584386", "0.5574499", "0.5572008", "0.55681145", "0.5563129" ]
0.85405135
0
listByManagementGroupCreateRequest creates the ListByManagementGroup request.
func (client *PolicyDefinitionsClient) listByManagementGroupCreateRequest(ctx context.Context, managementGroupID string, options *PolicyDefinitionsListByManagementGroupOptions) (*policy.Request, error) {
	urlPath := "/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions"
	if managementGroupID == "" {
		return nil, errors.New("parameter managementGroupID cannot be empty")
	}
	urlPath = strings.ReplaceAll(urlPath, "{managementGroupId}", url.PathEscape(managementGroupID))
	req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("api-version", "2021-06-01")
	if options != nil && options.Top != nil {
		reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	unencodedParams := []string{req.Raw().URL.RawQuery}
	if options != nil && options.Filter != nil {
		unencodedParams = append(unencodedParams, "$filter="+*options.Filter)
	}
	req.Raw().URL.RawQuery = strings.Join(unencodedParams, "&")
	req.Raw().Header.Set("Accept", "application/json")
	return req, nil
}
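One detail worth noting above: $top is set through the encoded query values, while $filter is appended to the raw query string unencoded so the OData expression reaches the service verbatim. Below is a small standard-library sketch of that split; the endpoint, management group name, and filter value are hypothetical examples, not values taken from the SDK.

package main

import (
	"fmt"
	"net/url"
	"strconv"
	"strings"
)

func main() {
	// Hypothetical request URL for a management-group-scoped listing.
	base, err := url.Parse("https://management.azure.com/providers/Microsoft.Management/managementGroups/contoso-mg/providers/Microsoft.Authorization/policyDefinitions")
	if err != nil {
		panic(err)
	}

	// Encoded parameters go through url.Values, as with reqQP above;
	// Encode percent-escapes characters such as '$', spaces, and quotes.
	qp := url.Values{}
	qp.Set("api-version", "2021-06-01")
	top := int32(10)
	qp.Set("$top", strconv.FormatInt(int64(top), 10))
	base.RawQuery = qp.Encode()

	// The $filter expression is appended without encoding, mirroring the
	// unencodedParams handling, so the OData text is sent as written.
	filter := "policyType eq 'BuiltIn'"
	base.RawQuery = strings.Join([]string{base.RawQuery, "$filter=" + filter}, "&")

	fmt.Println(base.String())
}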
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (client *MonitorsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *MonitorsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ManagedInstancesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ManagedInstancesClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ManagedClustersClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ManagedClustersClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ClustersClient) 
listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ClustersClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/serverGroupsv2\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-11-08\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ContainerGroupsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ContainerGroupsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerInstance/containerGroups\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *DevicesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *DevicesClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HybridNetwork/devices\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ClustersClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ClustersListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/clusters\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *AvailabilityGroupListenersClient) listByGroupCreateRequest(ctx context.Context, resourceGroupName string, sqlVirtualMachineGroupName string, options *AvailabilityGroupListenersClientListByGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachineGroups/{sqlVirtualMachineGroupName}/availabilityGroupListeners\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif sqlVirtualMachineGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter sqlVirtualMachineGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sqlVirtualMachineGroupName}\", url.PathEscape(sqlVirtualMachineGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *DiskEncryptionSetsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *DiskEncryptionSetsListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func 
(client *CustomAssessmentAutomationsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *CustomAssessmentAutomationsListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Security/customAssessmentAutomations\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *VirtualMachinesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *VirtualMachinesClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-10-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *PolicyDefinitionsClient) getAtManagementGroupCreateRequest(ctx context.Context, policyDefinitionName string, managementGroupID string, options *PolicyDefinitionsGetAtManagementGroupOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}\"\n\tif policyDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter policyDefinitionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{policyDefinitionName}\", url.PathEscape(policyDefinitionName))\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *SQLVirtualMachinesClient) 
listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *SQLVirtualMachinesClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-03-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ServersClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ServersClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *LocalRulestacksClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *LocalRulestacksClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-29\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *MetricAlertsClient) listByResourceGroupCreateRequest(ctx 
context.Context, resourceGroupName string, options *MetricAlertsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/metricAlerts\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client UsageDetailsClient) ListByManagementGroupSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}", "func (client *HardwareComponentGroupsClient) listByDeviceCreateRequest(ctx context.Context, deviceName string, resourceGroupName string, managerName string, options *HardwareComponentGroupsClientListByDeviceOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorSimple/managers/{managerName}/devices/{deviceName}/hardwareComponentGroups\"\n\turlPath = strings.ReplaceAll(urlPath, \"{deviceName}\", deviceName)\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", client.subscriptionID)\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", resourceGroupName)\n\turlPath = strings.ReplaceAll(urlPath, \"{managerName}\", managerName)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *IPAllocationsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *IPAllocationsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = 
[]string{\"application/json\"}\n\treturn req, nil\n}", "func (client UsageDetailsClient) ListByManagementGroup(ctx context.Context, managementGroupID string, expand string, filter string, skiptoken string, top *int32, apply string) (result UsageDetailsListResultPage, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/UsageDetailsClient.ListByManagementGroup\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.udlr.Response.Response != nil {\n\t\t\t\tsc = result.udlr.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tif err := validation.Validate([]validation.Validation{\n\t\t{TargetValue: top,\n\t\t\tConstraints: []validation.Constraint{{Target: \"top\", Name: validation.Null, Rule: false,\n\t\t\t\tChain: []validation.Constraint{{Target: \"top\", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil},\n\t\t\t\t\t{Target: \"top\", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},\n\t\t\t\t}}}}}); err != nil {\n\t\treturn result, validation.NewError(\"consumption.UsageDetailsClient\", \"ListByManagementGroup\", err.Error())\n\t}\n\n\tresult.fn = client.listByManagementGroupNextResults\n\treq, err := client.ListByManagementGroupPreparer(ctx, managementGroupID, expand, filter, skiptoken, top, apply)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"consumption.UsageDetailsClient\", \"ListByManagementGroup\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListByManagementGroupSender(req)\n\tif err != nil {\n\t\tresult.udlr.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"consumption.UsageDetailsClient\", \"ListByManagementGroup\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.udlr, err = client.ListByManagementGroupResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"consumption.UsageDetailsClient\", \"ListByManagementGroup\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}", "func (client *ConnectedEnvironmentsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ConnectedEnvironmentsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/connectedEnvironments\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *DedicatedHostsClient) listByHostGroupCreateRequest(ctx context.Context, resourceGroupName string, hostGroupName string, options *DedicatedHostsListByHostGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts\"\n\tif 
resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif hostGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter hostGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{hostGroupName}\", url.PathEscape(hostGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *RedisClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *RedisListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redis\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-12-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *WorkspacesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *WorkspacesListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *SyncGroupsClient) listByDatabaseCreateRequest(ctx context.Context, resourceGroupName string, serverName string, databaseName string, options *SyncGroupsClientListByDatabaseOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/syncGroups\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serverName == \"\" {\n\t\treturn nil, errors.New(\"parameter serverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serverName}\", url.PathEscape(serverName))\n\tif databaseName == \"\" {\n\t\treturn nil, errors.New(\"parameter databaseName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{databaseName}\", url.PathEscape(databaseName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client UsageDetailsClient) ListByManagementGroupPreparer(ctx context.Context, managementGroupID string, expand string, filter string, skiptoken string, top *int32, apply string) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"managementGroupId\": autorest.Encode(\"path\", managementGroupID),\n\t}\n\n\tconst APIVersion = \"2018-06-30\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\tif len(expand) > 0 {\n\t\tqueryParameters[\"$expand\"] = autorest.Encode(\"query\", expand)\n\t}\n\tif len(filter) > 0 {\n\t\tqueryParameters[\"$filter\"] = autorest.Encode(\"query\", filter)\n\t}\n\tif len(skiptoken) > 0 {\n\t\tqueryParameters[\"$skiptoken\"] = autorest.Encode(\"query\", skiptoken)\n\t}\n\tif top != nil {\n\t\tqueryParameters[\"$top\"] = autorest.Encode(\"query\", *top)\n\t}\n\tif len(apply) > 0 {\n\t\tqueryParameters[\"$apply\"] = autorest.Encode(\"query\", apply)\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsGet(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPathParameters(\"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Consumption/usageDetails\", pathParameters),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func (client *AlertProcessingRulesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *AlertProcessingRulesClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AlertsManagement/actionRules\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, 
runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-08-08\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *CapacitiesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *CapacitiesClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBIDedicated/capacities\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-01-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *CassandraClustersClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *CassandraClustersClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-03-15-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *VirtualNetworkTapsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *VirtualNetworkTapsListByResourceGroupOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-07-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ServersClient) 
listByResourceGroupCreateRequest(ctx context.Context, resourceGroup string, options *ServersClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.FluidRelay/fluidRelayServers\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroup == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroup cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroup}\", url.PathEscape(resourceGroup))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *SQLVirtualMachinesClient) listBySQLVMGroupCreateRequest(ctx context.Context, resourceGroupName string, sqlVirtualMachineGroupName string, options *SQLVirtualMachinesClientListBySQLVMGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachineGroups/{sqlVirtualMachineGroupName}/sqlVirtualMachines\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif sqlVirtualMachineGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter sqlVirtualMachineGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sqlVirtualMachineGroupName}\", url.PathEscape(sqlVirtualMachineGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-03-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateListExperimentGroupsRequest() (request *ListExperimentGroupsRequest) {\n\trequest = &ListExperimentGroupsRequest{\n\t\tRoaRequest: &requests.RoaRequest{},\n\t}\n\trequest.InitWithApiInfo(\"PaiRecService\", \"2022-12-13\", \"ListExperimentGroups\", \"/api/v1/experimentgroups\", \"\", \"\")\n\trequest.Method = requests.GET\n\treturn\n}", "func (client *DedicatedHostsClient) listByHostGroupCreateRequest(ctx context.Context, resourceGroupName string, hostGroupName string, options *DedicatedHostsListByHostGroupOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{hostGroupName}\", url.PathEscape(hostGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", 
url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *VirtualRoutersClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *VirtualRoutersClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *DataCollectionEndpointsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *DataCollectionEndpointsListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionEndpoints\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *IotSecuritySolutionClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *IotSecuritySolutionClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/iotSecuritySolutions\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), 
urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-08-01\")\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *GroupClient) listByServiceCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, options *GroupListByServiceOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\tif options != nil && options.Skip != nil {\n\t\treqQP.Set(\"$skip\", strconv.FormatInt(int64(*options.Skip), 10))\n\t}\n\treqQP.Set(\"api-version\", \"2021-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *DataCollectionEndpointsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *DataCollectionEndpointsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionEndpoints\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *CertificateOrdersClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *CertificateOrdersClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders\"\n\tif resourceGroupName == \"\" 
{\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ContainerGroupsClient) listCreateRequest(ctx context.Context, options *ContainerGroupsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.ContainerInstance/containerGroups\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *AccountsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *AccountsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CognitiveServices/accounts\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *FactoriesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *FactoriesClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := 
runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *VirtualMachineImageTemplatesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *VirtualMachineImageTemplatesClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.VirtualMachineImages/imageTemplates\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-10-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *WebAppsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *WebAppsListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.IncludeSlots != nil {\n\t\treqQP.Set(\"includeSlots\", strconv.FormatBool(*options.IncludeSlots))\n\t}\n\treqQP.Set(\"api-version\", \"2021-02-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *PolicyDefinitionsClient) createOrUpdateAtManagementGroupCreateRequest(ctx context.Context, policyDefinitionName string, managementGroupID string, parameters PolicyDefinition, options *PolicyDefinitionsCreateOrUpdateAtManagementGroupOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}\"\n\tif policyDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter policyDefinitionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{policyDefinitionName}\", url.PathEscape(policyDefinitionName))\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = 
strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *CapacityReservationsClient) listByCapacityReservationGroupCreateRequest(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, options *CapacityReservationsListByCapacityReservationGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}/capacityReservations\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif capacityReservationGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter capacityReservationGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{capacityReservationGroupName}\", url.PathEscape(capacityReservationGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateListServerGroupsRequest() (request *ListServerGroupsRequest) {\n\trequest = &ListServerGroupsRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Alb\", \"2020-06-16\", \"ListServerGroups\", \"alb\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *SpatialAnchorsAccountsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *SpatialAnchorsAccountsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MixedReality/spatialAnchorsAccounts\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-03-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *AvailabilitySetsClient) listCreateRequest(ctx context.Context, resourceGroupName 
string, options *AvailabilitySetsListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ClustersClient) listCreateRequest(ctx context.Context, options *ClustersClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.DBforPostgreSQL/serverGroupsv2\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-11-08\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateListManagedPrivateSpacesRequest() (request *ListManagedPrivateSpacesRequest) {\n\trequest = &ListManagedPrivateSpacesRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"SmartHosting\", \"2020-08-01\", \"ListManagedPrivateSpaces\", \"smarthosting\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *PolicyDefinitionsClient) deleteAtManagementGroupCreateRequest(ctx context.Context, policyDefinitionName string, managementGroupID string, options *PolicyDefinitionsDeleteAtManagementGroupOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}\"\n\tif policyDefinitionName == \"\" {\n\t\treturn nil, errors.New(\"parameter policyDefinitionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{policyDefinitionName}\", url.PathEscape(policyDefinitionName))\n\tif managementGroupID == \"\" {\n\t\treturn nil, errors.New(\"parameter managementGroupID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementGroupId}\", url.PathEscape(managementGroupID))\n\treq, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *PrivateDNSZoneGroupsClient) listCreateRequest(ctx context.Context, privateEndpointName string, resourceGroupName string, options *PrivateDNSZoneGroupsClientListOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateEndpoints/{privateEndpointName}/privateDnsZoneGroups\"\n\tif privateEndpointName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateEndpointName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateEndpointName}\", url.PathEscape(privateEndpointName))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client UsageDetailsClient) ListByManagementGroupResponder(resp *http.Response) (result UsageDetailsListResult, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *VirtualMachineScaleSetsClient) listCreateRequest(ctx context.Context, resourceGroupName string, options *VirtualMachineScaleSetsListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateDescribeScalingGroupsRequest() (request *DescribeScalingGroupsRequest) {\n\trequest = &DescribeScalingGroupsRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Ess\", \"2014-08-28\", \"DescribeScalingGroups\", \"ess\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (s *ServiceOp) List(ctx context.Context, input *ListGroupsInput) (*ListGroupsOutput, error) {\n\tr := client.NewRequest(http.MethodGet, \"/azure/compute/group\")\n\tresp, err := client.RequireOK(s.Client.Do(ctx, r))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tgs, err := groupsFromHttpResponse(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &ListGroupsOutput{Groups: gs}, nil\n}", "func (s *ServiceOp) List(ctx context.Context, input *ListGroupsInput) (*ListGroupsOutput, error) 
{\n\tr := client.NewRequest(http.MethodGet, \"/compute/azure/group\")\n\tresp, err := client.RequireOK(s.Client.Do(ctx, r))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tgs, err := groupsFromHttpResponse(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &ListGroupsOutput{Groups: gs}, nil\n}", "func (client *SyncGroupsClient) listLogsCreateRequest(ctx context.Context, resourceGroupName string, serverName string, databaseName string, syncGroupName string, startTime string, endTime string, typeParam SyncGroupsType, options *SyncGroupsClientListLogsOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/syncGroups/{syncGroupName}/logs\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serverName == \"\" {\n\t\treturn nil, errors.New(\"parameter serverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serverName}\", url.PathEscape(serverName))\n\tif databaseName == \"\" {\n\t\treturn nil, errors.New(\"parameter databaseName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{databaseName}\", url.PathEscape(databaseName))\n\tif syncGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter syncGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{syncGroupName}\", url.PathEscape(syncGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"startTime\", startTime)\n\treqQP.Set(\"endTime\", endTime)\n\treqQP.Set(\"type\", string(typeParam))\n\tif options != nil && options.ContinuationToken != nil {\n\t\treqQP.Set(\"continuationToken\", *options.ContinuationToken)\n\t}\n\treqQP.Set(\"api-version\", \"2020-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client UsageDetailsClient) ListForBillingPeriodByManagementGroupPreparer(ctx context.Context, managementGroupID string, billingPeriodName string, expand string, filter string, apply string, skiptoken string, top *int32) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"billingPeriodName\": autorest.Encode(\"path\", billingPeriodName),\n\t\t\"managementGroupId\": autorest.Encode(\"path\", managementGroupID),\n\t}\n\n\tconst APIVersion = \"2018-06-30\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\tif len(expand) > 0 {\n\t\tqueryParameters[\"$expand\"] = autorest.Encode(\"query\", expand)\n\t}\n\tif len(filter) > 0 {\n\t\tqueryParameters[\"$filter\"] = autorest.Encode(\"query\", filter)\n\t}\n\tif len(apply) > 0 {\n\t\tqueryParameters[\"$apply\"] = autorest.Encode(\"query\", apply)\n\t}\n\tif len(skiptoken) > 0 {\n\t\tqueryParameters[\"$skiptoken\"] = autorest.Encode(\"query\", skiptoken)\n\t}\n\tif top != nil {\n\t\tqueryParameters[\"$top\"] = autorest.Encode(\"query\", *top)\n\t}\n\n\tpreparer := 
autorest.CreatePreparer(\n\t\tautorest.AsGet(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPathParameters(\"/providers/Microsoft.Management/managementGroups/{managementGroupId}/providers/Microsoft.Billing/billingPeriods/{billingPeriodName}/providers/Microsoft.Consumption/usageDetails\", pathParameters),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func (client *PeeringPoliciesClient) listByManagedNetworkCreateRequest(ctx context.Context, resourceGroupName string, managedNetworkName string, options *PeeringPoliciesClientListByManagedNetworkOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetwork/managedNetworks/{managedNetworkName}/managedNetworkPeeringPolicies\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif managedNetworkName == \"\" {\n\t\treturn nil, errors.New(\"parameter managedNetworkName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedNetworkName}\", url.PathEscape(managedNetworkName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-06-01-preview\")\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\tif options != nil && options.Skiptoken != nil {\n\t\treqQP.Set(\"$skiptoken\", *options.Skiptoken)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *AgentPoolsClient) listByKubernetesClusterCreateRequest(ctx context.Context, resourceGroupName string, kubernetesClusterName string, options *AgentPoolsClientListByKubernetesClusterOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetworkCloud/kubernetesClusters/{kubernetesClusterName}/agentPools\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif kubernetesClusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter kubernetesClusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{kubernetesClusterName}\", url.PathEscape(kubernetesClusterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *KpiClient) listByHubCreateRequest(ctx context.Context, resourceGroupName string, hubName 
string, options *KpiClientListByHubOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CustomerInsights/hubs/{hubName}/kpi\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif hubName == \"\" {\n\t\treturn nil, errors.New(\"parameter hubName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{hubName}\", url.PathEscape(hubName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-04-26\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client UsageDetailsClient) ListForBillingPeriodByManagementGroup(ctx context.Context, managementGroupID string, billingPeriodName string, expand string, filter string, apply string, skiptoken string, top *int32) (result UsageDetailsListResultPage, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/UsageDetailsClient.ListForBillingPeriodByManagementGroup\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.udlr.Response.Response != nil {\n\t\t\t\tsc = result.udlr.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tif err := validation.Validate([]validation.Validation{\n\t\t{TargetValue: top,\n\t\t\tConstraints: []validation.Constraint{{Target: \"top\", Name: validation.Null, Rule: false,\n\t\t\t\tChain: []validation.Constraint{{Target: \"top\", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil},\n\t\t\t\t\t{Target: \"top\", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},\n\t\t\t\t}}}}}); err != nil {\n\t\treturn result, validation.NewError(\"consumption.UsageDetailsClient\", \"ListForBillingPeriodByManagementGroup\", err.Error())\n\t}\n\n\tresult.fn = client.listForBillingPeriodByManagementGroupNextResults\n\treq, err := client.ListForBillingPeriodByManagementGroupPreparer(ctx, managementGroupID, billingPeriodName, expand, filter, apply, skiptoken, top)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"consumption.UsageDetailsClient\", \"ListForBillingPeriodByManagementGroup\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListForBillingPeriodByManagementGroupSender(req)\n\tif err != nil {\n\t\tresult.udlr.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"consumption.UsageDetailsClient\", \"ListForBillingPeriodByManagementGroup\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.udlr, err = client.ListForBillingPeriodByManagementGroupResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"consumption.UsageDetailsClient\", \"ListForBillingPeriodByManagementGroup\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}", "func (client *ManagedClustersClient) listCreateRequest(ctx context.Context, options *ManagedClustersClientListOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedClusters\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *SchemaRegistryClient) listByNamespaceCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, options *SchemaRegistryClientListByNamespaceOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/schemagroups\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif namespaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter namespaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{namespaceName}\", url.PathEscape(namespaceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-10-01-preview\")\n\tif options != nil && options.Skip != nil {\n\t\treqQP.Set(\"$skip\", strconv.FormatInt(int64(*options.Skip), 10))\n\t}\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *MonitorsClient) listCreateRequest(ctx context.Context, options *MonitorsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Datadog/monitors\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *RegistrationDefinitionsClient) listCreateRequest(ctx context.Context, scope string, options *RegistrationDefinitionsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.ManagedServices/registrationDefinitions\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != 
nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-01-preview\")\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ManagedInstancesClient) listByManagedInstanceCreateRequest(ctx context.Context, resourceGroupName string, managedInstanceName string, options *ManagedInstancesClientListByManagedInstanceOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/topqueries\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif managedInstanceName == \"\" {\n\t\treturn nil, errors.New(\"parameter managedInstanceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedInstanceName}\", url.PathEscape(managedInstanceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.NumberOfQueries != nil {\n\t\treqQP.Set(\"numberOfQueries\", strconv.FormatInt(int64(*options.NumberOfQueries), 10))\n\t}\n\tif options != nil && options.Databases != nil {\n\t\treqQP.Set(\"databases\", *options.Databases)\n\t}\n\tif options != nil && options.StartTime != nil {\n\t\treqQP.Set(\"startTime\", *options.StartTime)\n\t}\n\tif options != nil && options.EndTime != nil {\n\t\treqQP.Set(\"endTime\", *options.EndTime)\n\t}\n\tif options != nil && options.Interval != nil {\n\t\treqQP.Set(\"interval\", string(*options.Interval))\n\t}\n\tif options != nil && options.AggregationFunction != nil {\n\t\treqQP.Set(\"aggregationFunction\", string(*options.AggregationFunction))\n\t}\n\tif options != nil && options.ObservationMetric != nil {\n\t\treqQP.Set(\"observationMetric\", string(*options.ObservationMetric))\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ManagedDatabasesClient) listByInstanceCreateRequest(ctx context.Context, resourceGroupName string, managedInstanceName string, options *ManagedDatabasesClientListByInstanceOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif managedInstanceName == \"\" {\n\t\treturn nil, errors.New(\"parameter managedInstanceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedInstanceName}\", url.PathEscape(managedInstanceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter 
client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateCreateMonitorGroupRequest() (request *CreateMonitorGroupRequest) {\n\trequest = &CreateMonitorGroupRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Cms\", \"2019-01-01\", \"CreateMonitorGroup\", \"cms\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateCreateMonitorGroupByResourceGroupIdRequest() (request *CreateMonitorGroupByResourceGroupIdRequest) {\n\trequest = &CreateMonitorGroupByResourceGroupIdRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Cms\", \"2019-01-01\", \"CreateMonitorGroupByResourceGroupId\", \"cms\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client UsageDetailsClient) ListForBillingPeriodByManagementGroupSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}", "func (client *MonitorsClient) listHostsCreateRequest(ctx context.Context, resourceGroupName string, monitorName string, options *MonitorsClientListHostsOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors/{monitorName}/listHosts\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif monitorName == \"\" {\n\t\treturn nil, errors.New(\"parameter monitorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{monitorName}\", url.PathEscape(monitorName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ManagedInstancesClient) listCreateRequest(ctx context.Context, options *ManagedInstancesClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Sql/managedInstances\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Expand != nil 
{\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *TagRulesClient) listCreateRequest(ctx context.Context, resourceGroupName string, monitorName string, options *TagRulesClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Dynatrace.Observability/monitors/{monitorName}/tagRules\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif monitorName == \"\" {\n\t\treturn nil, errors.New(\"parameter monitorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{monitorName}\", url.PathEscape(monitorName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-27\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func NewListCmd(f *cmdutil.Factory) *ListCmd {\n\tccmd := &ListCmd{\n\t\tfactory: f,\n\t}\n\tcmd := &cobra.Command{\n\t\tUse: \"list\",\n\t\tShort: \"List smart group collection\",\n\t\tLong: `Get a collection of smart groups based on filter parameters`,\n\t\tExample: heredoc.Doc(`\n\t\t\t$ c8y smartgroups list\n\t\t\tGet a list of smart groups\n\n\t\t\t$ c8y smartgroups list --name \"myText*\"\n\t\t\tGet a list of smart groups with the names starting with 'myText'\n\n\t\t\t$ c8y smartgroups list --name \"myText*\" | c8y devices list\n\t\t\tGet a list of smart groups with their names starting with \"myText\", then get the devices from the smart groups\n `),\n\t\tPreRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn nil\n\t\t},\n\t\tRunE: ccmd.RunE,\n\t}\n\n\tcmd.SilenceUsage = true\n\n\tcmd.Flags().String(\"name\", \"\", \"Filter by name\")\n\tcmd.Flags().String(\"fragmentType\", \"\", \"Filter by fragment type\")\n\tcmd.Flags().String(\"owner\", \"\", \"Filter by owner\")\n\tcmd.Flags().String(\"deviceQuery\", \"\", \"Filter by device query\")\n\tcmd.Flags().String(\"query\", \"\", \"Additional query filter (accepts pipeline)\")\n\tcmd.Flags().String(\"queryTemplate\", \"\", \"String template to be used when applying the given query. Use %s to reference the query/pipeline input\")\n\tcmd.Flags().String(\"orderBy\", \"name\", \"Order by. e.g. 
_id asc or name asc or creationTime.date desc\")\n\tcmd.Flags().Bool(\"onlyInvisible\", false, \"Only include invisible smart groups\")\n\tcmd.Flags().Bool(\"onlyVisible\", false, \"Only include visible smart groups\")\n\tcmd.Flags().Bool(\"withParents\", false, \"Include a flat list of all parents and grandparents of the given object\")\n\n\tflags.WithOptions(\n\t\tcmd,\n\t\tflags.WithExtendedPipelineSupport(\"query\", \"query\", false, \"c8y_DeviceQueryString\"),\n\t)\n\n\t// Required flags\n\tccmd.SubCommand = subcommand.NewSubCommand(cmd)\n\n\treturn ccmd\n}", "func (client *ManagementAssociationsClient) listBySubscriptionCreateRequest(ctx context.Context, options *ManagementAssociationsListBySubscriptionOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.OperationsManagement/ManagementAssociations\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2015-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *MonitorsClient) listAPIKeysCreateRequest(ctx context.Context, resourceGroupName string, monitorName string, options *MonitorsClientListAPIKeysOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors/{monitorName}/listApiKeys\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif monitorName == \"\" {\n\t\treturn nil, errors.New(\"parameter monitorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{monitorName}\", url.PathEscape(monitorName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateDeleteCorpGroupRequest() (request *DeleteCorpGroupRequest) {\n\trequest = &DeleteCorpGroupRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Vcs\", \"2020-05-15\", \"DeleteCorpGroup\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (rm *resourceManager) newListRequestPayload(\n\tr *resource,\n) (*svcsdk.DescribeReplicationGroupsInput, error) {\n\tres := &svcsdk.DescribeReplicationGroupsInput{}\n\n\tif r.ko.Spec.ReplicationGroupID != nil {\n\t\tres.SetReplicationGroupId(*r.ko.Spec.ReplicationGroupID)\n\t}\n\n\treturn res, nil\n}", "func CreateGroup(params types.ContextParams, clientSet apimachinery.ClientSetInterface, groupItems []metadata.Group) []Group {\n\tresults := make([]Group, 0)\n\tfor _, grp := range 
groupItems {\n\n\t\tresults = append(results, &group{\n\t\t\tgrp: grp,\n\t\t\tparams: params,\n\t\t\tclientSet: clientSet,\n\t\t})\n\t}\n\n\treturn results\n}", "func (client *RedisClient) listKeysCreateRequest(ctx context.Context, resourceGroupName string, name string, options *RedisListKeysOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redis/{name}/listKeys\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-12-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *AvailabilitySetsClient) listBySubscriptionCreateRequest(ctx context.Context, options *AvailabilitySetsListBySubscriptionOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Compute/availabilitySets\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *MonitorsClient) listMonitoredResourcesCreateRequest(ctx context.Context, resourceGroupName string, monitorName string, options *MonitorsClientListMonitoredResourcesOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Datadog/monitors/{monitorName}/listMonitoredResources\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif monitorName == \"\" {\n\t\treturn nil, errors.New(\"parameter monitorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{monitorName}\", url.PathEscape(monitorName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := 
req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ClustersClient) listCreateRequest(ctx context.Context, options *ClustersListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.OperationalInsights/clusters\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *SapMonitorsClient) listCreateRequest(ctx context.Context, options *SapMonitorsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.HanaOnAzure/sapMonitors\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-02-07-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (c *UMemClient) NewCreateUMemcacheGroupRequest() *CreateUMemcacheGroupRequest {\n\treq := &CreateUMemcacheGroupRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(false)\n\treturn req\n}", "func CreateCreateApplicationGroupRequest() (request *CreateApplicationGroupRequest) {\n\trequest = &CreateApplicationGroupRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"oos\", \"2019-06-01\", \"CreateApplicationGroup\", \"oos\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *CloudServicesClient) listCreateRequest(ctx context.Context, resourceGroupName string, options *CloudServicesClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-04\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = 
[]string{\"application/json\"}\n\treturn req, nil\n}", "func (client *DicomServicesClient) listByWorkspaceCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, options *DicomServicesClientListByWorkspaceOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HealthcareApis/workspaces/{workspaceName}/dicomservices\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ReplicationsClient) listCreateRequest(ctx context.Context, resourceGroupName string, registryName string, options *ReplicationsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif registryName == \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *KustoOperationsClient) listCreateRequest(ctx context.Context, options *KustoOperationsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.Synapse/kustooperations\"\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *DevicesClient) listRegistrationKeyCreateRequest(ctx context.Context, resourceGroupName string, deviceName string, options *DevicesClientListRegistrationKeyOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HybridNetwork/devices/{deviceName}/listRegistrationKey\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif deviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter deviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{deviceName}\", url.PathEscape(deviceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ManagedClustersClient) listClusterMonitoringUserCredentialsCreateRequest(ctx context.Context, resourceGroupName string, resourceName string, options *ManagedClustersClientListClusterMonitoringUserCredentialsOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterMonitoringUserCredential\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(resourceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *WorkflowsClient) listByStorageSyncServiceCreateRequest(ctx context.Context, resourceGroupName string, storageSyncServiceName string, options *WorkflowsClientListByStorageSyncServiceOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorageSync/storageSyncServices/{storageSyncServiceName}/workflows\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif storageSyncServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter storageSyncServiceName cannot be 
empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{storageSyncServiceName}\", url.PathEscape(storageSyncServiceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-09-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *LongTermRetentionManagedInstanceBackupsClient) listByResourceGroupDatabaseCreateRequest(ctx context.Context, resourceGroupName string, locationName string, managedInstanceName string, databaseName string, options *LongTermRetentionManagedInstanceBackupsListByResourceGroupDatabaseOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/locations/{locationName}/longTermRetentionManagedInstances/{managedInstanceName}/longTermRetentionDatabases/{databaseName}/longTermRetentionManagedInstanceBackups\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif locationName == \"\" {\n\t\treturn nil, errors.New(\"parameter locationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{locationName}\", url.PathEscape(locationName))\n\tif managedInstanceName == \"\" {\n\t\treturn nil, errors.New(\"parameter managedInstanceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedInstanceName}\", url.PathEscape(managedInstanceName))\n\tif databaseName == \"\" {\n\t\treturn nil, errors.New(\"parameter databaseName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{databaseName}\", url.PathEscape(databaseName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.OnlyLatestPerDatabase != nil {\n\t\treqQP.Set(\"onlyLatestPerDatabase\", strconv.FormatBool(*options.OnlyLatestPerDatabase))\n\t}\n\tif options != nil && options.DatabaseState != nil {\n\t\treqQP.Set(\"databaseState\", string(*options.DatabaseState))\n\t}\n\treqQP.Set(\"api-version\", \"2020-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *RecordSetsClient) listCreateRequest(ctx context.Context, resourceGroupName string, privateZoneName string, options *RecordSetsListOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateZoneName}/ALL\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif privateZoneName == \"\" {\n\t\treturn nil, errors.New(\"parameter privateZoneName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{privateZoneName}\", 
url.PathEscape(privateZoneName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\tif options != nil && options.Recordsetnamesuffix != nil {\n\t\treqQP.Set(\"$recordsetnamesuffix\", *options.Recordsetnamesuffix)\n\t}\n\treqQP.Set(\"api-version\", \"2020-06-01\")\n\treq.URL.RawQuery = reqQP.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateDescribeContainerGroupMetricRequest() (request *DescribeContainerGroupMetricRequest) {\n\trequest = &DescribeContainerGroupMetricRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Eci\", \"2018-08-08\", \"DescribeContainerGroupMetric\", \"eci\", \"openAPI\")\n\treturn\n}" ]
[ "0.69496083", "0.6796391", "0.67800164", "0.67291677", "0.6685094", "0.65846455", "0.65812635", "0.6464833", "0.64444846", "0.64438474", "0.6417571", "0.6413473", "0.6367035", "0.6345273", "0.6328532", "0.6325075", "0.63092667", "0.6294302", "0.6265202", "0.6218708", "0.62121326", "0.61779267", "0.6159489", "0.61585087", "0.6141909", "0.6141015", "0.6134294", "0.6133518", "0.61290765", "0.6117258", "0.6116931", "0.61121714", "0.6100779", "0.6088963", "0.6086512", "0.6065806", "0.6063861", "0.6050626", "0.6049295", "0.60322016", "0.6028864", "0.59935635", "0.5909858", "0.5892871", "0.58404547", "0.58390075", "0.5838152", "0.5836992", "0.5817903", "0.5776243", "0.571107", "0.56981635", "0.56693554", "0.56627107", "0.5629798", "0.5610828", "0.5542878", "0.54183555", "0.54072577", "0.5387532", "0.53609294", "0.5336984", "0.5300042", "0.5275028", "0.5237035", "0.5234658", "0.52239335", "0.52175343", "0.52085775", "0.51974654", "0.5178501", "0.51559615", "0.5152492", "0.51495236", "0.5123344", "0.5111615", "0.50889766", "0.5083543", "0.5080035", "0.50710946", "0.5070843", "0.50666684", "0.5050772", "0.5041552", "0.5029419", "0.5022239", "0.50211686", "0.50165427", "0.5014785", "0.50112116", "0.5006811", "0.4999826", "0.4992793", "0.49797168", "0.49795702", "0.49787295", "0.4977172", "0.4971931", "0.49700576", "0.49593434" ]
0.78612286
0
listByManagementGroupHandleResponse handles the ListByManagementGroup response.
func (client *PolicyDefinitionsClient) listByManagementGroupHandleResponse(resp *http.Response) (PolicyDefinitionsListByManagementGroupResponse, error) {
	result := PolicyDefinitionsListByManagementGroupResponse{RawResponse: resp}
	if err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinitionListResult); err != nil {
		return PolicyDefinitionsListByManagementGroupResponse{}, runtime.NewResponseError(err, resp)
	}
	return result, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (client UsageDetailsClient) ListByManagementGroupResponder(resp *http.Response) (result UsageDetailsListResult, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *ManagedInstancesClient) listByResourceGroupHandleResponse(resp *http.Response) (ManagedInstancesClientListByResourceGroupResponse, error) {\n\tresult := ManagedInstancesClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedInstanceListResult); err != nil {\n\t\treturn ManagedInstancesClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SQLVirtualMachinesClient) listBySQLVMGroupHandleResponse(resp *http.Response) (SQLVirtualMachinesClientListBySQLVMGroupResponse, error) {\n\tresult := SQLVirtualMachinesClientListBySQLVMGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ListResult); err != nil {\n\t\treturn SQLVirtualMachinesClientListBySQLVMGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *MonitorsClient) listByResourceGroupHandleResponse(resp *http.Response) (MonitorsClientListByResourceGroupResponse, error) {\n\tresult := MonitorsClientListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.MonitorResourceListResponse); err != nil {\n\t\treturn MonitorsClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *RedisClient) listByResourceGroupHandleResponse(resp *http.Response) (RedisListByResourceGroupResponse, error) {\n\tresult := RedisListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RedisListResult); err != nil {\n\t\treturn RedisListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ManagedClustersClient) listByResourceGroupHandleResponse(resp *http.Response) (ManagedClustersClientListByResourceGroupResponse, error) {\n\tresult := ManagedClustersClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedClusterListResult); err != nil {\n\t\treturn ManagedClustersClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DedicatedHostsClient) listByHostGroupHandleResponse(resp *azcore.Response) (DedicatedHostListResultResponse, error) {\n\tvar val *DedicatedHostListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn DedicatedHostListResultResponse{}, err\n\t}\n\treturn DedicatedHostListResultResponse{RawResponse: resp.Response, DedicatedHostListResult: val}, nil\n}", "func (client *GroupClient) listByServiceHandleResponse(resp *http.Response) (GroupListByServiceResponse, error) {\n\tresult := GroupListByServiceResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GroupCollection); err != nil {\n\t\treturn GroupListByServiceResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ContainerGroupsClient) listByResourceGroupHandleResponse(resp *http.Response) (ContainerGroupsClientListByResourceGroupResponse, error) {\n\tresult := ContainerGroupsClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ContainerGroupListResult); err != nil {\n\t\treturn ContainerGroupsClientListByResourceGroupResponse{}, 
err\n\t}\n\treturn result, nil\n}", "func (client *DedicatedHostsClient) listByHostGroupHandleResponse(resp *http.Response) (DedicatedHostsListByHostGroupResponse, error) {\n\tresult := DedicatedHostsListByHostGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DedicatedHostListResult); err != nil {\n\t\treturn DedicatedHostsListByHostGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *ClustersClient) listByResourceGroupHandleResponse(resp *http.Response) (ClustersListByResourceGroupResponse, error) {\n\tresult := ClustersListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ClusterListResult); err != nil {\n\t\treturn ClustersListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *CustomAssessmentAutomationsClient) listByResourceGroupHandleResponse(resp *http.Response) (CustomAssessmentAutomationsListByResourceGroupResponse, error) {\n\tresult := CustomAssessmentAutomationsListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CustomAssessmentAutomationsListResult); err != nil {\n\t\treturn CustomAssessmentAutomationsListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *DevicesClient) listByResourceGroupHandleResponse(resp *http.Response) (DevicesClientListByResourceGroupResponse, error) {\n\tresult := DevicesClientListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DeviceListResult); err != nil {\n\t\treturn DevicesClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SQLVirtualMachinesClient) listByResourceGroupHandleResponse(resp *http.Response) (SQLVirtualMachinesClientListByResourceGroupResponse, error) {\n\tresult := SQLVirtualMachinesClientListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ListResult); err != nil {\n\t\treturn SQLVirtualMachinesClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ClustersClient) listByResourceGroupHandleResponse(resp *http.Response) (ClustersClientListByResourceGroupResponse, error) {\n\tresult := ClustersClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ClusterListResult); err != nil {\n\t\treturn ClustersClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ServersClient) listByResourceGroupHandleResponse(resp *http.Response) (ServersClientListByResourceGroupResponse, error) {\n\tresult := ServersClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ServerListResult); err != nil {\n\t\treturn ServersClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualRoutersClient) listByResourceGroupHandleResponse(resp *http.Response) (VirtualRoutersClientListByResourceGroupResponse, error) {\n\tresult := VirtualRoutersClientListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VirtualRouterListResult); err != nil {\n\t\treturn VirtualRoutersClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SyncGroupsClient) listByDatabaseHandleResponse(resp *http.Response) (SyncGroupsClientListByDatabaseResponse, error) {\n\tresult := SyncGroupsClientListByDatabaseResponse{RawResponse: resp}\n\tif err := 
runtime.UnmarshalAsJSON(resp, &result.SyncGroupListResult); err != nil {\n\t\treturn SyncGroupsClientListByDatabaseResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WorkspacesClient) listByResourceGroupHandleResponse(resp *http.Response) (WorkspacesListByResourceGroupResponse, error) {\n\tresult := WorkspacesListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.WorkspaceListResult); err != nil {\n\t\treturn WorkspacesListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *DiskEncryptionSetsClient) listByResourceGroupHandleResponse(resp *http.Response) (DiskEncryptionSetsListByResourceGroupResponse, error) {\n\tresult := DiskEncryptionSetsListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DiskEncryptionSetList); err != nil {\n\t\treturn DiskEncryptionSetsListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *HardwareComponentGroupsClient) listByDeviceHandleResponse(resp *http.Response) (HardwareComponentGroupsClientListByDeviceResponse, error) {\n\tresult := HardwareComponentGroupsClientListByDeviceResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.HardwareComponentGroupList); err != nil {\n\t\treturn HardwareComponentGroupsClientListByDeviceResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *AvailabilityGroupListenersClient) listByGroupHandleResponse(resp *http.Response) (AvailabilityGroupListenersClientListByGroupResponse, error) {\n\tresult := AvailabilityGroupListenersClientListByGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AvailabilityGroupListenerListResult); err != nil {\n\t\treturn AvailabilityGroupListenersClientListByGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *CassandraClustersClient) listByResourceGroupHandleResponse(resp *http.Response) (CassandraClustersClientListByResourceGroupResponse, error) {\n\tresult := CassandraClustersClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ListClusters); err != nil {\n\t\treturn CassandraClustersClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualMachinesClient) listByResourceGroupHandleResponse(resp *http.Response) (VirtualMachinesClientListByResourceGroupResponse, error) {\n\tresult := VirtualMachinesClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachinesList); err != nil {\n\t\treturn VirtualMachinesClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client UsageDetailsClient) ListByManagementGroupSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}", "func (client *MetricAlertsClient) listByResourceGroupHandleResponse(resp *http.Response) (MetricAlertsClientListByResourceGroupResponse, error) {\n\tresult := MetricAlertsClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.MetricAlertResourceCollection); err != nil {\n\t\treturn MetricAlertsClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ContainerGroupsClient) listHandleResponse(resp *http.Response) (ContainerGroupsClientListResponse, error) {\n\tresult := ContainerGroupsClientListResponse{}\n\tif err := 
runtime.UnmarshalAsJSON(resp, &result.ContainerGroupListResult); err != nil {\n\t\treturn ContainerGroupsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client AccountClient) ListByResourceGroupResponder(resp *http.Response) (result AccountResourceDescriptionList, err error) {\n err = autorest.Respond(\n resp,\n azure.WithErrorUnlessStatusCode(http.StatusOK),\n autorest.ByUnmarshallingJSON(&result),\n autorest.ByClosing())\n result.Response = autorest.Response{Response: resp}\n return\n }", "func (client *LocalRulestacksClient) listByResourceGroupHandleResponse(resp *http.Response) (LocalRulestacksClientListByResourceGroupResponse, error) {\n\tresult := LocalRulestacksClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.LocalRulestackResourceListResult); err != nil {\n\t\treturn LocalRulestacksClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ServersClient) listByResourceGroupHandleResponse(resp *http.Response) (ServersClientListByResourceGroupResponse, error) {\n\tresult := ServersClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ServerList); err != nil {\n\t\treturn ServersClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualNetworkTapsClient) listByResourceGroupHandleResponse(resp *azcore.Response) (VirtualNetworkTapListResultResponse, error) {\n\tvar val *VirtualNetworkTapListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn VirtualNetworkTapListResultResponse{}, err\n\t}\n\treturn VirtualNetworkTapListResultResponse{RawResponse: resp.Response, VirtualNetworkTapListResult: val}, nil\n}", "func (client *CapacityReservationsClient) listByCapacityReservationGroupHandleResponse(resp *http.Response) (CapacityReservationsListByCapacityReservationGroupResponse, error) {\n\tresult := CapacityReservationsListByCapacityReservationGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CapacityReservationListResult); err != nil {\n\t\treturn CapacityReservationsListByCapacityReservationGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client DataControllersClient) ListInGroupResponder(resp *http.Response) (result PageOfDataControllerResource, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client IotHubResourceClient) ListByResourceGroupResponder(resp *http.Response) (result IotHubDescriptionListResult, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client UsageDetailsClient) ListForBillingPeriodByManagementGroupResponder(resp *http.Response) (result UsageDetailsListResult, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *AccountsClient) listByResourceGroupHandleResponse(resp *http.Response) (AccountsClientListByResourceGroupResponse, error) {\n\tresult := AccountsClientListByResourceGroupResponse{}\n\tif 
err := runtime.UnmarshalAsJSON(resp, &result.AccountListResult); err != nil {\n\t\treturn AccountsClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WebAppsClient) listByResourceGroupHandleResponse(resp *http.Response) (WebAppsListByResourceGroupResponse, error) {\n\tresult := WebAppsListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.WebAppCollection); err != nil {\n\t\treturn WebAppsListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *VirtualMachineImageTemplatesClient) listByResourceGroupHandleResponse(resp *http.Response) (VirtualMachineImageTemplatesClientListByResourceGroupResponse, error) {\n\tresult := VirtualMachineImageTemplatesClientListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ImageTemplateListResult); err != nil {\n\t\treturn VirtualMachineImageTemplatesClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *CapacitiesClient) listByResourceGroupHandleResponse(resp *http.Response) (CapacitiesClientListByResourceGroupResponse, error) {\n\tresult := CapacitiesClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DedicatedCapacities); err != nil {\n\t\treturn CapacitiesClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DataCollectionEndpointsClient) listByResourceGroupHandleResponse(resp *http.Response) (DataCollectionEndpointsListByResourceGroupResponse, error) {\n\tresult := DataCollectionEndpointsListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DataCollectionEndpointResourceListResult); err != nil {\n\t\treturn DataCollectionEndpointsListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *AlertProcessingRulesClient) listByResourceGroupHandleResponse(resp *http.Response) (AlertProcessingRulesClientListByResourceGroupResponse, error) {\n\tresult := AlertProcessingRulesClientListByResourceGroupResponse{RawResponse: resp}\n\tif val := resp.Header.Get(\"x-ms-request-id\"); val != \"\" {\n\t\tresult.XMSRequestID = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AlertProcessingRulesList); err != nil {\n\t\treturn AlertProcessingRulesClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ConnectedEnvironmentsClient) listByResourceGroupHandleResponse(resp *http.Response) (ConnectedEnvironmentsClientListByResourceGroupResponse, error) {\n\tresult := ConnectedEnvironmentsClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ConnectedEnvironmentCollection); err != nil {\n\t\treturn ConnectedEnvironmentsClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client CloudEndpointsClient) ListBySyncGroupResponder(resp *http.Response) (result CloudEndpointArray, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *IPAllocationsClient) listByResourceGroupHandleResponse(resp *http.Response) (IPAllocationsClientListByResourceGroupResponse, error) {\n\tresult := IPAllocationsClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.IPAllocationListResult); err != nil 
{\n\t\treturn IPAllocationsClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client Client) ListByResourceGroupResponder(resp *http.Response) (result ListResult, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *DataCollectionEndpointsClient) listByResourceGroupHandleResponse(resp *http.Response) (DataCollectionEndpointsClientListByResourceGroupResponse, error) {\n\tresult := DataCollectionEndpointsClientListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DataCollectionEndpointResourceListResult); err != nil {\n\t\treturn DataCollectionEndpointsClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *IotSecuritySolutionClient) listByResourceGroupHandleResponse(resp *http.Response) (IotSecuritySolutionClientListByResourceGroupResponse, error) {\n\tresult := IotSecuritySolutionClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.IoTSecuritySolutionsList); err != nil {\n\t\treturn IotSecuritySolutionClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client WorkloadNetworksClient) ListVMGroupsResponder(resp *http.Response) (result WorkloadNetworkVMGroupsList, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client UsageDetailsClient) ListByManagementGroup(ctx context.Context, managementGroupID string, expand string, filter string, skiptoken string, top *int32, apply string) (result UsageDetailsListResultPage, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/UsageDetailsClient.ListByManagementGroup\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.udlr.Response.Response != nil {\n\t\t\t\tsc = result.udlr.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tif err := validation.Validate([]validation.Validation{\n\t\t{TargetValue: top,\n\t\t\tConstraints: []validation.Constraint{{Target: \"top\", Name: validation.Null, Rule: false,\n\t\t\t\tChain: []validation.Constraint{{Target: \"top\", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil},\n\t\t\t\t\t{Target: \"top\", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},\n\t\t\t\t}}}}}); err != nil {\n\t\treturn result, validation.NewError(\"consumption.UsageDetailsClient\", \"ListByManagementGroup\", err.Error())\n\t}\n\n\tresult.fn = client.listByManagementGroupNextResults\n\treq, err := client.ListByManagementGroupPreparer(ctx, managementGroupID, expand, filter, skiptoken, top, apply)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"consumption.UsageDetailsClient\", \"ListByManagementGroup\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListByManagementGroupSender(req)\n\tif err != nil {\n\t\tresult.udlr.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"consumption.UsageDetailsClient\", \"ListByManagementGroup\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.udlr, err = client.ListByManagementGroupResponder(resp)\n\tif err != nil {\n\t\terr = 
autorest.NewErrorWithError(err, \"consumption.UsageDetailsClient\", \"ListByManagementGroup\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}", "func (client *PolicyDefinitionsClient) getAtManagementGroupHandleResponse(resp *http.Response) (PolicyDefinitionsGetAtManagementGroupResponse, error) {\n\tresult := PolicyDefinitionsGetAtManagementGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PolicyDefinition); err != nil {\n\t\treturn PolicyDefinitionsGetAtManagementGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *SchemaRegistryClient) listByNamespaceHandleResponse(resp *http.Response) (SchemaRegistryClientListByNamespaceResponse, error) {\n\tresult := SchemaRegistryClientListByNamespaceResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SchemaGroupListResult); err != nil {\n\t\treturn SchemaRegistryClientListByNamespaceResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *CertificateOrdersClient) listByResourceGroupHandleResponse(resp *http.Response) (CertificateOrdersClientListByResourceGroupResponse, error) {\n\tresult := CertificateOrdersClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CertificateOrderCollection); err != nil {\n\t\treturn CertificateOrdersClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SpatialAnchorsAccountsClient) listByResourceGroupHandleResponse(resp *http.Response) (SpatialAnchorsAccountsClientListByResourceGroupResponse, error) {\n\tresult := SpatialAnchorsAccountsClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SpatialAnchorsAccountPage); err != nil {\n\t\treturn SpatialAnchorsAccountsClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *PrivateDNSZoneGroupsClient) listHandleResponse(resp *http.Response) (PrivateDNSZoneGroupsClientListResponse, error) {\n\tresult := PrivateDNSZoneGroupsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PrivateDNSZoneGroupListResult); err != nil {\n\t\treturn PrivateDNSZoneGroupsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *FactoriesClient) listByResourceGroupHandleResponse(resp *http.Response) (FactoriesClientListByResourceGroupResponse, error) {\n\tresult := FactoriesClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.FactoryListResponse); err != nil {\n\t\treturn FactoriesClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client LabClient) ListByResourceGroupResponder(resp *http.Response) (result ResponseWithContinuationLab, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *LongTermRetentionManagedInstanceBackupsClient) listByResourceGroupDatabaseHandleResponse(resp *http.Response) (LongTermRetentionManagedInstanceBackupsListByResourceGroupDatabaseResponse, error) {\n\tresult := LongTermRetentionManagedInstanceBackupsListByResourceGroupDatabaseResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedInstanceLongTermRetentionBackupListResult); err != nil {\n\t\treturn LongTermRetentionManagedInstanceBackupsListByResourceGroupDatabaseResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn 
result, nil\n}", "func (client PolicyStatesClient) ListQueryResultsForManagementGroupResponder(resp *http.Response) (result PolicyStatesQueryResults, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *AFDOriginsClient) listByOriginGroupHandleResponse(resp *http.Response) (AFDOriginsClientListByOriginGroupResponse, error) {\n\tresult := AFDOriginsClientListByOriginGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AFDOriginListResult); err != nil {\n\t\treturn AFDOriginsClientListByOriginGroupResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ManagedInstancesClient) listByManagedInstanceHandleResponse(resp *http.Response) (ManagedInstancesClientListByManagedInstanceResponse, error) {\n\tresult := ManagedInstancesClientListByManagedInstanceResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.TopQueriesListResult); err != nil {\n\t\treturn ManagedInstancesClientListByManagedInstanceResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *PeeringPoliciesClient) listByManagedNetworkHandleResponse(resp *http.Response) (PeeringPoliciesClientListByManagedNetworkResponse, error) {\n\tresult := PeeringPoliciesClientListByManagedNetworkResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PeeringPolicyListResult); err != nil {\n\t\treturn PeeringPoliciesClientListByManagedNetworkResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client UsageDetailsClient) ListByManagementGroupComplete(ctx context.Context, managementGroupID string, expand string, filter string, skiptoken string, top *int32, apply string) (result UsageDetailsListResultIterator, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/UsageDetailsClient.ListByManagementGroup\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.Response().Response.Response != nil {\n\t\t\t\tsc = result.page.Response().Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tresult.page, err = client.ListByManagementGroup(ctx, managementGroupID, expand, filter, skiptoken, top, apply)\n\treturn\n}", "func (client *ReplicationProtectionContainersClient) listByReplicationFabricsHandleResponse(resp *http.Response) (ReplicationProtectionContainersClientListByReplicationFabricsResponse, error) {\n\tresult := ReplicationProtectionContainersClientListByReplicationFabricsResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ProtectionContainerCollection); err != nil {\n\t\treturn ReplicationProtectionContainersClientListByReplicationFabricsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client IotHubResourceClient) ListEventHubConsumerGroupsResponder(resp *http.Response) (result EventHubConsumerGroupsListResult, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *LongTermRetentionManagedInstanceBackupsClient) listByResourceGroupInstanceHandleResponse(resp *http.Response) (LongTermRetentionManagedInstanceBackupsListByResourceGroupInstanceResponse, error) {\n\tresult := LongTermRetentionManagedInstanceBackupsListByResourceGroupInstanceResponse{RawResponse: resp}\n\tif err := 
runtime.UnmarshalAsJSON(resp, &result.ManagedInstanceLongTermRetentionBackupListResult); err != nil {\n\t\treturn LongTermRetentionManagedInstanceBackupsListByResourceGroupInstanceResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client FirewallPolicyRuleGroupsClient) ListResponder(resp *http.Response) (result FirewallPolicyRuleGroupListResult, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *PermissionBindingsClient) listByNamespaceHandleResponse(resp *http.Response) (PermissionBindingsClientListByNamespaceResponse, error) {\n\tresult := PermissionBindingsClientListByNamespaceResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.PermissionBindingsListResult); err != nil {\n\t\treturn PermissionBindingsClientListByNamespaceResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *SyncGroupsClient) listLogsHandleResponse(resp *http.Response) (SyncGroupsClientListLogsResponse, error) {\n\tresult := SyncGroupsClientListLogsResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SyncGroupLogListResult); err != nil {\n\t\treturn SyncGroupsClientListLogsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *KpiClient) listByHubHandleResponse(resp *http.Response) (KpiClientListByHubResponse, error) {\n\tresult := KpiClientListByHubResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KpiListResult); err != nil {\n\t\treturn KpiClientListByHubResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ManagedInstancesClient) listByInstancePoolHandleResponse(resp *http.Response) (ManagedInstancesClientListByInstancePoolResponse, error) {\n\tresult := ManagedInstancesClientListByInstancePoolResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedInstanceListResult); err != nil {\n\t\treturn ManagedInstancesClientListByInstancePoolResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *WorkflowsClient) listByStorageSyncServiceHandleResponse(resp *http.Response) (WorkflowsClientListByStorageSyncServiceResponse, error) {\n\tresult := WorkflowsClientListByStorageSyncServiceResponse{}\n\tif val := resp.Header.Get(\"x-ms-request-id\"); val != \"\" {\n\t\tresult.XMSRequestID = &val\n\t}\n\tif val := resp.Header.Get(\"x-ms-correlation-request-id\"); val != \"\" {\n\t\tresult.XMSCorrelationRequestID = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.WorkflowArray); err != nil {\n\t\treturn WorkflowsClientListByStorageSyncServiceResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *MonitorsClient) listHandleResponse(resp *http.Response) (MonitorsClientListResponse, error) {\n\tresult := MonitorsClientListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.MonitorResourceListResponse); err != nil {\n\t\treturn MonitorsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ReplicationProtectionContainersClient) listHandleResponse(resp *http.Response) (ReplicationProtectionContainersClientListResponse, error) {\n\tresult := ReplicationProtectionContainersClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ProtectionContainerCollection); err != nil {\n\t\treturn ReplicationProtectionContainersClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client DataControllersClient) ListInGroup(ctx 
context.Context, resourceGroupName string) (result PageOfDataControllerResourcePage, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/DataControllersClient.ListInGroup\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.podcr.Response.Response != nil {\n\t\t\t\tsc = result.podcr.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tresult.fn = client.listInGroupNextResults\n\treq, err := client.ListInGroupPreparer(ctx, resourceGroupName)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"azuredata.DataControllersClient\", \"ListInGroup\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListInGroupSender(req)\n\tif err != nil {\n\t\tresult.podcr.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"azuredata.DataControllersClient\", \"ListInGroup\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.podcr, err = client.ListInGroupResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"azuredata.DataControllersClient\", \"ListInGroup\", resp, \"Failure responding to request\")\n\t\treturn\n\t}\n\tif result.podcr.hasNextLink() && result.podcr.IsEmpty() {\n\t\terr = result.NextWithContext(ctx)\n\t\treturn\n\t}\n\n\treturn\n}", "func (client *SyncGroupsClient) listSyncDatabaseIDsHandleResponse(resp *http.Response) (SyncGroupsClientListSyncDatabaseIDsResponse, error) {\n\tresult := SyncGroupsClientListSyncDatabaseIDsResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SyncDatabaseIDListResult); err != nil {\n\t\treturn SyncGroupsClientListSyncDatabaseIDsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ManagedDatabasesClient) listByInstanceHandleResponse(resp *http.Response) (ManagedDatabasesClientListByInstanceResponse, error) {\n\tresult := ManagedDatabasesClientListByInstanceResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedDatabaseListResult); err != nil {\n\t\treturn ManagedDatabasesClientListByInstanceResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *VirtualMachineScaleSetsClient) listHandleResponse(resp *http.Response) (VirtualMachineScaleSetsListResponse, error) {\n\tresult := VirtualMachineScaleSetsListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineScaleSetListResult); err != nil {\n\t\treturn VirtualMachineScaleSetsListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client PolicyStatesClient) ListQueryResultsForManagementGroupSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}", "func handleGetGroups(c *Context, w http.ResponseWriter, r *http.Request) {\n\tpaging, err := parsePaging(r.URL)\n\tif err != nil {\n\t\tc.Logger.WithError(err).Error(\"failed to parse paging parameters\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\twithInstallationCount, err := parseBool(r.URL, model.ShowInstallationCountQueryParameter, false)\n\tif err != nil {\n\t\tc.Logger.WithError(err).Error(\"failed to parse request parameters\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tfilter := &model.GroupFilter{\n\t\tPaging: paging,\n\t\tWithInstallationCount: withInstallationCount,\n\t}\n\n\tgroups, err := c.Store.GetGroupDTOs(filter)\n\tif err != nil 
{\n\t\tc.Logger.WithError(err).Error(\"failed to query groups\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\tif groups == nil {\n\t\tgroups = []*model.GroupDTO{}\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\toutputJSON(c, w, groups)\n}", "func (client *GroupMgmtClient) List(path string) (interface{}, error) {\n\tlistResp, err := client.ListFromParams(path, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn listResp, nil\n}", "func (client *LongTermRetentionManagedInstanceBackupsClient) getByResourceGroupHandleResponse(resp *http.Response) (LongTermRetentionManagedInstanceBackupsGetByResourceGroupResponse, error) {\n\tresult := LongTermRetentionManagedInstanceBackupsGetByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedInstanceLongTermRetentionBackup); err != nil {\n\t\treturn LongTermRetentionManagedInstanceBackupsGetByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *GalleryImagesClient) listByGalleryHandleResponse(resp *http.Response) (GalleryImagesClientListByGalleryResponse, error) {\n\tresult := GalleryImagesClientListByGalleryResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GalleryImageList); err != nil {\n\t\treturn GalleryImagesClientListByGalleryResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *DicomServicesClient) listByWorkspaceHandleResponse(resp *http.Response) (DicomServicesClientListByWorkspaceResponse, error) {\n\tresult := DicomServicesClientListByWorkspaceResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DicomServiceCollection); err != nil {\n\t\treturn DicomServicesClientListByWorkspaceResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ReplicationvCentersClient) listByReplicationFabricsHandleResponse(resp *http.Response) (ReplicationvCentersClientListByReplicationFabricsResponse, error) {\n\tresult := ReplicationvCentersClientListByReplicationFabricsResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VCenterCollection); err != nil {\n\t\treturn ReplicationvCentersClientListByReplicationFabricsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (c *GroupController) List(ctx *app.ListGroupContext) error {\n\t// GroupController_List: start_implement\n\n\tdataStore := &dal.DataStore{}\n\tdataStore.GetSession()\n\t// Close the session\n\tdefer dataStore.Close()\n\tdc := dal.NewDalGroup(dataStore)\n\n\tgroups, err := dc.FetchAll()\n\n\tif err != nil {\n\t\tctx.ResponseData.Service.LogError(\"InternalServerError\", \"req_id\", middleware.ContextRequestID(ctx), \"ctrl\", \"Group\", \"action\", \"List\", ctx.RequestData.Request.Method, ctx.RequestData.Request.URL, \"databaseError\", err.Error())\n\t\treturn ctx.InternalServerError()\n\t}\n\n\tres := make(app.GwentapiGroupCollection, len(*groups))\n\n\tlastModified := time.Time{}\n\tfor i, group := range *groups {\n\t\tg, _ := factory.CreateGroup(&group)\n\n\t\tif lastModified.Before(group.Last_Modified) {\n\t\t\tlastModified = group.Last_Modified\n\t\t}\n\n\t\tres[i] = g\n\t}\n\n\t// GroupController_List: end_implement\n\thelpers.LastModified(ctx.ResponseData, lastModified)\n\tif ctx.IfModifiedSince != nil {\n\t\tif !helpers.IsModified(*ctx.IfModifiedSince, lastModified) {\n\t\t\treturn ctx.NotModified()\n\t\t}\n\t}\n\treturn ctx.OK(res)\n}", "func (client *SapMonitorsClient) listHandleResponse(resp *http.Response) 
(SapMonitorsClientListResponse, error) {\n\tresult := SapMonitorsClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SapMonitorListResult); err != nil {\n\t\treturn SapMonitorsClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (s *ServiceOp) List(ctx context.Context, input *ListGroupsInput) (*ListGroupsOutput, error) {\n\tr := client.NewRequest(http.MethodGet, \"/compute/azure/group\")\n\tresp, err := client.RequireOK(s.Client.Do(ctx, r))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tgs, err := groupsFromHttpResponse(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &ListGroupsOutput{Groups: gs}, nil\n}", "func (client *RecordSetsClient) listHandleResponse(resp *azcore.Response) (RecordSetListResultResponse, error) {\n\tvar val *RecordSetListResult\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn RecordSetListResultResponse{}, err\n\t}\n\treturn RecordSetListResultResponse{RawResponse: resp.Response, RecordSetListResult: val}, nil\n}", "func (client IdentityClient) ListDynamicGroups(ctx context.Context, request ListDynamicGroupsRequest) (response ListDynamicGroupsResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.listDynamicGroups, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = ListDynamicGroupsResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = ListDynamicGroupsResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(ListDynamicGroupsResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into ListDynamicGroupsResponse\")\n\t}\n\treturn\n}", "func (s *ServiceOp) List(ctx context.Context, input *ListGroupsInput) (*ListGroupsOutput, error) {\n\tr := client.NewRequest(http.MethodGet, \"/azure/compute/group\")\n\tresp, err := client.RequireOK(s.Client.Do(ctx, r))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tgs, err := groupsFromHttpResponse(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &ListGroupsOutput{Groups: gs}, nil\n}", "func (client DataControllersClient) ListInGroupSender(req *http.Request) (*http.Response, error) {\n\treturn client.Send(req, azure.DoRetryWithRegistration(client.Client))\n}", "func (client IdentityClient) listDynamicGroups(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/dynamicGroups\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response ListDynamicGroupsResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (c *Console) List(group string) (err error) {\n\t_, err = fmt.Fprintf(c.conn, \"%v\\n\", 
toJSON([]string{\"list\", group}))\n\tif err == nil {\n\t\terr = <-c.Waiter\n\t}\n\treturn\n}", "func (client *VirtualMachineScaleSetVMRunCommandsClient) listHandleResponse(resp *http.Response) (VirtualMachineScaleSetVMRunCommandsListResponse, error) {\n\tresult := VirtualMachineScaleSetVMRunCommandsListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineRunCommandsListResult); err != nil {\n\t\treturn VirtualMachineScaleSetVMRunCommandsListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *SyncGroupsClient) listHubSchemasHandleResponse(resp *http.Response) (SyncGroupsClientListHubSchemasResponse, error) {\n\tresult := SyncGroupsClientListHubSchemasResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SyncFullSchemaPropertiesListResult); err != nil {\n\t\treturn SyncGroupsClientListHubSchemasResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ManagedClustersClient) listHandleResponse(resp *http.Response) (ManagedClustersClientListResponse, error) {\n\tresult := ManagedClustersClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedClusterListResult); err != nil {\n\t\treturn ManagedClustersClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *ManagedInstancesClient) listHandleResponse(resp *http.Response) (ManagedInstancesClientListResponse, error) {\n\tresult := ManagedInstancesClientListResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedInstanceListResult); err != nil {\n\t\treturn ManagedInstancesClientListResponse{}, err\n\t}\n\treturn result, nil\n}", "func List(c messagebird.Client, options *messagebird.PaginationRequest) (*Groups, error) {\n\tgroupList := &Groups{}\n\tif err := c.Request(groupList, http.MethodGet, path+\"?\"+options.QueryParams(), nil); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn groupList, nil\n}", "func (client *AvailabilitySetsClient) listHandleResponse(resp *http.Response) (AvailabilitySetsListResponse, error) {\n\tresult := AvailabilitySetsListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AvailabilitySetListResult); err != nil {\n\t\treturn AvailabilitySetsListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (client *GalleryImageVersionsClient) listByGalleryImageHandleResponse(resp *azcore.Response) (GalleryImageVersionListResponse, error) {\n\tvar val *GalleryImageVersionList\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn GalleryImageVersionListResponse{}, err\n\t}\n\treturn GalleryImageVersionListResponse{RawResponse: resp.Response, GalleryImageVersionList: val}, nil\n}" ]
[ "0.7555561", "0.70824856", "0.70370066", "0.70273715", "0.7022419", "0.70222783", "0.70171577", "0.6928565", "0.6868567", "0.6856361", "0.6823024", "0.6822074", "0.68173724", "0.6772575", "0.677189", "0.67256725", "0.67240644", "0.67218256", "0.6714559", "0.67127115", "0.6708223", "0.66908777", "0.6687579", "0.66784835", "0.6672505", "0.66629374", "0.6636304", "0.66255164", "0.6620682", "0.6620363", "0.66161877", "0.6613272", "0.66020626", "0.6545836", "0.65427524", "0.65222245", "0.6521667", "0.652135", "0.65189075", "0.6517604", "0.65159607", "0.6498094", "0.64969933", "0.64955544", "0.6483556", "0.6482563", "0.6478493", "0.6472962", "0.64690405", "0.64225835", "0.6411927", "0.64091724", "0.6380853", "0.63379073", "0.62817556", "0.6260271", "0.6184453", "0.61503977", "0.6103927", "0.60424834", "0.6015009", "0.6011624", "0.60011196", "0.59753925", "0.59326655", "0.592703", "0.58493847", "0.58215153", "0.57955366", "0.5792208", "0.57914877", "0.57738245", "0.5760144", "0.5748455", "0.5731226", "0.5725642", "0.5688913", "0.5686198", "0.56794214", "0.567926", "0.56763154", "0.5673888", "0.5672037", "0.56335247", "0.56227857", "0.5612983", "0.56021714", "0.56000686", "0.5598056", "0.55749774", "0.5569196", "0.556728", "0.5565844", "0.5560484", "0.55519915", "0.55475044", "0.5538871", "0.5527458", "0.5520178", "0.55006224" ]
0.8073399
0