hexsha
stringlengths 40
40
| size
int64 2
991k
| ext
stringclasses 2
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
208
| max_stars_repo_name
stringlengths 6
106
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
sequence | max_stars_count
int64 1
33.5k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
208
| max_issues_repo_name
stringlengths 6
106
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
sequence | max_issues_count
int64 1
16.3k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
208
| max_forks_repo_name
stringlengths 6
106
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
sequence | max_forks_count
int64 1
6.91k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 2
991k
| avg_line_length
float64 1
36k
| max_line_length
int64 1
977k
| alphanum_fraction
float64 0
1
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9e4faf7951a7efa2653c10412f98f949a47765a6 | 6,481 | ex | Elixir | clients/speech/lib/google_api/speech/v1/api/speech.ex | chingor13/elixir-google-api | 85e13fa25c4c9f4618bb463ab4c79245fc6d2a7b | [
"Apache-2.0"
] | 1 | 2018-12-03T23:43:10.000Z | 2018-12-03T23:43:10.000Z | clients/speech/lib/google_api/speech/v1/api/speech.ex | chingor13/elixir-google-api | 85e13fa25c4c9f4618bb463ab4c79245fc6d2a7b | [
"Apache-2.0"
] | null | null | null | clients/speech/lib/google_api/speech/v1/api/speech.ex | chingor13/elixir-google-api | 85e13fa25c4c9f4618bb463ab4c79245fc6d2a7b | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This class is auto generated by the elixir code generator program.
# Do not edit the class manually.
defmodule GoogleApi.Speech.V1.Api.Speech do
  @moduledoc """
  API calls for all endpoints tagged `Speech`.
  """
  alias GoogleApi.Speech.V1.Connection
  alias GoogleApi.Gax.{Request, Response}

  # Every Speech endpoint accepts the same set of standard Google API query
  # parameters plus a JSON request body, so the optional-params mapping is
  # shared between both actions below.
  @speech_optional_params %{
    :"$.xgafv" => :query,
    :access_token => :query,
    :alt => :query,
    :callback => :query,
    :fields => :query,
    :key => :query,
    :oauth_token => :query,
    :prettyPrint => :query,
    :quotaUser => :query,
    :uploadType => :query,
    :upload_protocol => :query,
    :body => :body
  }

  @doc """
  Performs asynchronous speech recognition: receive results via the
  google.longrunning.Operations interface. Returns either an
  `Operation.error` or an `Operation.response` which contains
  a `LongRunningRecognizeResponse` message.
  For more information on asynchronous speech recognition, see the
  [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).

  ## Parameters

  * `connection` (*type:* `GoogleApi.Speech.V1.Connection.t`) - Connection to server
  * `optional_params` (*type:* `keyword()`) - Optional parameters. Standard
    Google API query parameters (`:key`, `:access_token`, `:fields`, ...)
    plus `:body` (*type:* `GoogleApi.Speech.V1.Model.LongRunningRecognizeRequest.t`).
  * `opts` (*type:* `keyword()`) - Call options

  ## Returns

  * `{:ok, %GoogleApi.Speech.V1.Model.Operation{}}` on success
  * `{:error, info}` on failure
  """
  @spec speech_speech_longrunningrecognize(Tesla.Env.client(), keyword(), keyword()) ::
          {:ok, GoogleApi.Speech.V1.Model.Operation.t()} | {:error, Tesla.Env.t()}
  def speech_speech_longrunningrecognize(connection, optional_params \\ [], opts \\ []) do
    post_json(
      connection,
      "/v1/speech:longrunningrecognize",
      optional_params,
      opts,
      %GoogleApi.Speech.V1.Model.Operation{}
    )
  end

  @doc """
  Performs synchronous speech recognition: receive results after all audio
  has been sent and processed.

  ## Parameters

  * `connection` (*type:* `GoogleApi.Speech.V1.Connection.t`) - Connection to server
  * `optional_params` (*type:* `keyword()`) - Optional parameters. Standard
    Google API query parameters (`:key`, `:access_token`, `:fields`, ...)
    plus `:body` (*type:* `GoogleApi.Speech.V1.Model.RecognizeRequest.t`).
  * `opts` (*type:* `keyword()`) - Call options

  ## Returns

  * `{:ok, %GoogleApi.Speech.V1.Model.RecognizeResponse{}}` on success
  * `{:error, info}` on failure
  """
  @spec speech_speech_recognize(Tesla.Env.client(), keyword(), keyword()) ::
          {:ok, GoogleApi.Speech.V1.Model.RecognizeResponse.t()} | {:error, Tesla.Env.t()}
  def speech_speech_recognize(connection, optional_params \\ [], opts \\ []) do
    post_json(
      connection,
      "/v1/speech:recognize",
      optional_params,
      opts,
      %GoogleApi.Speech.V1.Model.RecognizeResponse{}
    )
  end

  # Builds and executes a POST request against `path`, decoding the response
  # payload into `result_struct`. Shared by all Speech endpoints.
  defp post_json(connection, path, optional_params, opts, result_struct) do
    request =
      Request.new()
      |> Request.method(:post)
      |> Request.url(path, %{})
      |> Request.add_optional_params(@speech_optional_params, optional_params)

    connection
    |> Connection.execute(request)
    |> Response.decode(opts ++ [struct: result_struct])
  end
end
| 45.640845 | 196 | 0.647894 |
9e4fcd230ed632b1c9eb782d3b2ded8d90de53e5 | 12,215 | exs | Elixir | apps/ewallet/test/ewallet/validators/transaction_consumption_validator_test.exs | jimpeebles/ewallet | ad4a9750ec8dc5adc4c0dfe6c22f0ef760825405 | [
"Apache-2.0"
] | null | null | null | apps/ewallet/test/ewallet/validators/transaction_consumption_validator_test.exs | jimpeebles/ewallet | ad4a9750ec8dc5adc4c0dfe6c22f0ef760825405 | [
"Apache-2.0"
] | null | null | null | apps/ewallet/test/ewallet/validators/transaction_consumption_validator_test.exs | jimpeebles/ewallet | ad4a9750ec8dc5adc4c0dfe6c22f0ef760825405 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 OmiseGO Pte Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
defmodule EWallet.TransactionConsumptionValidatorTest do
  use EWallet.DBCase, async: true
  import EWalletDB.Factory
  alias EWallet.{TestEndpoint, TransactionConsumptionValidator}
  alias EWalletDB.{Account, Repo, TransactionConsumption, TransactionRequest, User}
  # NOTE: this alias shadows Elixir's built-in `System` module for the rest of
  # this file; `%System{}` below is ActivityLogger.System (the audit-log
  # originator), not the standard library.
  alias ActivityLogger.System
  # Validations performed when a consumption is first created for a request.
  describe "validate_before_consumption/3" do
    test "expires a transaction request if past expiration date" do
      now = NaiveDateTime.utc_now()
      # Expiration date is 60 seconds in the past, so validation must expire it.
      request =
        insert(:transaction_request, expiration_date: NaiveDateTime.add(now, -60, :second))
      wallet = request.wallet
      {:error, error} =
        TransactionConsumptionValidator.validate_before_consumption(request, wallet, %{})
      assert error == :expired_transaction_request
    end
    test "returns expiration reason if transaction request has expired" do
      # Expire the request up-front; validation should surface the stored reason.
      {:ok, request} = :transaction_request |> insert() |> TransactionRequest.expire(%System{})
      wallet = request.wallet
      {:error, error} =
        TransactionConsumptionValidator.validate_before_consumption(request, wallet, %{})
      assert error == :expired_transaction_request
    end
    test "returns unauthorized_amount_override amount when attempting to override illegally" do
      # Overriding "amount" is only allowed when the request opts in.
      request = insert(:transaction_request, allow_amount_override: false)
      wallet = request.wallet
      {:error, error} =
        TransactionConsumptionValidator.validate_before_consumption(request, wallet, %{
          "amount" => 100
        })
      assert error == :unauthorized_amount_override
    end
    test "returns the request, token and amount" do
      # Happy path: no amount override, so the returned amount is nil and the
      # token comes from the request itself.
      request = insert(:transaction_request)
      wallet = request.wallet
      {:ok, request, token, amount} =
        TransactionConsumptionValidator.validate_before_consumption(request, wallet, %{})
      assert request.status == "valid"
      assert token.uuid == request.token_uuid
      assert amount == nil
    end
  end
  # Validations performed when a pending consumption is confirmed by the
  # request owner. Requires the test websocket endpoint to be running.
  describe "validate_before_confirmation/2" do
    setup do
      {:ok, pid} = TestEndpoint.start_link()
      # Ensure the endpoint process is fully terminated before the next test.
      on_exit(fn ->
        ref = Process.monitor(pid)
        assert_receive {:DOWN, ^ref, _, _, _}
      end)
      :ok
    end
    test "returns unauthorized if the request is not owned by user" do
      {:ok, user} = :user |> params_for() |> User.insert()
      consumption = :transaction_consumption |> insert() |> Repo.preload([:transaction_request])
      {status, res} =
        TransactionConsumptionValidator.validate_before_confirmation(consumption, %{
          end_user: user
        })
      assert status == :error
      assert res == :unauthorized
    end
    test "returns unauthorized if the request is not owned by account" do
      {:ok, account} = :account |> params_for() |> Account.insert()
      consumption = :transaction_consumption |> insert() |> Repo.preload([:transaction_request])
      {status, res} =
        TransactionConsumptionValidator.validate_before_confirmation(consumption, %{
          end_user: account
        })
      assert status == :error
      assert res == :unauthorized
    end
    test "expires request if past expiration date" do
      now = NaiveDateTime.utc_now()
      {:ok, user} = :user |> params_for() |> User.insert()
      wallet = User.get_primary_wallet(user)
      # Request owned by the confirming user but already past its expiration.
      request =
        insert(
          :transaction_request,
          expiration_date: NaiveDateTime.add(now, -60, :second),
          account_uuid: nil,
          user_uuid: user.uuid,
          wallet: wallet
        )
      consumption =
        :transaction_consumption
        |> insert(transaction_request_uuid: request.uuid)
        |> Repo.preload([:transaction_request])
      {status, res} =
        TransactionConsumptionValidator.validate_before_confirmation(consumption, %{
          end_user: user
        })
      assert status == :error
      assert res == :expired_transaction_request
    end
    test "returns expiration reason if expired request" do
      {:ok, user} = :user |> params_for() |> User.insert()
      wallet = User.get_primary_wallet(user)
      # An already-expired request carries its reason; the validator must
      # propagate it instead of the generic expired error.
      request =
        insert(
          :transaction_request,
          status: "expired",
          expiration_reason: "max_consumptions_reached",
          account_uuid: nil,
          user_uuid: user.uuid,
          wallet: wallet
        )
      consumption =
        :transaction_consumption
        |> insert(transaction_request_uuid: request.uuid)
        |> Repo.preload([:transaction_request])
      {status, res} =
        TransactionConsumptionValidator.validate_before_confirmation(consumption, %{
          end_user: user
        })
      assert status == :error
      assert res == :max_consumptions_reached
    end
    test "returns max_consumptions_per_user_reached if the max has been reached" do
      {:ok, user_1} = :user |> params_for() |> User.insert()
      {:ok, user_2} = :user |> params_for() |> User.insert()
      wallet_1 = User.get_primary_wallet(user_1)
      wallet_2 = User.get_primary_wallet(user_2)
      # user_1 owns the request with a per-user cap of 1 consumption.
      request =
        insert(
          :transaction_request,
          max_consumptions_per_user: 1,
          account_uuid: nil,
          user_uuid: user_1.uuid,
          wallet: wallet_1
        )
      # user_2 already has one confirmed consumption, filling the quota.
      _consumption =
        :transaction_consumption
        |> insert(
          account_uuid: nil,
          user_uuid: user_2.uuid,
          wallet_address: wallet_2.address,
          transaction_request_uuid: request.uuid,
          status: "confirmed"
        )
      # A second consumption by user_2 must therefore fail confirmation.
      consumption =
        :transaction_consumption
        |> insert(
          account_uuid: nil,
          wallet_address: wallet_2.address,
          user_uuid: user_2.uuid,
          transaction_request_uuid: request.uuid
        )
        |> Repo.preload([:transaction_request, :wallet])
      {status, res} =
        TransactionConsumptionValidator.validate_before_confirmation(consumption, %{
          end_user: user_1
        })
      assert status == :error
      assert res == :max_consumptions_per_user_reached
    end
    test "expires consumption if past expiration" do
      now = NaiveDateTime.utc_now()
      {:ok, user} = :user |> params_for() |> User.insert()
      wallet = User.get_primary_wallet(user)
      request =
        insert(:transaction_request, account_uuid: nil, user_uuid: user.uuid, wallet: wallet)
      # Here it is the consumption itself (not the request) that has expired.
      consumption =
        :transaction_consumption
        |> insert(
          expiration_date: NaiveDateTime.add(now, -60, :second),
          transaction_request_uuid: request.uuid
        )
        |> Repo.preload([:transaction_request])
      {status, res} =
        TransactionConsumptionValidator.validate_before_confirmation(consumption, %{
          end_user: user
        })
      assert status == :error
      assert res == :expired_transaction_consumption
    end
    test "returns expired_transaction_consumption if the consumption has expired" do
      {:ok, user} = :user |> params_for() |> User.insert()
      wallet = User.get_primary_wallet(user)
      request =
        insert(:transaction_request, account_uuid: nil, user_uuid: user.uuid, wallet: wallet)
      consumption =
        :transaction_consumption
        |> insert(status: "expired", transaction_request_uuid: request.uuid)
        |> Repo.preload([:transaction_request])
      {status, res} =
        TransactionConsumptionValidator.validate_before_confirmation(consumption, %{
          end_user: user
        })
      assert status == :error
      assert res == :expired_transaction_consumption
    end
    test "returns the consumption if valid" do
      {:ok, user} = :user |> params_for() |> User.insert()
      wallet = User.get_primary_wallet(user)
      request =
        insert(:transaction_request, account_uuid: nil, user_uuid: user.uuid, wallet: wallet)
      consumption =
        :transaction_consumption
        |> insert(transaction_request_uuid: request.uuid)
        |> Repo.preload([:transaction_request])
      {status, res} =
        TransactionConsumptionValidator.validate_before_confirmation(consumption, %{
          end_user: user
        })
      assert status == :ok
      assert %TransactionConsumption{} = res
      assert res.status == "pending"
    end
  end
  # Token resolution: the consumer may pass a token id, which must either be
  # the request's own token or one reachable via an exchange pair.
  describe "get_and_validate_token/2" do
    test "returns the request's token if nil is passed" do
      request = insert(:transaction_request)
      {:ok, token} = TransactionConsumptionValidator.get_and_validate_token(request, nil)
      assert token.uuid == request.token_uuid
    end
    test "returns a token_not_found error if given not existing token" do
      request = insert(:transaction_request)
      {:error, code} = TransactionConsumptionValidator.get_and_validate_token(request, "fake")
      assert code == :token_not_found
    end
    test "returns a invalid_token_provided error if given a different token without pair" do
      request = insert(:transaction_request)
      token = insert(:token)
      {:error, code} = TransactionConsumptionValidator.get_and_validate_token(request, token.id)
      assert code == :exchange_pair_not_found
    end
    test "returns a invalid_token_provided error if given a different token with pair" do
      token_1 = insert(:token)
      token_2 = insert(:token)
      # Pair exists only in the token_1 -> token_2 direction; the validator
      # still rejects it here (looks up the opposite direction).
      _pair = insert(:exchange_pair, from_token: token_1, to_token: token_2)
      request = insert(:transaction_request, token_uuid: token_1.uuid)
      {:error, code} = TransactionConsumptionValidator.get_and_validate_token(request, token_2.id)
      assert code == :exchange_pair_not_found
    end
    test "returns the specified token if valid" do
      request = :transaction_request |> insert() |> Repo.preload([:token])
      token = request.token
      {:ok, token} = TransactionConsumptionValidator.get_and_validate_token(request, token.id)
      assert token.uuid == request.token_uuid
    end
  end
  # Per-user consumption cap checks (only applies to user-owned wallets).
  describe "validate_max_consumptions_per_user/2" do
    test "returns the wallet if max_consumptions_per_user is not set" do
      request = insert(:transaction_request)
      wallet = insert(:wallet)
      {status, res} =
        TransactionConsumptionValidator.validate_max_consumptions_per_user(request, wallet)
      assert status == :ok
      assert res == wallet
    end
    test "returns the wallet if the request is for an account" do
      # Account wallets are exempt from the per-user cap, even when it is 0.
      {:ok, account} = :account |> params_for() |> Account.insert()
      wallet = Account.get_primary_wallet(account)
      request = insert(:transaction_request, max_consumptions_per_user: 0)
      {status, res} =
        TransactionConsumptionValidator.validate_max_consumptions_per_user(request, wallet)
      assert status == :ok
      assert res == wallet
    end
    test "returns the wallet if the current number of active consumptions is lower
          than the max_consumptions_per_user" do
      {:ok, user} = :user |> params_for() |> User.insert()
      wallet = User.get_primary_wallet(user)
      request = insert(:transaction_request, max_consumptions_per_user: 1)
      {status, res} =
        TransactionConsumptionValidator.validate_max_consumptions_per_user(request, wallet)
      assert status == :ok
      assert res == wallet
    end
    test "returns max_consumptions_per_user_reached when it has been reached" do
      {:ok, user} = :user |> params_for() |> User.insert()
      wallet = User.get_primary_wallet(user)
      # Cap of 0 means any consumption attempt is over the limit.
      request = insert(:transaction_request, max_consumptions_per_user: 0)
      {status, res} =
        TransactionConsumptionValidator.validate_max_consumptions_per_user(request, wallet)
      assert status == :error
      assert res == :max_consumptions_per_user_reached
    end
  end
end
| 32.573333 | 98 | 0.66885 |
9e4ffe3621bd553be7fc7e58858eee617ee2ab8b | 776 | exs | Elixir | test/soft_delete_migration_test.exs | petlove/ecto_soft_delete | 9a99a80037aa1dd726ca317b398bda4b778d0bf1 | [
"MIT"
] | null | null | null | test/soft_delete_migration_test.exs | petlove/ecto_soft_delete | 9a99a80037aa1dd726ca317b398bda4b778d0bf1 | [
"MIT"
] | null | null | null | test/soft_delete_migration_test.exs | petlove/ecto_soft_delete | 9a99a80037aa1dd726ca317b398bda4b778d0bf1 | [
"MIT"
] | null | null | null | defmodule Ecto.SoftDelete.Migration.Test do
use ExUnit.Case
use Ecto.Migration
alias Ecto.SoftDelete.Test.Repo
import Ecto.SoftDelete.Migration
alias Ecto.Migration.Runner
setup meta do
:ok = Ecto.Adapters.SQL.Sandbox.checkout(Repo)
{:ok, runner} = Runner.start_link({self(), Repo, __MODULE__, meta[:direction] || :forward, :up, %{level: false, sql: false}})
Runner.metadata(runner, meta)
{:ok, runner: runner}
end
test "soft_delete_columns adds deleted_at column", %{runner: runner} do
create table(:posts, primary_key: false) do
soft_delete_columns()
end
[create_command] = Agent.get(runner, & &1.commands)
flush()
assert {:create, _,
[{:add, :deleted_at, :utc_datetime, []}]} = create_command
end
end
| 27.714286 | 129 | 0.68299 |
9e504d2e5b62ab5c40eca102117698fdb08e7491 | 2,464 | exs | Elixir | priv/repo/migrations/20190306191610_fix_timestamps_type_all.exs | alchexmist/erlnote | e1f164e63616316e1d3869ebfae5ed2ae96c3ccd | [
"Apache-2.0"
] | null | null | null | priv/repo/migrations/20190306191610_fix_timestamps_type_all.exs | alchexmist/erlnote | e1f164e63616316e1d3869ebfae5ed2ae96c3ccd | [
"Apache-2.0"
] | 1 | 2019-11-02T13:46:12.000Z | 2019-11-02T13:46:12.000Z | priv/repo/migrations/20190306191610_fix_timestamps_type_all.exs | alchexmist/erlnote | e1f164e63616316e1d3869ebfae5ed2ae96c3ccd | [
"Apache-2.0"
] | null | null | null | defmodule Erlnote.Repo.Migrations.FixTimestampsTypeAll do
use Ecto.Migration
def change do
# execute "ALTER TABLE boards_users ALTER COLUMN inserted_at DROP NOT NULL"
# execute "ALTER TABLE boards_users ALTER COLUMN updated_at DROP NOT NULL"
alter table(:boards_users) do
remove :inserted_at
remove :updated_at
flush()
timestamps(type: :utc_datetime)
end
# execute "ALTER TABLE boards ALTER COLUMN inserted_at DROP NOT NULL"
# execute "ALTER TABLE boards ALTER COLUMN updated_at DROP NOT NULL"
alter table(:boards) do
remove :inserted_at
remove :updated_at
flush()
timestamps(type: :utc_datetime)
end
# execute "ALTER TABLE notes_tags ALTER COLUMN inserted_at DROP NOT NULL"
# execute "ALTER TABLE notes_tags ALTER COLUMN updated_at DROP NOT NULL"
alter table(:notes_tags) do
remove :inserted_at
remove :updated_at
flush()
timestamps(type: :utc_datetime)
end
# execute "ALTER TABLE notes_users ALTER COLUMN inserted_at DROP NOT NULL"
# execute "ALTER TABLE notes_users ALTER COLUMN updated_at DROP NOT NULL"
alter table(:notes_users) do
remove :inserted_at
remove :updated_at
flush()
timestamps(type: :utc_datetime)
end
# execute "ALTER TABLE notes ALTER COLUMN inserted_at DROP NOT NULL"
# execute "ALTER TABLE notes ALTER COLUMN updated_at DROP NOT NULL"
alter table(:notes) do
remove :inserted_at
remove :updated_at
flush()
timestamps(type: :utc_datetime)
end
# execute "ALTER TABLE notepads_tags ALTER COLUMN inserted_at DROP NOT NULL"
# execute "ALTER TABLE notepads_tags ALTER COLUMN updated_at DROP NOT NULL"
alter table(:notepads_tags) do
remove :inserted_at
remove :updated_at
flush()
timestamps(type: :utc_datetime)
end
# execute "ALTER TABLE notepads ALTER COLUMN inserted_at DROP NOT NULL"
# execute "ALTER TABLE notepads ALTER COLUMN updated_at DROP NOT NULL"
alter table(:notepads) do
remove :inserted_at
remove :updated_at
flush()
timestamps(type: :utc_datetime)
end
# execute "ALTER TABLE tags ALTER COLUMN inserted_at DROP NOT NULL"
# execute "ALTER TABLE tags ALTER COLUMN updated_at DROP NOT NULL"
alter table(:tags) do
remove :inserted_at
remove :updated_at
flush()
timestamps(type: :utc_datetime)
end
end
end
| 31.589744 | 80 | 0.696429 |
9e504d523af35b00aaa0f0526af633615a131ab0 | 9,538 | ex | Elixir | lib/level/groups.ex | davecremins/level | cc06b86d32e9bef954a199dc4a2b47561815fd4b | [
"Apache-2.0"
] | null | null | null | lib/level/groups.ex | davecremins/level | cc06b86d32e9bef954a199dc4a2b47561815fd4b | [
"Apache-2.0"
] | null | null | null | lib/level/groups.ex | davecremins/level | cc06b86d32e9bef954a199dc4a2b47561815fd4b | [
"Apache-2.0"
] | null | null | null | defmodule Level.Groups do
@moduledoc """
The Groups context.
"""
import Ecto.Query, warn: false
import Level.Gettext
import Ecto.Changeset, only: [change: 2, unique_constraint: 3]
alias Ecto.Multi
alias Level.Pubsub
alias Level.Repo
alias Level.Groups.Group
alias Level.Groups.GroupBookmark
alias Level.Groups.GroupUser
alias Level.Spaces.SpaceUser
alias Level.Users.User
@behaviour Level.DataloaderSource
@doc """
Generate the query for listing all accessible groups.
"""
@spec groups_base_query(SpaceUser.t()) :: Ecto.Query.t()
@spec groups_base_query(User.t()) :: Ecto.Query.t()
def groups_base_query(%SpaceUser{id: space_user_id, space_id: space_id}) do
from g in Group,
where: g.space_id == ^space_id,
left_join: gu in GroupUser,
on: gu.group_id == g.id and gu.space_user_id == ^space_user_id,
where: g.is_private == false or (g.is_private == true and not is_nil(gu.id))
end
def groups_base_query(%User{id: user_id}) do
from g in Group,
distinct: g.id,
join: su in SpaceUser,
on: su.space_id == g.space_id and su.user_id == ^user_id,
left_join: gu in GroupUser,
on: gu.group_id == g.id,
left_join: gsu in SpaceUser,
on: gu.space_user_id == gsu.id and gsu.user_id == ^user_id,
where:
g.is_private == false or
(g.is_private == true and not is_nil(gu.id) and not is_nil(gsu.id))
end
@doc """
Fetches a group by id.
"""
@spec get_group(SpaceUser.t(), String.t()) :: {:ok, Group.t()} | {:error, String.t()}
@spec get_group(User.t(), String.t()) :: {:ok, Group.t()} | {:error, String.t()}
def get_group(%SpaceUser{space_id: space_id} = member, id) do
case Repo.get_by(Group, id: id, space_id: space_id) do
%Group{} = group ->
if group.is_private do
case get_group_membership(group, member) do
{:ok, _} ->
{:ok, group}
_ ->
{:error, dgettext("errors", "Group not found")}
end
else
{:ok, group}
end
_ ->
{:error, dgettext("errors", "Group not found")}
end
end
def get_group(%User{} = user, id) do
case Repo.get_by(groups_base_query(user), id: id) do
%Group{} = group ->
{:ok, group}
_ ->
{:error, dgettext("errors", "Group not found")}
end
end
@doc """
Creates a group.
"""
@spec create_group(SpaceUser.t(), map()) ::
{:ok, %{group: Group.t(), group_user: GroupUser.t(), bookmarked: boolean()}}
| {:error, :group | :group_user | :bookmarked, any(),
%{optional(:group | :group_user | :bookmarked) => any()}}
def create_group(space_user, params \\ %{}) do
params_with_relations =
params
|> Map.put(:space_id, space_user.space_id)
|> Map.put(:creator_id, space_user.id)
changeset = Group.create_changeset(%Group{}, params_with_relations)
Multi.new()
|> Multi.insert(:group, changeset)
|> Multi.run(:group_user, fn %{group: group} ->
create_group_membership(group, space_user)
end)
|> Multi.run(:bookmarked, fn %{group: group} ->
case bookmark_group(group, space_user) do
:ok -> {:ok, true}
_ -> {:ok, false}
end
end)
|> Repo.transaction()
end
@doc """
Updates a group.
"""
@spec update_group(Group.t(), map()) :: {:ok, Group.t()} | {:error, Ecto.Changeset.t()}
def update_group(group, params) do
group
|> Group.update_changeset(params)
|> Repo.update()
|> after_update_group()
end
defp after_update_group({:ok, %Group{id: id} = group} = result) do
Pubsub.publish(:group_updated, id, group)
result
end
defp after_update_group(err), do: err
@doc """
Fetches a group membership by group and user.
"""
@spec get_group_membership(Group.t(), SpaceUser.t()) ::
{:ok, GroupUser.t()} | {:error, String.t()}
def get_group_membership(%Group{id: group_id}, %SpaceUser{id: space_user_id}) do
case Repo.get_by(GroupUser, space_user_id: space_user_id, group_id: group_id) do
%GroupUser{} = group_user ->
{:ok, group_user}
_ ->
{:error, dgettext("errors", "The user is a not a group member")}
end
end
@doc """
Lists featured group memberships (for display in the sidebar).
Currently returns the top ten users, ordered alphabetically.
"""
@spec list_featured_memberships(Group.t()) :: {:ok, [GroupUser.t()]} | no_return()
def list_featured_memberships(group) do
base_query =
from gu in GroupUser,
where: gu.group_id == ^group.id,
join: u in assoc(gu, :user),
select: %{gu | last_name: u.last_name}
query =
from gu in subquery(base_query),
order_by: {:asc, gu.last_name},
limit: 10
{:ok, Repo.all(query)}
end
@doc """
Creates a group membership.
"""
@spec create_group_membership(Group.t(), SpaceUser.t()) ::
{:ok, %{group_user: GroupUser.t(), bookmarked: boolean()}}
| {:error, :group_user | :bookmarked, any(),
%{optional(:group_user | :bookmarked) => any()}}
def create_group_membership(group, space_user) do
params = %{
space_id: group.space_id,
group_id: group.id,
space_user_id: space_user.id
}
Multi.new()
|> Multi.insert(:group_user, GroupUser.changeset(%GroupUser{}, params))
|> Multi.run(:bookmarked, fn _ ->
case bookmark_group(group, space_user) do
:ok -> {:ok, true}
_ -> {:ok, false}
end
end)
|> Repo.transaction()
end
@doc """
Deletes a group membership.
"""
@spec delete_group_membership(GroupUser.t()) ::
{:ok, GroupUser.t()} | {:error, Ecto.Changeset.t()}
def delete_group_membership(group_user) do
Repo.delete(group_user)
end
@doc """
Updates group membership state.
"""
@spec update_group_membership(Group.t(), SpaceUser.t(), String.t()) ::
{:ok, GroupUser.t()} | {:error, GroupUser.t(), Ecto.Changeset.t()}
def update_group_membership(group, space_user, state) do
case {get_group_membership(group, space_user), state} do
{{:ok, group_user}, "NOT_SUBSCRIBED"} ->
case delete_group_membership(group_user) do
{:ok, _} ->
group_user = not_subscribed_membership(group.space_id, space_user, group)
Pubsub.publish(:group_membership_updated, group.id, group_user)
{:ok, group_user}
{:error, changeset} ->
{:error, group_user, changeset}
end
{{:error, _}, "SUBSCRIBED"} ->
case create_group_membership(group, space_user) do
{:ok, %{group_user: group_user}} ->
Pubsub.publish(:group_membership_updated, group.id, group_user)
{:ok, group_user}
{:error, _, %Ecto.Changeset{} = changeset, _} ->
{:error, not_subscribed_membership(group.space_id, space_user, group), changeset}
end
{{:ok, group_user}, _} ->
{:ok, group_user}
{{:error, _}, _} ->
{:ok, not_subscribed_membership(group.space_id, space_user, group)}
end
end
defp not_subscribed_membership(space_id, space_user, group) do
%GroupUser{
state: "NOT_SUBSCRIBED",
space_id: space_id,
space_user: space_user,
group: group
}
end
@doc """
Bookmarks a group.
"""
@spec bookmark_group(Group.t(), SpaceUser.t()) :: :ok | {:error, String.t()}
def bookmark_group(group, space_user) do
params = %{
space_id: group.space_id,
space_user_id: space_user.id,
group_id: group.id
}
changeset =
%GroupBookmark{}
|> change(params)
|> unique_constraint(:uniqueness, name: :group_bookmarks_space_user_id_group_id_index)
case Repo.insert(changeset, on_conflict: :nothing) do
{:ok, _} ->
Pubsub.publish(:group_bookmarked, space_user.id, group)
:ok
{:error, %Ecto.Changeset{errors: [uniqueness: _]}} ->
:ok
{:error, _} ->
{:error, dgettext("errors", "An unexpected error occurred")}
end
end
@doc """
Unbookmarks a group.
"""
@spec unbookmark_group(Group.t(), SpaceUser.t()) :: :ok | no_return()
def unbookmark_group(group, space_user) do
{count, _} =
Repo.delete_all(
from b in GroupBookmark,
where: b.space_user_id == ^space_user.id and b.group_id == ^group.id
)
if count > 0 do
Pubsub.publish(:group_unbookmarked, space_user.id, group)
end
:ok
end
@doc """
Lists all bookmarked groups.
"""
@spec list_bookmarked_groups(SpaceUser.t()) :: [Group.t()] | no_return()
def list_bookmarked_groups(space_user) do
space_user
|> groups_base_query
|> join(
:inner,
[g],
b in GroupBookmark,
b.group_id == g.id and b.space_user_id == ^space_user.id
)
|> Repo.all()
end
@doc """
Closes a group.
"""
@spec close_group(Group.t()) :: {:ok, Group.t()} | {:error, Ecto.Changeset.t()}
def close_group(group) do
group
|> Ecto.Changeset.change(state: "CLOSED")
|> Repo.update()
end
@impl true
def dataloader_data(%{current_user: _user} = params) do
Dataloader.Ecto.new(Repo, query: &dataloader_query/2, default_params: params)
end
def dataloader_data(_), do: raise("authentication required")
@impl true
def dataloader_query(Group, %{current_user: user}), do: groups_base_query(user)
def dataloader_query(_, _), do: raise("query not valid for this context")
end
| 28.81571 | 93 | 0.613022 |
9e5055c6498de0c012a3c80cd30344e5f2e49998 | 1,658 | ex | Elixir | clients/kratos/elixir/lib/ory/model/ui_node_attributes.ex | ory/sdk-generator | 958314d130922ad6f20f439b5230141a832231a5 | [
"Apache-2.0"
] | null | null | null | clients/kratos/elixir/lib/ory/model/ui_node_attributes.ex | ory/sdk-generator | 958314d130922ad6f20f439b5230141a832231a5 | [
"Apache-2.0"
] | null | null | null | clients/kratos/elixir/lib/ory/model/ui_node_attributes.ex | ory/sdk-generator | 958314d130922ad6f20f439b5230141a832231a5 | [
"Apache-2.0"
] | null | null | null | # NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
# https://openapi-generator.tech
# Do not edit the class manually.
defmodule Ory.Model.UiNodeAttributes do
  @moduledoc """
  Attributes attached to a UI node. This is a union of the attribute sets for
  the different node types (input, text, image, anchor, script); only the
  fields relevant to a node's `node_type` are populated.
  """

  @derive [Poison.Encoder]
  defstruct [
    :"disabled",
    :"label",
    :"name",
    :"node_type",
    :"onclick",
    :"pattern",
    :"required",
    :"type",
    :"value",
    :"id",
    :"text",
    :"height",
    :"src",
    :"width",
    :"href",
    :"title",
    :"async",
    :"crossorigin",
    :"integrity",
    :"nonce",
    :"referrerpolicy"
  ]

  @type t :: %__MODULE__{
    :"disabled" => boolean(),
    :"label" => Ory.Model.UiText.t | nil,
    :"name" => String.t,
    :"node_type" => String.t,
    :"onclick" => String.t | nil,
    :"pattern" => String.t | nil,
    :"required" => boolean() | nil,
    :"type" => String.t,
    # Fixed: was `AnyType | nil`, which in a typespec denotes the singleton
    # atom :"Elixir.AnyType" rather than the model type. The decoder below
    # deserializes this field into Ory.Model.AnyType, so reference its t type
    # like every other model field.
    :"value" => Ory.Model.AnyType.t | nil,
    :"id" => String.t,
    :"text" => Ory.Model.UiText.t,
    :"height" => integer(),
    :"src" => String.t,
    :"width" => integer(),
    :"href" => String.t,
    :"title" => Ory.Model.UiText.t,
    :"async" => boolean(),
    :"crossorigin" => String.t,
    :"integrity" => String.t,
    :"nonce" => String.t,
    :"referrerpolicy" => String.t
  }
end
defimpl Poison.Decoder, for: Ory.Model.UiNodeAttributes do
  import Ory.Deserializer
  # Converts the struct-valued fields from plain decoded maps into their
  # model structs; all scalar fields are left as decoded.
  def decode(value, options) do
    value
    |> deserialize(:"label", :struct, Ory.Model.UiText, options)
    |> deserialize(:"value", :struct, Ory.Model.AnyType, options)
    |> deserialize(:"text", :struct, Ory.Model.UiText, options)
    |> deserialize(:"title", :struct, Ory.Model.UiText, options)
  end
end
| 23.352113 | 91 | 0.562726 |
9e50748af4d35473e1169e721fc4c3c9b7cf4dcd | 148 | ex | Elixir | apps/cat_feeder_web/web/controllers/page_controller.ex | wsmoak/cat_feeder | e9157563ff3294fd9fd4c9d13f956cbe1dd718c2 | [
"MIT"
] | 20 | 2016-01-16T17:08:00.000Z | 2021-01-15T15:46:23.000Z | apps/cat_feeder_web/web/controllers/page_controller.ex | wsmoak/cat_feeder | e9157563ff3294fd9fd4c9d13f956cbe1dd718c2 | [
"MIT"
] | null | null | null | apps/cat_feeder_web/web/controllers/page_controller.ex | wsmoak/cat_feeder | e9157563ff3294fd9fd4c9d13f956cbe1dd718c2 | [
"MIT"
] | 5 | 2016-01-09T16:52:44.000Z | 2017-02-09T16:03:25.000Z | defmodule CatFeederWeb.PageController do
use CatFeederWeb.Web, :controller
def index(conn, _params) do
render conn, "index.html"
end
end
| 18.5 | 40 | 0.75 |
9e508e8729d18bff2061d0d007d32f1b04fe0b78 | 136 | ex | Elixir | lib/firestorm_web/web/admin/forums/category.ex | palindrom615/firestorm | 0690493c9dcae5c04c63c5321532a7db923e5be7 | [
"MIT"
] | null | null | null | lib/firestorm_web/web/admin/forums/category.ex | palindrom615/firestorm | 0690493c9dcae5c04c63c5321532a7db923e5be7 | [
"MIT"
] | null | null | null | lib/firestorm_web/web/admin/forums/category.ex | palindrom615/firestorm | 0690493c9dcae5c04c63c5321532a7db923e5be7 | [
"MIT"
] | 1 | 2020-03-20T12:58:37.000Z | 2020-03-20T12:58:37.000Z | defmodule FirestormWeb.ExAdmin.Forums.Category do
use ExAdmin.Register
register_resource FirestormWeb.Forums.Category do
end
end
| 19.428571 | 51 | 0.830882 |
9e5095a2f45ee618734a2465f03e08286d9734a2 | 1,797 | exs | Elixir | lib/mix/test/mix/tasks/compile_test.exs | ekosz/elixir | 62e375bc711b4072e1b68de776e96cc31f571d45 | [
"Apache-2.0"
] | 1 | 2017-10-29T16:37:08.000Z | 2017-10-29T16:37:08.000Z | lib/mix/test/mix/tasks/compile_test.exs | ekosz/elixir | 62e375bc711b4072e1b68de776e96cc31f571d45 | [
"Apache-2.0"
] | null | null | null | lib/mix/test/mix/tasks/compile_test.exs | ekosz/elixir | 62e375bc711b4072e1b68de776e96cc31f571d45 | [
"Apache-2.0"
] | null | null | null | Code.require_file "../../../test_helper", __FILE__
defmodule Mix.Tasks.CompileTest do
use MixTest.Case
  test "mix compile --list without mixfile" do
    in_fixture "no_mixfile", fn ->
      # Without a mix.exs, only the default :elixir compiler is enabled.
      Mix.Tasks.Compile.run ["--list"]
      assert_received { :mix_shell, :info, ["\nEnabled compilers: elixir"] }
    end
  end
defmodule CustomApp do
def project do
[app: :custom_app, version: "0.1.0"]
end
end
defmodule CustomCompilers do
def project do
[compilers: [:elixir, :app, :custom]]
end
end
  test "mix compile --list with mixfile" do
    # Pushing a project additionally enables the :app compiler.
    Mix.Project.push CustomApp
    Mix.Tasks.Compile.run ["--list"]
    assert_received { :mix_shell, :info, ["\nEnabled compilers: elixir, app"] }
    # --list also prints one "mix compile.<name> # <doc>" line per compiler.
    assert_received { :mix_shell, :info, ["mix compile.elixir # " <> _] }
  after
    # Always pop so pushed project state does not leak into other tests.
    Mix.Project.pop
  end
  test "mix compile --list with custom mixfile" do
    # The :compilers project setting fully replaces the default compiler list.
    Mix.Project.push CustomCompilers
    Mix.Tasks.Compile.run ["--list"]
    assert_received { :mix_shell, :info, ["\nEnabled compilers: elixir, app, custom"] }
  after
    Mix.Project.pop
  end
  test "compile a project without mixfile" do
    in_fixture "no_mixfile", fn ->
      # Compiling produces beam files under ebin/ and reports via the shell.
      Mix.Tasks.Compile.run []
      assert File.regular?("ebin/Elixir-A.beam")
      assert_received { :mix_shell, :info, ["Compiled lib/a.ex"] }
    end
  after
    # Unload the fixture modules so later tests start clean.
    purge [A, B, C]
  end
  test "compile a project with mixfile" do
    Mix.Project.push CustomApp
    in_fixture "no_mixfile", fn ->
      Mix.Tasks.Compile.run []
      assert File.regular?("ebin/Elixir-A.beam")
      # With a project, the :app compiler also generates the .app resource file.
      assert File.regular?("ebin/custom_app.app")
      assert_received { :mix_shell, :info, ["Compiled lib/a.ex"] }
      assert_received { :mix_shell, :info, ["Generated custom_app.app"] }
    end
  after
    purge [A, B, C]
    Mix.Project.pop
  end
end | 27.227273 | 87 | 0.647746 |
9e50a0896acaa54bd265c9073c6dff1e9d7ca22d | 3,196 | ex | Elixir | lib/cryptopunk/derivation.ex | ayrat555/cryptopunk | 9d41028739dd13b762838912f9fa1e18ac1160a8 | [
"MIT"
] | 12 | 2021-10-31T07:10:48.000Z | 2022-02-21T18:13:17.000Z | lib/cryptopunk/derivation.ex | ayrat555/cryptopunk | 9d41028739dd13b762838912f9fa1e18ac1160a8 | [
"MIT"
] | null | null | null | lib/cryptopunk/derivation.ex | ayrat555/cryptopunk | 9d41028739dd13b762838912f9fa1e18ac1160a8 | [
"MIT"
] | null | null | null | defmodule Cryptopunk.Derivation do
@moduledoc """
Implements key derivation logic.
See https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki
"""
alias Cryptopunk.Derivation.Path
alias Cryptopunk.Key
alias Cryptopunk.Utils
import Path, only: [is_normal: 1, is_hardened: 1]
@order 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
  @doc """
  Derives a child key from `key` along a BIP-32 derivation path.

  Accepts either a `%Path{}` struct (converted to a raw path first) or a raw
  `{:public | :private, [index]}` tuple. Raises `ArgumentError` when a private
  child is requested from a public parent.
  """
  @spec derive(Key.t(), Path.t() | Path.raw_path()) :: Key.t()
  def derive(key, %Path{} = path) do
    raw_path = Path.to_raw_path(path)
    derive(key, raw_path)
  end
  # A public parent can never yield a private child key.
  def derive(%Key{type: :public}, {:private, _}) do
    raise ArgumentError, message: "Can not derive private key from public key"
  end
  # Public child from a private parent: derive privately, then convert.
  def derive(%Key{type: :private} = key, {:public, path}) do
    key
    |> do_derive(path)
    |> Key.public_from_private()
  end
  # Remaining combinations derive directly along the raw index list.
  def derive(key, {_type, path}) do
    do_derive(key, path)
  end
  # Walks the raw index list, deriving one child per index (BIP-32 CKD).
  # Empty path: the key itself is the result.
  def do_derive(key, []), do: key
  # Normal (non-hardened) child of a private key: HMAC over the compressed
  # parent public key plus the 32-bit index.
  def do_derive(%Key{chain_code: chain_code, type: :private} = private_key, [idx | tail])
      when is_normal(idx) do
    ser_public_key =
      private_key
      |> Key.public_from_private()
      |> Utils.compress_public_key()
    new_private_key =
      chain_code
      |> Utils.hmac_sha512(<<ser_public_key::binary, idx::32>>)
      |> create_from_private_key(private_key, idx)
    do_derive(new_private_key, tail)
  end
  # Hardened child: HMAC over 0x00 || parent private key || index, so the
  # parent public key is never involved.
  def do_derive(%Key{chain_code: chain_code, key: key, type: :private} = private_key, [idx | tail])
      when is_hardened(idx) do
    new_private_key =
      chain_code
      |> Utils.hmac_sha512(<<0::8, key::binary, idx::32>>)
      |> create_from_private_key(private_key, idx)
    do_derive(new_private_key, tail)
  end
  # Normal child of a public key (CKDpub). Despite the variable name, the
  # result here is a public key (see create_from_public_key/3).
  def do_derive(%Key{chain_code: chain_code, type: :public} = public_key, [idx | tail])
      when is_normal(idx) do
    ser_public_key = Utils.compress_public_key(public_key)
    new_private_key =
      chain_code
      |> Utils.hmac_sha512(<<ser_public_key::binary, idx::32>>)
      |> create_from_public_key(public_key, idx)
    do_derive(new_private_key, tail)
  end
  # Hardened derivation is impossible without the parent private key.
  def do_derive(%Key{type: :public}, [idx | _tail]) when is_hardened(idx) do
    raise ArgumentError, message: "Can not derive hardened key from public key"
  end
  # Builds a child public key from the 64-byte HMAC output: the left half
  # (l_l) tweaks the parent point via EC point addition, the right half (l_r)
  # becomes the child chain code.
  # NOTE(review): the tweak-add result is asserted with {:ok, _}; an invalid
  # tweak would crash here rather than return an error — confirm intended.
  defp create_from_public_key(
         <<l_l::binary-32, l_r::binary>>,
         %Key{key: key, type: :public} = parent_key,
         idx
       ) do
    {:ok, new_public_key} = ExSecp256k1.public_key_tweak_add(key, l_l)
    Key.new_public(
      key: new_public_key,
      chain_code: l_r,
      parent_key: parent_key,
      index: idx
    )
  end
  # Builds a child private key from the 64-byte HMAC output:
  # child = (left_256_bits + parent_key) mod n, left-padded back to 32 bytes;
  # the remaining bytes become the child chain code.
  # NOTE(review): BIP-32 also requires rejecting IL >= n and a zero child key;
  # neither case is handled here — confirm against the spec.
  defp create_from_private_key(
         <<new_key::256, new_chain::binary>>,
         %Key{key: <<parent_key::256>>, type: :private} = parent_key_struct,
         idx
       ) do
    new_private_key =
      new_key
      |> Kernel.+(parent_key)
      |> rem(@order)
      |> :binary.encode_unsigned()
      |> pad()
    Key.new_private(
      key: new_private_key,
      chain_code: new_chain,
      parent_key: parent_key_struct,
      index: idx
    )
  end
defp pad(binary) when byte_size(binary) >= 32, do: binary
defp pad(binary) do
bits = (32 - byte_size(binary)) * 8
<<0::size(bits)>> <> binary
end
end
| 26.413223 | 99 | 0.652378 |
9e50cd43d4f5ba5bb494df0ade787b7914316c6f | 7,346 | ex | Elixir | lib/mix/tasks/compile.forcex.ex | Tapjoy/forcex | c51757e360f6e5bc416e758a8978f586becb7ce2 | [
"MIT"
] | null | null | null | lib/mix/tasks/compile.forcex.ex | Tapjoy/forcex | c51757e360f6e5bc416e758a8978f586becb7ce2 | [
"MIT"
] | null | null | null | lib/mix/tasks/compile.forcex.ex | Tapjoy/forcex | c51757e360f6e5bc416e758a8978f586becb7ce2 | [
"MIT"
] | 1 | 2021-08-20T08:16:03.000Z | 2021-08-20T08:16:03.000Z | defmodule Mix.Tasks.Compile.Forcex do
use Mix.Task
@recursive false
def run(_) do
{:ok, _} = Application.ensure_all_started(:forcex)
client = Forcex.Client.login
case client do
%{access_token: nil} -> IO.puts("Invalid configuration/credentials. Cannot generate SObjects.")
_ -> generate_modules(client)
end
end
  # Discovers service endpoints, fetches the global SObject catalog, then
  # builds and compiles one quoted module per SObject.
  defp generate_modules(client) do
    client = Forcex.Client.locate_services(client)
    sobjects =
      client
      |> Forcex.describe_global
      |> Map.get("sobjects")
    for sobject <- sobjects do
      sobject
      |> generate_module(client)
      |> Code.compile_quoted
    end
  end
defp generate_module(sobject, client) do
name = Map.get(sobject, "name")
urls = Map.get(sobject, "urls")
describe_url = Map.get(urls, "describe")
sobject_url = Map.get(urls, "sobject")
row_template_url = Map.get(urls, "rowTemplate")
full_description = Forcex.describe_sobject(name, client)
quote location: :keep do
defmodule unquote(Module.concat(Forcex.SObject, name)) do
@moduledoc """
Dynamically generated module for `#{unquote(Map.get(full_description, "label"))}`
## Fields
#{unquote(for field <- Map.get(full_description, "fields"), do: docs_for_field(field))}
"""
@doc """
Retrieves extended metadata for `#{unquote(name)}`
See [SObject Describe](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/resources_sobject_describe.htm)
"""
def describe(client) do
unquote(describe_url)
|> Forcex.get(client)
end
@doc """
Retrieves basic metadata for `#{unquote(name)}`
See [SObject Basic Information](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/resources_sobject_basic_info.htm)
"""
def basic_info(client) do
unquote(sobject_url)
|> Forcex.get(client)
end
@doc """
Create a new `#{unquote(name)}`
Parameters
* `sobject` - a map of key/value pairs
See [SObject Basic Information](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/resources_sobject_basic_info.htm)
"""
def create(sobject, client) when is_map(sobject) do
unquote(sobject_url)
|> Forcex.post(sobject, client)
end
@doc """
Update an existing `#{unquote(name)}`
Parameters
* `id` - 18 character SFDC identifier.
* `changeset` - map of key/value pairs *only* of elements changing
See [SObject Rows](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/resources_sobject_retrieve.htm)
"""
def update(id, changeset, client) do
unquote(row_template_url)
|> String.replace("{ID}", id)
|> Forcex.patch(changeset, client)
end
@doc """
Delete an existing `#{unquote(name)}`
Parameters
* `id` - 18 character SFDC identifier
See [SObject Rows](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/resources_sobject_retrieve.htm)
"""
def delete(id, client) do
unquote(row_template_url)
|> String.replace("{ID}", id)
|> Forcex.delete(client)
end
@doc """
Retrieve an existing `#{unquote(name)}`
Parameters
* `id` - 18 character SFDC identifier
See [SObject Rows](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/resources_sobject_retrieve.htm)
"""
def get(id, client) do
unquote(row_template_url)
|> String.replace("{ID}", id)
|> Forcex.get(client)
end
@doc """
Retrieve the IDs of `#{unquote(name)}`s deleted between `start_date` and `end_date`
Parameters
* `start_date` - `Timex.Convertable` or ISO8601 string
* `end_date` - `Timex.Convertable` or ISO8601 string
See [SObject Get Deleted](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/resources_getdeleted.htm)
"""
def deleted_between(start_date, end_date, client) when is_binary(start_date) and is_binary(end_date) do
params = %{"start" => start_date, "end" => end_date} |> URI.encode_query
unquote(sobject_url) <> "/deleted?#{params}"
|> Forcex.get(client)
end
def deleted_between(start_date, end_date, client) do
deleted_between(
Timex.format!(start_date, "{ISO8601z}"),
Timex.format!(end_date, "{ISO8601z}"),
client)
end
@doc """
Retrieve the IDs of `#{unquote(name)}`s updated between `start_date` and `end_date`
Parameters
* `start_date` - `Timex.Convertable` or ISO8601 string
* `end_date` - `Timex.Convertable` or ISO8601 string
See [SObject Get Updated](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/resources_getupdated.htm)
"""
def updated_between(start_date, end_date, client) when is_binary(start_date) and is_binary(end_date) do
params = %{"start" => start_date, "end" => end_date} |> URI.encode_query
unquote(sobject_url) <> "/updated?#{params}"
|> Forcex.get(client)
end
def updated_between(start_date, end_date, client) do
updated_between(
Timex.format!(start_date, "{ISO}"),
Timex.format!(end_date, "{ISO}"),
client)
end
@doc """
Retrieve a binary field in `#{unquote(name)}`
Parameters
* `id` - 18 character SFDC identifier
* `field` - name of field with binary contents
See [SObject Blob Retrieve](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/resources_sobject_blob_retrieve.htm)
"""
def get_blob(id, field, client) do
unquote(row_template_url) <> "/#{field}"
|> String.replace("{ID}", id)
|> Forcex.get(client)
end
@doc """
Retrieve `#{unquote(name)}` records based on external field `field` having value `value`
Parameters
* `field` - name of external field
* `value` - value of `field` for desired records
See [SObject Rows by External ID](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/resources_sobject_upsert.htm)
"""
def by_external(field, value, client) do
unquote(sobject_url) <> "/#{field}/#{value}"
|> Forcex.get(client)
end
end
IO.puts "Generated #{unquote(Module.concat(Forcex.SObject, name))}"
end
end
  # Renders one bullet-list entry for a field in the generated @moduledoc.
  # Picklist/multipicklist fields additionally list their active values.
  defp docs_for_field(%{"name" => name, "type" => type, "label" => label, "picklistValues" => values}) when type in ["picklist", "multipicklist"] do
    """
    * `#{name}` - `#{type}`, #{label}
    #{for value <- values, do: docs_for_picklist_values(value)}
    """
  end
  # All other field types get a single bullet line.
  defp docs_for_field(%{"name" => name, "type" => type, "label" => label}) do
    "* `#{name}` - `#{type}`, #{label}\n"
  end
defp docs_for_picklist_values(%{"value" => value, "active" => true}) do
" * `#{value}`\n"
end
defp docs_for_picklist_values(_), do: ""
end
| 33.543379 | 148 | 0.610536 |
9e50fed833d85a2eb9e121c0ffb49129daa1e1a1 | 2,297 | ex | Elixir | lib/xema/castable/helper.ex | garret-smith/xema | f9629d66591644d53ad4cb9c7a97de8b10f6fa5e | [
"MIT"
] | null | null | null | lib/xema/castable/helper.ex | garret-smith/xema | f9629d66591644d53ad4cb9c7a97de8b10f6fa5e | [
"MIT"
] | null | null | null | lib/xema/castable/helper.ex | garret-smith/xema | f9629d66591644d53ad4cb9c7a97de8b10f6fa5e | [
"MIT"
] | null | null | null | defmodule Xema.Castable.Helper do
@moduledoc false
import Xema.Utils, only: [to_existing_atom: 1]
  # Injects the shared `cast/2` clauses into every castable implementation.
  defmacro __using__(_) do
    quote do
      import Xema.Castable.Helper
      import Xema.Utils, only: [to_existing_atom: 1]
      alias Xema.Schema
      # :any accepts every value unchanged.
      def cast(value, %Schema{type: :any}), do: {:ok, value}
      # A literal boolean in the schema type also passes the value through.
      def cast(value, %Schema{type: type})
          when is_boolean(type),
          do: {:ok, value}
      # Everything else dispatches to the implementation's cast/4.
      def cast(value, %Schema{type: type, module: module} = schema),
        do: cast(value, type, module, schema)
      # Union of types: the first successful cast wins.
      # NOTE(review): the Enum.find default is a bare %{to: ..., value: ...}
      # map, not an {:error, map} tuple like the other failure returns —
      # confirm upstream whether that asymmetry is intentional.
      def cast(atom, types, module, schema) when is_list(types),
        do:
          types
          |> Stream.map(fn type -> cast(atom, type, module, schema) end)
          |> Enum.find(%{to: types, value: atom}, fn
            {:ok, _} -> true
            {:error, _} -> false
          end)
    end
  end
def to_integer(str, type) when type in [:integer, :number] do
case Integer.parse(str) do
{int, ""} -> {:ok, int}
_ -> {:error, %{to: type, value: str}}
end
end
def to_float(str, type) when type in [:float, :number] do
case Float.parse(str) do
{int, ""} -> {:ok, int}
_ -> {:error, %{to: type, value: str}}
end
end
def module(module) do
if module == nil, do: :struct, else: module
end
def check_keyword(list, to) do
case Keyword.keyword?(list) do
true -> :ok
false -> {:error, %{to: to, value: list}}
end
end
def cast_key(value, :atoms) when is_binary(value) do
case to_existing_atom(value) do
nil -> :error
cast -> {:ok, cast}
end
end
def cast_key(value, :strings) when is_atom(value),
do: {:ok, Atom.to_string(value)}
def cast_key(value, _),
do: {:ok, value}
def to_struct(module, values) do
{:ok, struct!(module, values)}
rescue
error in KeyError ->
{:error, %{to: module, key: error.key, value: values}}
error in ArgumentError ->
{:error, %{to: module, value: values, error: error}}
end
def fields(map) do
Enum.reduce_while(map, {:ok, %{}}, fn {key, value}, {:ok, acc} ->
case cast_key(key, :atoms) do
{:ok, key} ->
{:cont, {:ok, Map.put(acc, key, value)}}
:error ->
{:halt, {:error, %{to: :struct, key: key}}}
end
end)
end
end
| 24.698925 | 72 | 0.561167 |
9e51225cd3e0efe32f6dfa93f9939f2a8b5f864a | 2,983 | ex | Elixir | clients/drive/lib/google_api/drive/v3/model/reply.ex | matehat/elixir-google-api | c1b2523c2c4cdc9e6ca4653ac078c94796b393c3 | [
"Apache-2.0"
] | 1 | 2018-12-03T23:43:10.000Z | 2018-12-03T23:43:10.000Z | clients/drive/lib/google_api/drive/v3/model/reply.ex | matehat/elixir-google-api | c1b2523c2c4cdc9e6ca4653ac078c94796b393c3 | [
"Apache-2.0"
] | null | null | null | clients/drive/lib/google_api/drive/v3/model/reply.ex | matehat/elixir-google-api | c1b2523c2c4cdc9e6ca4653ac078c94796b393c3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This class is auto generated by the elixir code generator program.
# Do not edit the class manually.
defmodule GoogleApi.Drive.V3.Model.Reply do
  @moduledoc """
  A reply to a comment on a file.

  ## Attributes
  *   `action` (*type:* `String.t`, *default:* `nil`) - The action the reply performed to the parent comment. Valid values are: resolve, reopen.
  *   `author` (*type:* `GoogleApi.Drive.V3.Model.User.t`, *default:* `nil`) - The user who created the reply.
  *   `content` (*type:* `String.t`, *default:* `nil`) - The plain text content of the reply. This field is used for setting the content, while htmlContent should be displayed. This is required on creates if no action is specified.
  *   `createdTime` (*type:* `DateTime.t`, *default:* `nil`) - The time at which the reply was created (RFC 3339 date-time).
  *   `deleted` (*type:* `boolean()`, *default:* `nil`) - Whether the reply has been deleted. A deleted reply has no content.
  *   `htmlContent` (*type:* `String.t`, *default:* `nil`) - The content of the reply with HTML formatting.
  *   `id` (*type:* `String.t`, *default:* `nil`) - The ID of the reply.
  *   `kind` (*type:* `String.t`, *default:* `drive#reply`) - Identifies what kind of resource this is. Value: the fixed string "drive#reply".
  *   `modifiedTime` (*type:* `DateTime.t`, *default:* `nil`) - The last time the reply was modified (RFC 3339 date-time).
  """
  use GoogleApi.Gax.ModelBase
  @type t :: %__MODULE__{
          :action => String.t(),
          :author => GoogleApi.Drive.V3.Model.User.t(),
          :content => String.t(),
          :createdTime => DateTime.t(),
          :deleted => boolean(),
          :htmlContent => String.t(),
          :id => String.t(),
          :kind => String.t(),
          :modifiedTime => DateTime.t()
        }
  # field/2 registers each attribute with ModelBase; the `as:` option makes
  # decoding convert the raw value into the given struct/type.
  field(:action)
  field(:author, as: GoogleApi.Drive.V3.Model.User)
  field(:content)
  field(:createdTime, as: DateTime)
  field(:deleted)
  field(:htmlContent)
  field(:id)
  field(:kind)
  field(:modifiedTime, as: DateTime)
end
defimpl Poison.Decoder, for: GoogleApi.Drive.V3.Model.Reply do
  # Delegates to the generated decode/2 so nested fields declared with
  # `as:` (author, timestamps) are converted to their model types.
  def decode(value, options) do
    GoogleApi.Drive.V3.Model.Reply.decode(value, options)
  end
end
defimpl Poison.Encoder, for: GoogleApi.Drive.V3.Model.Reply do
  # Encoding is shared across all generated models via Gax.ModelBase.
  def encode(value, options) do
    GoogleApi.Gax.ModelBase.encode(value, options)
  end
end
| 40.863014 | 231 | 0.669125 |
9e5164e97b9bbc10c939b9fcacaa3f09dd3f8b48 | 29,777 | ex | Elixir | clients/compute/lib/google_api/compute/v1/api/resource_policies.ex | kolorahl/elixir-google-api | 46bec1e092eb84c6a79d06c72016cb1a13777fa6 | [
"Apache-2.0"
] | null | null | null | clients/compute/lib/google_api/compute/v1/api/resource_policies.ex | kolorahl/elixir-google-api | 46bec1e092eb84c6a79d06c72016cb1a13777fa6 | [
"Apache-2.0"
] | null | null | null | clients/compute/lib/google_api/compute/v1/api/resource_policies.ex | kolorahl/elixir-google-api | 46bec1e092eb84c6a79d06c72016cb1a13777fa6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Compute.V1.Api.ResourcePolicies do
@moduledoc """
API calls for all endpoints tagged `ResourcePolicies`.
"""
alias GoogleApi.Compute.V1.Connection
alias GoogleApi.Gax.{Request, Response}
@library_version Mix.Project.config() |> Keyword.get(:version, "")
@doc """
Retrieves an aggregated list of resource policies.
## Parameters
* `connection` (*type:* `GoogleApi.Compute.V1.Connection.t`) - Connection to server
* `project` (*type:* `String.t`) - Project ID for this request.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:alt` (*type:* `String.t`) - Data format for the response.
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
* `:userIp` (*type:* `String.t`) - Deprecated. Please use quotaUser instead.
* `:filter` (*type:* `String.t`) - A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `>`, or `<`.
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.
You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.
To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true) ```
* `:includeAllScopes` (*type:* `boolean()`) - Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.
* `:maxResults` (*type:* `integer()`) - The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)
* `:orderBy` (*type:* `String.t`) - Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.
You can also sort results in descending order based on the creation timestamp using `orderBy="creationTimestamp desc"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.
Currently, only sorting by `name` or `creationTimestamp desc` is supported.
* `:pageToken` (*type:* `String.t`) - Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Compute.V1.Model.ResourcePolicyAggregatedList{}}` on success
* `{:error, info}` on failure
"""
@spec compute_resource_policies_aggregated_list(
Tesla.Env.client(),
String.t(),
keyword(),
keyword()
) ::
{:ok, GoogleApi.Compute.V1.Model.ResourcePolicyAggregatedList.t()}
| {:ok, Tesla.Env.t()}
| {:error, any()}
def compute_resource_policies_aggregated_list(
connection,
project,
optional_params \\ [],
opts \\ []
) do
optional_params_config = %{
:alt => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:userIp => :query,
:filter => :query,
:includeAllScopes => :query,
:maxResults => :query,
:orderBy => :query,
:pageToken => :query
}
request =
Request.new()
|> Request.method(:get)
|> Request.url("/{project}/aggregated/resourcePolicies", %{
"project" => URI.encode(project, &URI.char_unreserved?/1)
})
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(
opts ++ [struct: %GoogleApi.Compute.V1.Model.ResourcePolicyAggregatedList{}]
)
end
@doc """
Deletes the specified resource policy.
## Parameters
* `connection` (*type:* `GoogleApi.Compute.V1.Connection.t`) - Connection to server
* `project` (*type:* `String.t`) - Project ID for this request.
* `region` (*type:* `String.t`) - Name of the region for this request.
* `resource_policy` (*type:* `String.t`) - Name of the resource policy to delete.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:alt` (*type:* `String.t`) - Data format for the response.
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
* `:userIp` (*type:* `String.t`) - Deprecated. Please use quotaUser instead.
* `:requestId` (*type:* `String.t`) - An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.
The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Compute.V1.Model.Operation{}}` on success
* `{:error, info}` on failure
"""
@spec compute_resource_policies_delete(
Tesla.Env.client(),
String.t(),
String.t(),
String.t(),
keyword(),
keyword()
) ::
{:ok, GoogleApi.Compute.V1.Model.Operation.t()} | {:ok, Tesla.Env.t()} | {:error, any()}
def compute_resource_policies_delete(
connection,
project,
region,
resource_policy,
optional_params \\ [],
opts \\ []
) do
optional_params_config = %{
:alt => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:userIp => :query,
:requestId => :query
}
request =
Request.new()
|> Request.method(:delete)
|> Request.url("/{project}/regions/{region}/resourcePolicies/{resourcePolicy}", %{
"project" => URI.encode(project, &URI.char_unreserved?/1),
"region" => URI.encode(region, &URI.char_unreserved?/1),
"resourcePolicy" => URI.encode(resource_policy, &URI.char_unreserved?/1)
})
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Compute.V1.Model.Operation{}])
end
@doc """
Retrieves all information of the specified resource policy.
## Parameters
* `connection` (*type:* `GoogleApi.Compute.V1.Connection.t`) - Connection to server
* `project` (*type:* `String.t`) - Project ID for this request.
* `region` (*type:* `String.t`) - Name of the region for this request.
* `resource_policy` (*type:* `String.t`) - Name of the resource policy to retrieve.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:alt` (*type:* `String.t`) - Data format for the response.
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
* `:userIp` (*type:* `String.t`) - Deprecated. Please use quotaUser instead.
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Compute.V1.Model.ResourcePolicy{}}` on success
* `{:error, info}` on failure
"""
@spec compute_resource_policies_get(
Tesla.Env.client(),
String.t(),
String.t(),
String.t(),
keyword(),
keyword()
) ::
{:ok, GoogleApi.Compute.V1.Model.ResourcePolicy.t()}
| {:ok, Tesla.Env.t()}
| {:error, any()}
def compute_resource_policies_get(
connection,
project,
region,
resource_policy,
optional_params \\ [],
opts \\ []
) do
optional_params_config = %{
:alt => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:userIp => :query
}
request =
Request.new()
|> Request.method(:get)
|> Request.url("/{project}/regions/{region}/resourcePolicies/{resourcePolicy}", %{
"project" => URI.encode(project, &URI.char_unreserved?/1),
"region" => URI.encode(region, &URI.char_unreserved?/1),
"resourcePolicy" => URI.encode(resource_policy, &URI.char_unreserved?/1)
})
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Compute.V1.Model.ResourcePolicy{}])
end
@doc """
Gets the access control policy for a resource. May be empty if no such policy or resource exists.

## Parameters

*   `connection` (*type:* `GoogleApi.Compute.V1.Connection.t`) - Connection to server
*   `project` (*type:* `String.t`) - Project ID for this request.
*   `region` (*type:* `String.t`) - The name of the region for this request.
*   `resource` (*type:* `String.t`) - Name or id of the resource for this request.
*   `optional_params` (*type:* `keyword()`) - Optional parameters
    *   `:alt` (*type:* `String.t`) - Data format for the response.
    *   `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
    *   `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    *   `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
    *   `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
    *   `:quotaUser` (*type:* `String.t`) - An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
    *   `:userIp` (*type:* `String.t`) - Deprecated. Please use quotaUser instead.
*   `opts` (*type:* `keyword()`) - Call options

## Returns

*   `{:ok, %GoogleApi.Compute.V1.Model.Policy{}}` on success
*   `{:error, info}` on failure
"""
@spec compute_resource_policies_get_iam_policy(
        Tesla.Env.client(),
        String.t(),
        String.t(),
        String.t(),
        keyword(),
        keyword()
      ) :: {:ok, GoogleApi.Compute.V1.Model.Policy.t()} | {:ok, Tesla.Env.t()} | {:error, any()}
def compute_resource_policies_get_iam_policy(
      connection,
      project,
      region,
      resource,
      optional_params \\ [],
      opts \\ []
    ) do
  # Maps each accepted option to where it belongs in the request; every
  # option of this endpoint is sent as a query-string parameter.
  optional_params_config = %{
    :alt => :query,
    :fields => :query,
    :key => :query,
    :oauth_token => :query,
    :prettyPrint => :query,
    :quotaUser => :query,
    :userIp => :query
  }

  # Build a GET request; path placeholders are filled with URL-encoded values.
  request =
    Request.new()
    |> Request.method(:get)
    |> Request.url("/{project}/regions/{region}/resourcePolicies/{resource}/getIamPolicy", %{
      "project" => URI.encode(project, &URI.char_unreserved?/1),
      "region" => URI.encode(region, &URI.char_unreserved?/1),
      "resource" => URI.encode(resource, &URI.char_unreserved?/1)
    })
    |> Request.add_optional_params(optional_params_config, optional_params)
    |> Request.library_version(@library_version)

  # Execute the call and decode the body into a Policy struct.
  connection
  |> Connection.execute(request)
  |> Response.decode(opts ++ [struct: %GoogleApi.Compute.V1.Model.Policy{}])
end
@doc """
Creates a new resource policy.

## Parameters

*   `connection` (*type:* `GoogleApi.Compute.V1.Connection.t`) - Connection to server
*   `project` (*type:* `String.t`) - Project ID for this request.
*   `region` (*type:* `String.t`) - Name of the region for this request.
*   `optional_params` (*type:* `keyword()`) - Optional parameters
    *   `:alt` (*type:* `String.t`) - Data format for the response.
    *   `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
    *   `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    *   `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
    *   `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
    *   `:quotaUser` (*type:* `String.t`) - An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
    *   `:userIp` (*type:* `String.t`) - Deprecated. Please use quotaUser instead.
    *   `:requestId` (*type:* `String.t`) - An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.
        For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.
        The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
    *   `:body` (*type:* `GoogleApi.Compute.V1.Model.ResourcePolicy.t`) -
*   `opts` (*type:* `keyword()`) - Call options

## Returns

*   `{:ok, %GoogleApi.Compute.V1.Model.Operation{}}` on success
*   `{:error, info}` on failure
"""
@spec compute_resource_policies_insert(
        Tesla.Env.client(),
        String.t(),
        String.t(),
        keyword(),
        keyword()
      ) ::
        {:ok, GoogleApi.Compute.V1.Model.Operation.t()} | {:ok, Tesla.Env.t()} | {:error, any()}
def compute_resource_policies_insert(
      connection,
      project,
      region,
      optional_params \\ [],
      opts \\ []
    ) do
  # Maps each accepted option to its location in the request. The
  # ResourcePolicy payload is carried in the request body (`:body => :body`);
  # everything else goes into the query string.
  optional_params_config = %{
    :alt => :query,
    :fields => :query,
    :key => :query,
    :oauth_token => :query,
    :prettyPrint => :query,
    :quotaUser => :query,
    :userIp => :query,
    :requestId => :query,
    :body => :body
  }

  # Build a POST request; path placeholders are filled with URL-encoded values.
  request =
    Request.new()
    |> Request.method(:post)
    |> Request.url("/{project}/regions/{region}/resourcePolicies", %{
      "project" => URI.encode(project, &URI.char_unreserved?/1),
      "region" => URI.encode(region, &URI.char_unreserved?/1)
    })
    |> Request.add_optional_params(optional_params_config, optional_params)
    |> Request.library_version(@library_version)

  # Execute the call; the API returns a long-running Operation struct.
  connection
  |> Connection.execute(request)
  |> Response.decode(opts ++ [struct: %GoogleApi.Compute.V1.Model.Operation{}])
end
@doc """
Lists all the resource policies that have been configured for the specified project in the specified region.

## Parameters

*   `connection` (*type:* `GoogleApi.Compute.V1.Connection.t`) - Connection to server
*   `project` (*type:* `String.t`) - Project ID for this request.
*   `region` (*type:* `String.t`) - Name of the region for this request.
*   `optional_params` (*type:* `keyword()`) - Optional parameters
    *   `:alt` (*type:* `String.t`) - Data format for the response.
    *   `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
    *   `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    *   `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
    *   `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
    *   `:quotaUser` (*type:* `String.t`) - An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
    *   `:userIp` (*type:* `String.t`) - Deprecated. Please use quotaUser instead.
    *   `:filter` (*type:* `String.t`) - A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `>`, or `<`.
        For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.
        You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.
        To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true) ```
    *   `:maxResults` (*type:* `integer()`) - The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)
    *   `:orderBy` (*type:* `String.t`) - Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.
        You can also sort results in descending order based on the creation timestamp using `orderBy="creationTimestamp desc"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.
        Currently, only sorting by `name` or `creationTimestamp desc` is supported.
    *   `:pageToken` (*type:* `String.t`) - Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.
*   `opts` (*type:* `keyword()`) - Call options

## Returns

*   `{:ok, %GoogleApi.Compute.V1.Model.ResourcePolicyList{}}` on success
*   `{:error, info}` on failure
"""
@spec compute_resource_policies_list(
        Tesla.Env.client(),
        String.t(),
        String.t(),
        keyword(),
        keyword()
      ) ::
        {:ok, GoogleApi.Compute.V1.Model.ResourcePolicyList.t()}
        | {:ok, Tesla.Env.t()}
        | {:error, any()}
def compute_resource_policies_list(
      connection,
      project,
      region,
      optional_params \\ [],
      opts \\ []
    ) do
  # Maps each accepted option to its location in the request; this list
  # endpoint takes only query-string parameters (filtering and paging).
  optional_params_config = %{
    :alt => :query,
    :fields => :query,
    :key => :query,
    :oauth_token => :query,
    :prettyPrint => :query,
    :quotaUser => :query,
    :userIp => :query,
    :filter => :query,
    :maxResults => :query,
    :orderBy => :query,
    :pageToken => :query
  }

  # Build a GET request; path placeholders are filled with URL-encoded values.
  request =
    Request.new()
    |> Request.method(:get)
    |> Request.url("/{project}/regions/{region}/resourcePolicies", %{
      "project" => URI.encode(project, &URI.char_unreserved?/1),
      "region" => URI.encode(region, &URI.char_unreserved?/1)
    })
    |> Request.add_optional_params(optional_params_config, optional_params)
    |> Request.library_version(@library_version)

  # Execute the call and decode one page of results into a ResourcePolicyList.
  connection
  |> Connection.execute(request)
  |> Response.decode(opts ++ [struct: %GoogleApi.Compute.V1.Model.ResourcePolicyList{}])
end
@doc """
Sets the access control policy on the specified resource. Replaces any existing policy.

## Parameters

*   `connection` (*type:* `GoogleApi.Compute.V1.Connection.t`) - Connection to server
*   `project` (*type:* `String.t`) - Project ID for this request.
*   `region` (*type:* `String.t`) - The name of the region for this request.
*   `resource` (*type:* `String.t`) - Name or id of the resource for this request.
*   `optional_params` (*type:* `keyword()`) - Optional parameters
    *   `:alt` (*type:* `String.t`) - Data format for the response.
    *   `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
    *   `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    *   `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
    *   `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
    *   `:quotaUser` (*type:* `String.t`) - An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
    *   `:userIp` (*type:* `String.t`) - Deprecated. Please use quotaUser instead.
    *   `:body` (*type:* `GoogleApi.Compute.V1.Model.RegionSetPolicyRequest.t`) -
*   `opts` (*type:* `keyword()`) - Call options

## Returns

*   `{:ok, %GoogleApi.Compute.V1.Model.Policy{}}` on success
*   `{:error, info}` on failure
"""
@spec compute_resource_policies_set_iam_policy(
        Tesla.Env.client(),
        String.t(),
        String.t(),
        String.t(),
        keyword(),
        keyword()
      ) :: {:ok, GoogleApi.Compute.V1.Model.Policy.t()} | {:ok, Tesla.Env.t()} | {:error, any()}
def compute_resource_policies_set_iam_policy(
      connection,
      project,
      region,
      resource,
      optional_params \\ [],
      opts \\ []
    ) do
  # The RegionSetPolicyRequest payload rides in the request body; every
  # other supported option is a query-string parameter.
  param_locations = %{
    :alt => :query,
    :fields => :query,
    :key => :query,
    :oauth_token => :query,
    :prettyPrint => :query,
    :quotaUser => :query,
    :userIp => :query,
    :body => :body
  }

  # Shared helper for URL-encoding path segments.
  encode_segment = fn segment -> URI.encode(segment, &URI.char_unreserved?/1) end

  path_params = %{
    "project" => encode_segment.(project),
    "region" => encode_segment.(region),
    "resource" => encode_segment.(resource)
  }

  request =
    Request.new()
    |> Request.method(:post)
    |> Request.url("/{project}/regions/{region}/resourcePolicies/{resource}/setIamPolicy", path_params)
    |> Request.add_optional_params(param_locations, optional_params)
    |> Request.library_version(@library_version)

  # Execute the call and decode the resulting IAM policy.
  connection
  |> Connection.execute(request)
  |> Response.decode(opts ++ [struct: %GoogleApi.Compute.V1.Model.Policy{}])
end
@doc """
Returns permissions that a caller has on the specified resource.

## Parameters

*   `connection` (*type:* `GoogleApi.Compute.V1.Connection.t`) - Connection to server
*   `project` (*type:* `String.t`) - Project ID for this request.
*   `region` (*type:* `String.t`) - The name of the region for this request.
*   `resource` (*type:* `String.t`) - Name or id of the resource for this request.
*   `optional_params` (*type:* `keyword()`) - Optional parameters
    *   `:alt` (*type:* `String.t`) - Data format for the response.
    *   `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
    *   `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    *   `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
    *   `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
    *   `:quotaUser` (*type:* `String.t`) - An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
    *   `:userIp` (*type:* `String.t`) - Deprecated. Please use quotaUser instead.
    *   `:body` (*type:* `GoogleApi.Compute.V1.Model.TestPermissionsRequest.t`) -
*   `opts` (*type:* `keyword()`) - Call options

## Returns

*   `{:ok, %GoogleApi.Compute.V1.Model.TestPermissionsResponse{}}` on success
*   `{:error, info}` on failure
"""
@spec compute_resource_policies_test_iam_permissions(
        Tesla.Env.client(),
        String.t(),
        String.t(),
        String.t(),
        keyword(),
        keyword()
      ) ::
        {:ok, GoogleApi.Compute.V1.Model.TestPermissionsResponse.t()}
        | {:ok, Tesla.Env.t()}
        | {:error, any()}
def compute_resource_policies_test_iam_permissions(
      connection,
      project,
      region,
      resource,
      optional_params \\ [],
      opts \\ []
    ) do
  # Maps each accepted option to its location in the request. The
  # TestPermissionsRequest payload travels in the body; everything else
  # is a query-string parameter.
  optional_params_config = %{
    :alt => :query,
    :fields => :query,
    :key => :query,
    :oauth_token => :query,
    :prettyPrint => :query,
    :quotaUser => :query,
    :userIp => :query,
    :body => :body
  }

  # Build a POST request; path placeholders are filled with URL-encoded values.
  request =
    Request.new()
    |> Request.method(:post)
    |> Request.url(
      "/{project}/regions/{region}/resourcePolicies/{resource}/testIamPermissions",
      %{
        "project" => URI.encode(project, &URI.char_unreserved?/1),
        "region" => URI.encode(region, &URI.char_unreserved?/1),
        "resource" => URI.encode(resource, &URI.char_unreserved?/1)
      }
    )
    |> Request.add_optional_params(optional_params_config, optional_params)
    |> Request.library_version(@library_version)

  # Execute the call and decode the permission check result.
  connection
  |> Connection.execute(request)
  |> Response.decode(opts ++ [struct: %GoogleApi.Compute.V1.Model.TestPermissionsResponse{}])
end
end
| 48.417886 | 511 | 0.641233 |
9e5188aa9f75acf5c40e2c737d60083ca2363bd4 | 66,498 | ex | Elixir | lib/ecto/repo.ex | williamthome/ecto | ad821b05746f23bdcd2069f6e9c68a2b4786830c | [
"Apache-2.0"
] | null | null | null | lib/ecto/repo.ex | williamthome/ecto | ad821b05746f23bdcd2069f6e9c68a2b4786830c | [
"Apache-2.0"
] | 4 | 2021-03-04T13:00:52.000Z | 2021-03-12T12:42:09.000Z | deps/ecto/lib/ecto/repo.ex | adrianomota/blog | ef3b2d2ed54f038368ead8234d76c18983caa75b | [
"MIT"
] | 2 | 2017-07-21T08:28:41.000Z | 2018-12-13T02:13:32.000Z | defmodule Ecto.Repo do
@moduledoc """
Defines a repository.
A repository maps to an underlying data store, controlled by the
adapter. For example, Ecto ships with a Postgres adapter that
stores data into a PostgreSQL database.
When used, the repository expects the `:otp_app` and `:adapter` as
option. The `:otp_app` should point to an OTP application that has
the repository configuration. For example, the repository:
defmodule Repo do
use Ecto.Repo,
otp_app: :my_app,
adapter: Ecto.Adapters.Postgres
end
Could be configured with:
config :my_app, Repo,
database: "ecto_simple",
username: "postgres",
password: "postgres",
hostname: "localhost"
Most of the configuration that goes into the `config` is specific
to the adapter. For this particular example, you can check
[`Ecto.Adapters.Postgres`](https://hexdocs.pm/ecto_sql/Ecto.Adapters.Postgres.html)
for more information. In spite of this, the following configuration values
are shared across all adapters:
* `:name` - The name of the Repo supervisor process
* `:priv` - the directory where to keep repository data, like
migrations, schema and more. Defaults to "priv/YOUR_REPO".
It must always point to a subdirectory inside the priv directory
* `:url` - a URL that specifies storage information. Read below
for more information
* `:log` - the log level used when logging the query with Elixir's
Logger. If false, disables logging for that repository.
Defaults to `:debug`
* `:pool_size` - the size of the pool used by the connection module.
Defaults to `10`
* `:telemetry_prefix` - we recommend adapters to publish events
using the `Telemetry` library. By default, the telemetry prefix
is based on the module name, so if your module is called
`MyApp.Repo`, the prefix will be `[:my_app, :repo]`. See the
"Telemetry Events" section to see which events we recommend
adapters to publish. Note that if you have multiple databases, you
should keep the `:telemetry_prefix` consistent for each repo and
use the `:repo` property in the event metadata for distinguishing
between repos.
## URLs
Repositories by default support URLs. For example, the configuration
above could be rewritten to:
config :my_app, Repo,
url: "ecto://postgres:postgres@localhost/ecto_simple"
The schema can be of any value. The path represents the database name
while options are simply merged in.
URL can include query parameters to override shared and adapter-specific
options, like `ssl`, `timeout` and `pool_size`. The following example
shows how to pass these configuration values:
config :my_app, Repo,
url: "ecto://postgres:postgres@localhost/ecto_simple?ssl=true&pool_size=10"
In case the URL needs to be dynamically configured, for example by
reading a system environment variable, such can be done via the
`c:init/2` repository callback:
def init(_type, config) do
{:ok, Keyword.put(config, :url, System.get_env("DATABASE_URL"))}
end
## Shared options
Almost all of the repository functions outlined in this module accept the following
options:
* `:timeout` - The time in milliseconds (as an integer) to wait for the query call to
finish. `:infinity` will wait indefinitely (default: `15_000`)
* `:log` - When false, does not log the query
* `:telemetry_event` - The telemetry event name to dispatch the event under.
See the next section for more information
* `:telemetry_options` - Extra options to attach to telemetry event name.
See the next section for more information
## Telemetry events
There are two types of telemetry events. The ones emitted by Ecto and the
ones that are adapter specific.
### Ecto telemetry events
The following events are emitted by all Ecto repositories:
* `[:ecto, :repo, :init]` - it is invoked whenever a repository starts.
The measurement is a single `system_time` entry in native unit. The
metadata is the `:repo` and all initialization options under `:opts`.
### Adapter-specific events
We recommend adapters to publish certain `Telemetry` events listed below.
Those events will use the `:telemetry_prefix` outlined above which defaults
to `[:my_app, :repo]`.
For instance, to receive all query events published by a repository called
`MyApp.Repo`, one would define a module:
defmodule MyApp.Telemetry do
def handle_event([:my_app, :repo, :query], measurements, metadata, config) do
IO.inspect binding()
end
end
Then, in the `Application.start/2` callback, attach the handler to this event using
a unique handler id:
:ok = :telemetry.attach("my-app-handler-id", [:my_app, :repo, :query], &MyApp.Telemetry.handle_event/4, %{})
For details, see [the telemetry documentation](https://hexdocs.pm/telemetry/).
Below we list all events developers should expect from Ecto. All examples
below consider a repository named `MyApp.Repo`:
#### `[:my_app, :repo, :query]`
This event should be invoked on every query sent to the adapter, including
queries that are related to the transaction management.
The `:measurements` map will include the following, all given in the
`:native` time unit:
* `:idle_time` - the time the connection spent waiting before being checked out for the query
* `:queue_time` - the time spent waiting to check out a database connection
* `:query_time` - the time spent executing the query
* `:decode_time` - the time spent decoding the data received from the database
* `:total_time` - the sum of the other measurements
All measurements are given in the `:native` time unit. You can read more
about it in the docs for `System.convert_time_unit/3`.
A telemetry `:metadata` map including the following fields. Each database
adapter may emit different information here. For Ecto.SQL databases, it
will look like this:
* `:type` - the type of the Ecto query. For example, for Ecto.SQL
databases, it would be `:ecto_sql_query`
* `:repo` - the Ecto repository
* `:result` - the query result
* `:params` - the query parameters
* `:query` - the query sent to the database as a string
* `:source` - the source the query was made on (may be nil)
* `:options` - extra options given to the repo operation under
`:telemetry_options`
## Read-only repositories
You can mark a repository as read-only by passing the `:read_only`
flag on `use`:
use Ecto.Repo, otp_app: ..., adapter: ..., read_only: true
By passing the `:read_only` option, none of the functions that perform
write operations, such as `c:insert/2`, `c:insert_all/3`, `c:update_all/3`,
and friends will be defined.
"""
@type t :: module
@doc """
Returns all running Ecto repositories.
The list is returned in no particular order. The list
contains either atoms, for named Ecto repositories, or
PIDs.
"""
@spec all_running() :: [atom() | pid()]
def all_running do
  # Thin wrapper over the registry that tracks every started repo.
  Ecto.Repo.Registry.all_running()
end
# `use Ecto.Repo` entry point: injects the repository API into the calling
# module. Which functions are generated depends on the behaviours the
# configured adapter implements and on the :read_only option.
@doc false
defmacro __using__(opts) do
  quote bind_quoted: [opts: opts] do
    @behaviour Ecto.Repo

    # Validates the :otp_app/:adapter options at compile time; `behaviours`
    # lists the adapter capabilities used below to gate function generation.
    {otp_app, adapter, behaviours} =
      Ecto.Repo.Supervisor.compile_config(__MODULE__, opts)

    @otp_app otp_app
    @adapter adapter
    # Fallback repo used by get_dynamic_repo/0 when none was put in the process.
    @default_dynamic_repo opts[:default_dynamic_repo] || __MODULE__
    # When true, the write API (insert/update/delete/...) is not generated.
    @read_only opts[:read_only] || false
    @before_compile adapter
    @aggregates [:count, :avg, :max, :min, :sum]

    # Reads the runtime configuration from the :otp_app environment.
    def config do
      {:ok, config} = Ecto.Repo.Supervisor.runtime_config(:runtime, __MODULE__, @otp_app, [])
      config
    end

    def __adapter__ do
      @adapter
    end

    # Supervisor child spec; the repo itself runs as a supervisor.
    def child_spec(opts) do
      %{
        id: __MODULE__,
        start: {__MODULE__, :start_link, [opts]},
        type: :supervisor
      }
    end

    def start_link(opts \\ []) do
      Ecto.Repo.Supervisor.start_link(__MODULE__, @otp_app, @adapter, opts)
    end

    def stop(timeout \\ 5000) do
      Supervisor.stop(get_dynamic_repo(), :normal, timeout)
    end

    # Loads raw data into a schema struct or a map of types.
    def load(schema_or_types, data) do
      Ecto.Repo.Schema.load(@adapter, schema_or_types, data)
    end

    # Checks out a single connection for the duration of `fun`.
    def checkout(fun, opts \\ []) when is_function(fun) do
      {adapter, meta} = Ecto.Repo.Registry.lookup(get_dynamic_repo())
      adapter.checkout(meta, opts, fun)
    end

    def checked_out? do
      {adapter, meta} = Ecto.Repo.Registry.lookup(get_dynamic_repo())
      adapter.checked_out?(meta)
    end

    @compile {:inline, get_dynamic_repo: 0, with_default_options: 2}

    # The dynamic repo is kept in the process dictionary, so it is scoped
    # per process and falls back to @default_dynamic_repo.
    def get_dynamic_repo() do
      Process.get({__MODULE__, :dynamic_repo}, @default_dynamic_repo)
    end

    def put_dynamic_repo(dynamic) when is_atom(dynamic) or is_pid(dynamic) do
      Process.put({__MODULE__, :dynamic_repo}, dynamic) || @default_dynamic_repo
    end

    # Overridable hook: per-operation default options merged into every call.
    def default_options(_operation), do: []
    defoverridable default_options: 1

    defp with_default_options(operation_name, opts) do
      Keyword.merge(default_options(operation_name), opts)
    end

    ## Transactions

    # Generated only when the adapter supports transactions.
    if Ecto.Adapter.Transaction in behaviours do
      def transaction(fun_or_multi, opts \\ []) do
        Ecto.Repo.Transaction.transaction(__MODULE__, get_dynamic_repo(), fun_or_multi, with_default_options(:transaction, opts))
      end

      def in_transaction? do
        Ecto.Repo.Transaction.in_transaction?(get_dynamic_repo())
      end

      @spec rollback(term) :: no_return
      def rollback(value) do
        Ecto.Repo.Transaction.rollback(get_dynamic_repo(), value)
      end
    end

    ## Schemas

    # Write API; omitted entirely for read-only repositories.
    if Ecto.Adapter.Schema in behaviours and not @read_only do
      def insert(struct, opts \\ []) do
        Ecto.Repo.Schema.insert(__MODULE__, get_dynamic_repo(), struct, with_default_options(:insert, opts))
      end

      def update(struct, opts \\ []) do
        Ecto.Repo.Schema.update(__MODULE__, get_dynamic_repo(), struct, with_default_options(:update, opts))
      end

      def insert_or_update(changeset, opts \\ []) do
        Ecto.Repo.Schema.insert_or_update(__MODULE__, get_dynamic_repo(), changeset, with_default_options(:insert_or_update, opts))
      end

      def delete(struct, opts \\ []) do
        Ecto.Repo.Schema.delete(__MODULE__, get_dynamic_repo(), struct, with_default_options(:delete, opts))
      end

      # Bang variants raise on failure instead of returning {:error, _}.
      def insert!(struct, opts \\ []) do
        Ecto.Repo.Schema.insert!(__MODULE__, get_dynamic_repo(), struct, with_default_options(:insert, opts))
      end

      def update!(struct, opts \\ []) do
        Ecto.Repo.Schema.update!(__MODULE__, get_dynamic_repo(), struct, with_default_options(:update, opts))
      end

      def insert_or_update!(changeset, opts \\ []) do
        Ecto.Repo.Schema.insert_or_update!(__MODULE__, get_dynamic_repo(), changeset, with_default_options(:insert_or_update, opts))
      end

      def delete!(struct, opts \\ []) do
        Ecto.Repo.Schema.delete!(__MODULE__, get_dynamic_repo(), struct, with_default_options(:delete, opts))
      end

      def insert_all(schema_or_source, entries, opts \\ []) do
        Ecto.Repo.Schema.insert_all(__MODULE__, get_dynamic_repo(), schema_or_source, entries, with_default_options(:insert_all, opts))
      end
    end

    ## Queryable

    # Query API; generated when the adapter can execute queries.
    if Ecto.Adapter.Queryable in behaviours do
      # Bulk-write queries are also gated behind :read_only.
      if not @read_only do
        def update_all(queryable, updates, opts \\ []) do
          Ecto.Repo.Queryable.update_all(get_dynamic_repo(), queryable, updates, with_default_options(:update_all, opts))
        end

        def delete_all(queryable, opts \\ []) do
          Ecto.Repo.Queryable.delete_all(get_dynamic_repo(), queryable, with_default_options(:delete_all, opts))
        end
      end

      def all(queryable, opts \\ []) do
        Ecto.Repo.Queryable.all(get_dynamic_repo(), queryable, with_default_options(:all, opts))
      end

      def stream(queryable, opts \\ []) do
        Ecto.Repo.Queryable.stream(get_dynamic_repo(), queryable, with_default_options(:stream, opts))
      end

      def get(queryable, id, opts \\ []) do
        Ecto.Repo.Queryable.get(get_dynamic_repo(), queryable, id, with_default_options(:all, opts))
      end

      def get!(queryable, id, opts \\ []) do
        Ecto.Repo.Queryable.get!(get_dynamic_repo(), queryable, id, with_default_options(:all, opts))
      end

      def get_by(queryable, clauses, opts \\ []) do
        Ecto.Repo.Queryable.get_by(get_dynamic_repo(), queryable, clauses, with_default_options(:all, opts))
      end

      def get_by!(queryable, clauses, opts \\ []) do
        Ecto.Repo.Queryable.get_by!(get_dynamic_repo(), queryable, clauses, with_default_options(:all, opts))
      end

      def reload(queryable, opts \\ []) do
        Ecto.Repo.Queryable.reload(get_dynamic_repo(), queryable, opts)
      end

      def reload!(queryable, opts \\ []) do
        Ecto.Repo.Queryable.reload!(get_dynamic_repo(), queryable, opts)
      end

      def one(queryable, opts \\ []) do
        Ecto.Repo.Queryable.one(get_dynamic_repo(), queryable, with_default_options(:all, opts))
      end

      def one!(queryable, opts \\ []) do
        Ecto.Repo.Queryable.one!(get_dynamic_repo(), queryable, with_default_options(:all, opts))
      end

      # aggregate/3 has two shapes: (queryable, :count, opts) and
      # (queryable, aggregate, field); aggregate/4 adds opts to the latter.
      def aggregate(queryable, aggregate, opts \\ [])

      def aggregate(queryable, aggregate, opts)
          when aggregate in [:count] and is_list(opts) do
        Ecto.Repo.Queryable.aggregate(get_dynamic_repo(), queryable, aggregate, with_default_options(:all, opts))
      end

      def aggregate(queryable, aggregate, field)
          when aggregate in @aggregates and is_atom(field) do
        Ecto.Repo.Queryable.aggregate(get_dynamic_repo(), queryable, aggregate, field, with_default_options(:all, []))
      end

      def aggregate(queryable, aggregate, field, opts)
          when aggregate in @aggregates and is_atom(field) and is_list(opts) do
        Ecto.Repo.Queryable.aggregate(get_dynamic_repo(), queryable, aggregate, field, with_default_options(:all, opts))
      end

      def exists?(queryable, opts \\ []) do
        Ecto.Repo.Queryable.exists?(get_dynamic_repo(), queryable, with_default_options(:all, opts))
      end

      def preload(struct_or_structs_or_nil, preloads, opts \\ []) do
        Ecto.Repo.Preloader.preload(struct_or_structs_or_nil, get_dynamic_repo(), preloads, with_default_options(:preload, opts))
      end

      # Overridable hook invoked before queries run; defaults to a no-op.
      def prepare_query(operation, query, opts), do: {query, opts}
      defoverridable prepare_query: 3
    end
  end
end
## User callbacks
@optional_callbacks init: 2
@doc """
A callback executed when the repo starts or when configuration is read.
The first argument is the context the callback is being invoked. If it
is called because the Repo supervisor is starting, it will be `:supervisor`.
It will be `:runtime` if it is called for reading configuration without
actually starting a process.
The second argument is the repository configuration as stored in the
application environment. It must return `{:ok, keyword}` with the updated
list of configuration or `:ignore` (only in the `:supervisor` case).
"""
@callback init(context :: :supervisor | :runtime, config :: Keyword.t()) ::
{:ok, Keyword.t()} | :ignore
## Ecto.Adapter
@doc """
Returns the adapter tied to the repository.
"""
@callback __adapter__ :: Ecto.Adapter.t()
@doc """
Returns the adapter configuration stored in the `:otp_app` environment.
If the `c:init/2` callback is implemented in the repository,
it will be invoked with the first argument set to `:runtime`.
"""
@callback config() :: Keyword.t()
@doc """
Starts any connection pooling or supervision and return `{:ok, pid}`
or just `:ok` if nothing needs to be done.
Returns `{:error, {:already_started, pid}}` if the repo is already
started or `{:error, term}` in case anything else goes wrong.
## Options
See the configuration in the moduledoc for options shared between adapters,
for adapter-specific configuration see the adapter's documentation.
"""
@callback start_link(opts :: Keyword.t()) ::
{:ok, pid}
| {:error, {:already_started, pid}}
| {:error, term}
@doc """
Shuts down the repository.
"""
@callback stop(timeout) :: :ok
@doc """
Checks out a connection for the duration of the function.
It returns the result of the function. This is useful when
you need to perform multiple operations against the repository
in a row and you want to avoid checking out the connection
multiple times.
`checkout/2` and `transaction/2` can be combined and nested
multiple times. If `checkout/2` is called inside the function
of another `checkout/2` call, the function is simply executed,
without checking out a new connection.
## Options
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
"""
@callback checkout((() -> result), opts :: Keyword.t()) :: result when result: var
@doc """
Returns true if a connection has been checked out.
This is true if inside a `c:Ecto.Repo.checkout/2` or
`c:Ecto.Repo.transaction/2`.
## Examples
MyRepo.checked_out?
#=> false
MyRepo.transaction(fn ->
MyRepo.checked_out? #=> true
end)
MyRepo.checkout(fn ->
MyRepo.checked_out? #=> true
end)
"""
@callback checked_out?() :: boolean
@doc """
Loads `data` into a struct or a map.
The first argument can be a schema module, or a
map (of types) and determines the return value:
a struct or a map, respectively.
The second argument `data` specifies fields and values that are to be loaded.
It can be a map, a keyword list, or a `{fields, values}` tuple.
Fields can be atoms or strings.
Fields that are not present in the schema (or `types` map) are ignored.
If any of the values has invalid type, an error is raised.
To load data from non-database sources, use `Ecto.embedded_load/3`.
## Examples
iex> MyRepo.load(User, %{name: "Alice", age: 25})
%User{name: "Alice", age: 25}
iex> MyRepo.load(User, [name: "Alice", age: 25])
%User{name: "Alice", age: 25}
`data` can also take form of `{fields, values}`:
iex> MyRepo.load(User, {[:name, :age], ["Alice", 25]})
%User{name: "Alice", age: 25, ...}
The first argument can also be a `types` map:
iex> types = %{name: :string, age: :integer}
iex> MyRepo.load(types, %{name: "Alice", age: 25})
%{name: "Alice", age: 25}
This function is especially useful when parsing raw query results:
iex> result = Ecto.Adapters.SQL.query!(MyRepo, "SELECT * FROM users", [])
iex> Enum.map(result.rows, &MyRepo.load(User, {result.columns, &1}))
[%User{...}, ...]
"""
@callback load(
module_or_map :: module | map(),
data :: map() | Keyword.t() | {list, list}
) :: Ecto.Schema.t() | map()
@doc """
Returns the atom name or pid of the current repository.
See `c:put_dynamic_repo/1` for more information.
"""
@callback get_dynamic_repo() :: atom() | pid()
@doc """
Sets the dynamic repository to be used in further interactions.
Sometimes you may want a single Ecto repository to talk to
many different database instances. By default, when you call
`MyApp.Repo.start_link/1`, it will start a repository with
name `MyApp.Repo`. But if you want to start multiple repositories,
you can give each of them a different name:
MyApp.Repo.start_link(name: :tenant_foo, hostname: "foo.example.com")
MyApp.Repo.start_link(name: :tenant_bar, hostname: "bar.example.com")
You can also start repositories without names by explicitly
setting the name to nil:
MyApp.Repo.start_link(name: nil, hostname: "temp.example.com")
However, once the repository is started, you can't directly interact with
it, since all operations in `MyApp.Repo` are sent by default to the repository
named `MyApp.Repo`. You can change the default repo at compile time with:
use Ecto.Repo, default_dynamic_repo: :name_of_repo
Or you can change it anytime at runtime by calling `put_dynamic_repo/1`:
MyApp.Repo.put_dynamic_repo(:tenant_foo)
From this moment on, all future queries done by the current process will
run on `:tenant_foo`.
**Note this feature is experimental and may be changed or removed in future
releases.**
"""
@callback put_dynamic_repo(name_or_pid :: atom() | pid()) :: atom() | pid()
## Ecto.Adapter.Queryable
@optional_callbacks get: 3, get!: 3, get_by: 3, get_by!: 3, reload: 2, reload!: 2, aggregate: 3,
aggregate: 4, exists?: 2, one: 2, one!: 2, preload: 3, all: 2, stream: 2,
update_all: 3, delete_all: 2
@doc """
Fetches a single struct from the data store where the primary key matches the
given id.
Returns `nil` if no result was found. If the struct in the queryable
has no primary key, or has more than one primary key, it will raise an argument error.
## Options
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This will be applied to all `from`
and `join`s in the query that did not have a prefix previously given
either via the `:prefix` option on `join`/`from` or via `@schema_prefix`
in the schema. For more information see the "Query Prefix" section of the
`Ecto.Query` documentation.
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Example
MyRepo.get(Post, 42)
MyRepo.get(Post, 42, prefix: "public")
"""
@callback get(queryable :: Ecto.Queryable.t(), id :: term, opts :: Keyword.t()) ::
Ecto.Schema.t() | nil
@doc """
Similar to `c:get/3` but raises `Ecto.NoResultsError` if no record was found.
## Options
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This will be applied to all `from`
and `join`s in the query that did not have a prefix previously given
either via the `:prefix` option on `join`/`from` or via `@schema_prefix`
in the schema. For more information see the "Query Prefix" section of the
`Ecto.Query` documentation.
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Example
MyRepo.get!(Post, 42)
MyRepo.get!(Post, 42, prefix: "public")
"""
@callback get!(queryable :: Ecto.Queryable.t(), id :: term, opts :: Keyword.t()) ::
Ecto.Schema.t()
@doc """
Fetches a single result from the query.
Returns `nil` if no result was found. Raises if more than one entry.
## Options
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This will be applied to all `from`
and `join`s in the query that did not have a prefix previously given
either via the `:prefix` option on `join`/`from` or via `@schema_prefix`
in the schema. For more information see the "Query Prefix" section of the
`Ecto.Query` documentation.
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Example
MyRepo.get_by(Post, title: "My post")
MyRepo.get_by(Post, [title: "My post"], prefix: "public")
"""
@callback get_by(
queryable :: Ecto.Queryable.t(),
clauses :: Keyword.t() | map,
opts :: Keyword.t()
) :: Ecto.Schema.t() | nil
@doc """
Similar to `c:get_by/3` but raises `Ecto.NoResultsError` if no record was found.
Raises if more than one entry.
## Options
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This will be applied to all `from`
and `join`s in the query that did not have a prefix previously given
either via the `:prefix` option on `join`/`from` or via `@schema_prefix`
in the schema. For more information see the "Query Prefix" section of the
`Ecto.Query` documentation.
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Example
MyRepo.get_by!(Post, title: "My post")
MyRepo.get_by!(Post, [title: "My post"], prefix: "public")
"""
@callback get_by!(
queryable :: Ecto.Queryable.t(),
clauses :: Keyword.t() | map,
opts :: Keyword.t()
) :: Ecto.Schema.t()
@doc """
Reloads a given schema or schema list from the database.
When used with lists, it is expected that all of the structs in the list belong
to the same schema. Ordering is guaranteed to be kept. Results not found in
the database will be returned as `nil`.
## Example
MyRepo.reload(post)
%Post{}
MyRepo.reload([post1, post2])
[%Post{}, %Post{}]
MyRepo.reload([deleted_post, post1])
[nil, %Post{}]
"""
@callback reload(
struct_or_structs :: Ecto.Schema.t() | [Ecto.Schema.t()],
opts :: Keyword.t()
) :: Ecto.Schema.t() | [Ecto.Schema.t() | nil] | nil
@doc """
Similar to `c:reload/2`, but raises when something is not found.
When used with lists, ordering is guaranteed to be kept.
## Example
MyRepo.reload!(post)
%Post{}
MyRepo.reload!([post1, post2])
[%Post{}, %Post{}]
"""
@callback reload!(struct_or_structs, opts :: Keyword.t()) :: struct_or_structs
when struct_or_structs: Ecto.Schema.t() | [Ecto.Schema.t()]
@doc """
Calculate the given `aggregate`.
If the query has a limit, offset, distinct or combination set, it will be
automatically wrapped in a subquery in order to return the
proper result.
Any preload or select in the query will be ignored in favor of
the column being aggregated.
The aggregation will fail if any `group_by` field is set.
## Options
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This will be applied to all `from`
and `join`s in the query that did not have a prefix previously given
either via the `:prefix` option on `join`/`from` or via `@schema_prefix`
in the schema. For more information see the "Query Prefix" section of the
`Ecto.Query` documentation.
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Examples
# Returns the number of blog posts
Repo.aggregate(Post, :count)
# Returns the number of blog posts in the "private" schema path
# (in Postgres) or database (in MySQL)
Repo.aggregate(Post, :count, prefix: "private")
"""
@callback aggregate(
queryable :: Ecto.Queryable.t(),
aggregate :: :count,
opts :: Keyword.t()
) :: term | nil
@doc """
Calculate the given `aggregate` over the given `field`.
See `c:aggregate/3` for general considerations and options.
## Examples
# Returns the number of visits per blog post
Repo.aggregate(Post, :count, :visits)
# Returns the number of visits per blog post in the "private" schema path
# (in Postgres) or database (in MySQL)
Repo.aggregate(Post, :count, :visits, prefix: "private")
# Returns the average number of visits for the top 10
query = from Post, limit: 10
Repo.aggregate(query, :avg, :visits)
"""
@callback aggregate(
queryable :: Ecto.Queryable.t(),
aggregate :: :avg | :count | :max | :min | :sum,
field :: atom,
opts :: Keyword.t()
) :: term | nil
@doc """
Checks if there exists an entry that matches the given query.
Returns a boolean.
## Options
  * `:prefix` - The prefix to run the query on (such as the schema path
    in Postgres or the database in MySQL). This will be applied to all `from`
    and `join`s in the query that did not have a prefix previously given
    either via the `:prefix` option on `join`/`from` or via `@schema_prefix`
    in the schema. For more information see the "Query Prefix" section of the
    `Ecto.Query` documentation.
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Examples
    # checks if any posts exist
    Repo.exists?(Post)
    # checks if any posts exist in the "private" schema path (in Postgres) or
    # database (in MySQL)
    Repo.exists?(Post, prefix: "private")
    # checks if any post with a like count greater than 10 exists
    query = from p in Post, where: p.like_count > 10
    Repo.exists?(query)
"""
@callback exists?(queryable :: Ecto.Queryable.t(), opts :: Keyword.t()) :: boolean()
@doc """
Fetches a single result from the query.
Returns `nil` if no result was found. Raises if more than one entry.
## Options
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This will be applied to all `from`
and `join`s in the query that did not have a prefix previously given
either via the `:prefix` option on `join`/`from` or via `@schema_prefix`
in the schema. For more information see the "Query Prefix" section of the
`Ecto.Query` documentation.
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Examples
Repo.one(from p in Post, join: c in assoc(p, :comments), where: p.id == ^post_id)
query = from p in Post, join: c in assoc(p, :comments), where: p.id == ^post_id
Repo.one(query, prefix: "private")
"""
@callback one(queryable :: Ecto.Queryable.t(), opts :: Keyword.t()) ::
Ecto.Schema.t() | nil
@doc """
Similar to `c:one/2` but raises `Ecto.NoResultsError` if no record was found.
Raises if more than one entry.
## Options
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This will be applied to all `from`
and `join`s in the query that did not have a prefix previously given
either via the `:prefix` option on `join`/`from` or via `@schema_prefix`
in the schema. For more information see the "Query Prefix" section of the
`Ecto.Query` documentation.
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
"""
@callback one!(queryable :: Ecto.Queryable.t(), opts :: Keyword.t()) ::
Ecto.Schema.t()
@doc """
Preloads all associations on the given struct or structs.
This is similar to `Ecto.Query.preload/3` except it allows
you to preload structs after they have been fetched from the
database.
In case the association was already loaded, preload won't attempt
to reload it.
## Options
* `:force` - By default, Ecto won't preload associations that
are already loaded. By setting this option to true, any existing
association will be discarded and reloaded.
* `:in_parallel` - If the preloads must be done in parallel. It can
only be performed when we have more than one preload and the
repository is not in a transaction. Defaults to `true`.
* `:prefix` - the prefix to fetch preloads from. By default, queries
will use the same prefix as the one in the given collection. This
option allows the prefix to be changed.
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Examples
# Use a single atom to preload an association
posts = Repo.preload posts, :comments
# Use a list of atoms to preload multiple associations
posts = Repo.preload posts, [:comments, :authors]
# Use a keyword list to preload nested associations as well
posts = Repo.preload posts, [comments: [:replies, :likes], authors: []]
# Use a keyword list to customize how associations are queried
posts = Repo.preload posts, [comments: from(c in Comment, order_by: c.published_at)]
# Use a two-element tuple for a custom query and nested association definition
query = from c in Comment, order_by: c.published_at
posts = Repo.preload posts, [comments: {query, [:replies, :likes]}]
The query given to preload may also preload its own associations.
"""
@callback preload(structs_or_struct_or_nil, preloads :: term, opts :: Keyword.t()) ::
structs_or_struct_or_nil
when structs_or_struct_or_nil: [Ecto.Schema.t()] | Ecto.Schema.t() | nil
@doc """
A user customizable callback invoked for query-based operations.
This callback can be used to further modify the query and options
before it is transformed and sent to the database.
This callback is invoked for all query APIs, including the `stream`
functions. It is also invoked for `insert_all` if a source query is
given. It is not invoked for any of the other schema functions.
## Examples
Let's say you want to filter out records that were "soft-deleted"
(have `deleted_at` column set) from all operations unless an admin
is running the query; you can define the callback like this:
@impl true
def prepare_query(_operation, query, opts) do
if opts[:admin] do
{query, opts}
else
query = from(x in query, where: is_nil(x.deleted_at))
{query, opts}
end
end
And then execute the query:
Repo.all(query) # only non-deleted records are returned
Repo.all(query, admin: true) # all records are returned
The callback will be invoked for all queries, including queries
made from associations and preloads. It is not invoked for each
individual join inside a query.
"""
@callback prepare_query(operation, query :: Ecto.Query.t(), opts :: Keyword.t()) ::
{Ecto.Query.t(), Keyword.t()}
when operation: :all | :update_all | :delete_all | :stream | :insert_all
@doc """
A user customizable callback invoked to retrieve default options
for operations.
This can be used to provide default values per operation that
have higher precedence than the values given on configuration
or when starting the repository. It can also be used to set
query specific options, such as `:prefix`.
This callback is invoked as the entry point for all repository
operations. For example, if you are executing a query with preloads,
this callback will be invoked once at the beginning, but the
options returned here will be passed to all following operations.
"""
@callback default_options(operation) :: Keyword.t()
when operation: :all | :insert_all | :update_all | :delete_all | :stream |
:transaction | :insert | :update | :delete | :insert_or_update
@doc """
Fetches all entries from the data store matching the given query.
May raise `Ecto.QueryError` if query validation fails.
## Options
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This will be applied to all `from`
and `join`s in the query that did not have a prefix previously given
either via the `:prefix` option on `join`/`from` or via `@schema_prefix`
in the schema. For more information see the "Query Prefix" section of the
`Ecto.Query` documentation.
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Example
# Fetch all post titles
query = from p in Post,
select: p.title
MyRepo.all(query)
"""
@callback all(queryable :: Ecto.Queryable.t(), opts :: Keyword.t()) :: [Ecto.Schema.t()]
@doc """
Returns a lazy enumerable that emits all entries from the data store
matching the given query.
SQL adapters, such as Postgres and MySQL, can only enumerate a stream
inside a transaction.
May raise `Ecto.QueryError` if query validation fails.
## Options
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This will be applied to all `from`
and `join`s in the query that did not have a prefix previously given
either via the `:prefix` option on `join`/`from` or via `@schema_prefix`
in the schema. For more information see the "Query Prefix" section of the
`Ecto.Query` documentation.
* `:max_rows` - The number of rows to load from the database as we stream.
It is supported at least by Postgres and MySQL and defaults to 500.
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Example
# Fetch all post titles
query = from p in Post,
select: p.title
stream = MyRepo.stream(query)
MyRepo.transaction(fn() ->
Enum.to_list(stream)
end)
"""
@callback stream(queryable :: Ecto.Queryable.t(), opts :: Keyword.t()) :: Enum.t()
@doc """
Updates all entries matching the given query with the given values.
It returns a tuple containing the number of entries and any returned
result as second element. The second element is `nil` by default
unless a `select` is supplied in the update query. Note, however,
not all databases support returning data from UPDATEs.
Keep in mind this `update_all` will not update autogenerated
fields like the `updated_at` columns.
See `Ecto.Query.update/3` for update operations that can be
performed on fields.
## Options
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This overrides the prefix set
in the query and any `@schema_prefix` set in the schema.
See the ["Shared options"](#module-shared-options) section at the module
documentation for remaining options.
## Examples
MyRepo.update_all(Post, set: [title: "New title"])
MyRepo.update_all(Post, inc: [visits: 1])
from(p in Post, where: p.id < 10, select: p.visits)
|> MyRepo.update_all(set: [title: "New title"])
from(p in Post, where: p.id < 10, update: [set: [title: "New title"]])
|> MyRepo.update_all([])
from(p in Post, where: p.id < 10, update: [set: [title: ^new_title]])
|> MyRepo.update_all([])
from(p in Post, where: p.id < 10, update: [set: [title: fragment("upper(?)", ^new_title)]])
|> MyRepo.update_all([])
"""
@callback update_all(
queryable :: Ecto.Queryable.t(),
updates :: Keyword.t(),
opts :: Keyword.t()
) :: {integer, nil | [term]}
@doc """
Deletes all entries matching the given query.
It returns a tuple containing the number of entries and any returned
result as second element. The second element is `nil` by default
unless a `select` is supplied in the delete query. Note, however,
not all databases support returning data from DELETEs.
## Options
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This overrides the prefix set
in the query and any `@schema_prefix` set in the schema.
See the ["Shared options"](#module-shared-options) section at the module
documentation for remaining options.
## Examples
MyRepo.delete_all(Post)
from(p in Post, where: p.id < 10) |> MyRepo.delete_all
"""
@callback delete_all(queryable :: Ecto.Queryable.t(), opts :: Keyword.t()) ::
{integer, nil | [term]}
## Ecto.Adapter.Schema
@optional_callbacks insert_all: 3, insert: 2, insert!: 2, update: 2, update!: 2,
delete: 2, delete!: 2, insert_or_update: 2, insert_or_update!: 2,
prepare_query: 3
@doc """
Inserts all entries into the repository.
It expects a schema module (`MyApp.User`) or a source (`"users"`) or
both (`{"users", MyApp.User}`) as the first argument. The second
argument is a list of entries to be inserted, either as keyword
lists or as maps. The keys of the entries are the field names as
atoms and the value should be the respective value for the field
type or, optionally, an `Ecto.Query` that returns a single entry
with a single value.
It returns a tuple containing the number of entries
and any returned result as second element. If the database
does not support RETURNING in INSERT statements or no
return result was selected, the second element will be `nil`.
When a schema module is given, the entries given will be properly dumped
before being sent to the database. If the schema primary key has type
`:id` or `:binary_id`, it will be handled either at the adapter
or the storage layer. However any other primary key type or autogenerated
value, like `Ecto.UUID` and timestamps, won't be autogenerated when
using `c:insert_all/3`. You must set those fields explicitly. This is by
design as this function aims to be a more direct way to insert data into
the database without the conveniences of `c:insert/2`. This is also
consistent with `c:update_all/3` that does not handle auto generated
values as well.
It is also not possible to use `insert_all` to insert across multiple
tables, therefore associations are not supported.
If a source is given, without a schema module, the given fields are passed
as is to the adapter.
## Options
* `:returning` - selects which fields to return. When `true`,
returns all fields in the given schema. May be a list of
fields, where a struct is still returned but only with the
given fields. Or `false`, where nothing is returned (the default).
This option is not supported by all databases.
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This overrides the prefix set
in the query and any `@schema_prefix` set in the schema.
* `:on_conflict` - It may be one of `:raise` (the default), `:nothing`,
`:replace_all`, `{:replace_all_except, fields}`, `{:replace, fields}`,
a keyword list of update instructions or an `Ecto.Query`
query for updates. See the "Upserts" section for more information.
* `:conflict_target` - A list of column names to verify for conflicts.
Those columns are expected to have unique indexes on them that may conflict.
If none is specified, the conflict target is left up to the database.
It may also be `{:unsafe_fragment, binary_fragment}` to pass any
expression to the database without any sanitization, this is useful
for partial index or index with expressions, such as
`ON CONFLICT (coalesce(firstname, ""), coalesce(lastname, ""))`.
* `:placeholders` - A map with placeholders. This feature is not supported
by all databases. See the "Placeholders" section for more information.
See the ["Shared options"](#module-shared-options) section at the module
documentation for remaining options.
## Source query
A query can be given instead of a list with entries. This query needs to select
into a map containing only keys that are available as writeable columns in the
schema.
## Examples
MyRepo.insert_all(Post, [[title: "My first post"], [title: "My second post"]])
MyRepo.insert_all(Post, [%{title: "My first post"}, %{title: "My second post"}])
query = from p in Post,
join: c in assoc(p, :comments),
select: %{
author_id: p.author_id,
posts: count(p.id, :distinct),
interactions: sum(p.likes) + count(c.id)
},
group_by: p.author_id
MyRepo.insert_all(AuthorStats, query)
## Upserts
`c:insert_all/3` provides upserts (update or inserts) via the `:on_conflict`
option. The `:on_conflict` option supports the following values:
* `:raise` - raises if there is a conflicting primary key or unique index
* `:nothing` - ignores the error in case of conflicts
* `:replace_all` - replace **all** values on the existing row with the values
in the schema/changeset, including fields not explicitly set in the changeset,
such as IDs and autogenerated timestamps (`inserted_at` and `updated_at`).
Do not use this option if you have auto-incrementing primary keys, as they
will also be replaced. You most likely want to use `{:replace_all_except, [:id]}`
or `{:replace, fields}` explicitly instead. This option requires a schema
* `{:replace_all_except, fields}` - same as above except the given fields
are not replaced. This option requires a schema
* `{:replace, fields}` - replace only specific columns. This option requires
`:conflict_target`
* a keyword list of update instructions - such as the one given to
`c:update_all/3`, for example: `[set: [title: "new title"]]`
* an `Ecto.Query` that will act as an `UPDATE` statement, such as the
one given to `c:update_all/3`
Upserts map to "ON CONFLICT" on databases like Postgres and "ON DUPLICATE KEY"
on databases such as MySQL.
## Return values
By default, both Postgres and MySQL will return the number of entries
inserted on `c:insert_all/3`. However, when the `:on_conflict` option
is specified, Postgres and MySQL will return different results.
Postgres will only count a row if it was affected and will
return 0 if no new entry was added.
MySQL will return, at a minimum, the number of entries attempted. For example,
if `:on_conflict` is set to `:nothing`, MySQL will return
the number of entries attempted to be inserted, even when no entry
was added.
Also note that if `:on_conflict` is a query, MySQL will return
the number of attempted entries plus the number of entries modified
by the UPDATE query.
## Placeholders
Passing in a map for the `:placeholders` allows you to send less
data over the wire when you have many entries with the same value
for a field. To use a placeholder, replace its value in each of your
entries with `{:placeholder, key}`, where `key` is the key you
are using in the `:placeholders` option map. For example:
placeholders = %{blob: large_blob_of_text(...)}
entries = [
%{title: "v1", body: {:placeholder, :blob}},
%{title: "v2", body: {:placeholder, :blob}}
]
Repo.insert_all(Post, entries, placeholders: placeholders)
Keep in mind that:
* placeholders cannot be nested in other values. For example, you
cannot put a placeholder inside an array. Instead, the whole
array has to be the placeholder
* a placeholder key can only be used with columns of the same type
* placeholders require a database that supports index parameters,
so they are not currently compatible with MySQL
"""
@callback insert_all(
schema_or_source :: binary | {binary, module} | module,
entries_or_query :: [map | [{atom, term | Ecto.Query.t}]] | Ecto.Query.t,
opts :: Keyword.t()
) :: {integer, nil | [term]}
@doc """
Inserts a struct defined via `Ecto.Schema` or a changeset.
In case a struct is given, the struct is converted into a changeset
with all non-nil fields as part of the changeset.
In case a changeset is given, the changes in the changeset are
merged with the struct fields, and all of them are sent to the
database.
It returns `{:ok, struct}` if the struct has been successfully
inserted or `{:error, changeset}` if there was a validation
or a known constraint error.
## Options
* `:returning` - selects which fields to return. It accepts a list
of fields to be returned from the database. When `true`, returns
all fields. When `false`, no extra fields are returned. It will
always include all fields in `read_after_writes` as well as any
autogenerated id. Not all databases support this option and it
may not be available during upserts. See the "Upserts" section
for more information.
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This overrides the prefix set
in the query and any `@schema_prefix` set any schemas. Also, the
`@schema_prefix` for the parent record will override all default
`@schema_prefix`s set in any child schemas for associations.
* `:on_conflict` - It may be one of `:raise` (the default), `:nothing`,
`:replace_all`, `{:replace_all_except, fields}`, `{:replace, fields}`,
a keyword list of update instructions or an `Ecto.Query` query for updates.
See the "Upserts" section for more information.
* `:conflict_target` - A list of column names to verify for conflicts.
Those columns are expected to have unique indexes on them that may conflict.
If none is specified, the conflict target is left up to the database.
It may also be `{:unsafe_fragment, binary_fragment}` to pass any
expression to the database without any sanitization, this is useful
for partial index or index with expressions, such as
`ON CONFLICT (coalesce(firstname, ""), coalesce(lastname, ""))`.
* `:stale_error_field` - The field where stale errors will be added in
the returning changeset. This option can be used to avoid raising
`Ecto.StaleEntryError`.
* `:stale_error_message` - The message to add to the configured
`:stale_error_field` when stale errors happen, defaults to "is stale".
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Examples
A typical example is calling `MyRepo.insert/1` with a struct
and acting on the return value:
case MyRepo.insert %Post{title: "Ecto is great"} do
{:ok, struct} -> # Inserted with success
{:error, changeset} -> # Something went wrong
end
## Upserts
`c:insert/2` provides upserts (update or inserts) via the `:on_conflict`
option. The `:on_conflict` option supports the following values:
* `:raise` - raises if there is a conflicting primary key or unique index
* `:nothing` - ignores the error in case of conflicts
* `:replace_all` - replace **all** values on the existing row with the values
in the schema/changeset, including fields not explicitly set in the changeset,
such as IDs and autogenerated timestamps (`inserted_at` and `updated_at`).
Do not use this option if you have auto-incrementing primary keys, as they
will also be replaced. You most likely want to use `{:replace_all_except, [:id]}`
or `{:replace, fields}` explicitly instead. This option requires a schema
* `{:replace_all_except, fields}` - same as above except the given fields are
not replaced. This option requires a schema
* `{:replace, fields}` - replace only specific columns. This option requires
`:conflict_target`
* a keyword list of update instructions - such as the one given to
`c:update_all/3`, for example: `[set: [title: "new title"]]`
* an `Ecto.Query` that will act as an `UPDATE` statement, such as the
one given to `c:update_all/3`. Similarly to `c:update_all/3`, auto
generated values, such as timestamps are not automatically updated.
If the struct cannot be found, `Ecto.StaleEntryError` will be raised.
Upserts map to "ON CONFLICT" on databases like Postgres and "ON DUPLICATE KEY"
on databases such as MySQL.
As an example, imagine `:title` is marked as a unique column in
the database:
{:ok, inserted} = MyRepo.insert(%Post{title: "this is unique"})
Now we can insert with the same title but do nothing on conflicts:
{:ok, ignored} = MyRepo.insert(%Post{title: "this is unique"}, on_conflict: :nothing)
assert ignored.id == nil
Because we used `on_conflict: :nothing`, instead of getting an error,
we got `{:ok, struct}`. However the returned struct does not reflect
the data in the database. One possible mechanism to detect if an
insert or nothing happened in case of `on_conflict: :nothing` is by
checking the `id` field. `id` will be nil if the field is autogenerated
by the database and no insert happened.
For actual upserts, where an insert or update may happen, the situation
is slightly more complex, as the database does not actually inform us
if an insert or update happened. Let's insert a post with the same title
but use a query to update the body column in case of conflicts:
# In Postgres (it requires the conflict target for updates):
on_conflict = [set: [body: "updated"]]
{:ok, updated} = MyRepo.insert(%Post{title: "this is unique"},
on_conflict: on_conflict, conflict_target: :title)
# In MySQL (conflict target is not supported):
on_conflict = [set: [title: "updated"]]
{:ok, updated} = MyRepo.insert(%Post{id: inserted.id, title: "updated"},
on_conflict: on_conflict)
In the examples above, even though it returned `:ok`, we do not know
if we inserted new data or if we updated only the `:on_conflict` fields.
In case an update happened, the data in the struct most likely does
not match the data in the database. For example, autogenerated fields
such as `inserted_at` will point to now rather than the time the
struct was actually inserted.
If you need to guarantee the data in the returned struct mirrors the
database, you have three options:
* Use `on_conflict: :replace_all`, although that will replace all
fields in the database with the ones in the struct/changeset,
including autogenerated fields such as `inserted_at` and `updated_at`:
MyRepo.insert(%Post{title: "this is unique"},
on_conflict: :replace_all, conflict_target: :title)
* Specify `read_after_writes: true` in your schema for choosing
fields that are read from the database after every operation.
Or pass `returning: true` to `insert` to read all fields back:
MyRepo.insert(%Post{title: "this is unique"}, returning: true,
on_conflict: on_conflict, conflict_target: :title)
* Alternatively, read the data again from the database in a separate
query. This option requires the primary key to be generated by the
database:
{:ok, updated} = MyRepo.insert(%Post{title: "this is unique"}, on_conflict: on_conflict)
Repo.get(Post, updated.id)
Because of the inability to know if the struct is up to date or not,
inserting a struct with associations and using the `:on_conflict` option
at the same time is not recommended, as Ecto will be unable to actually
track the proper status of the association.
"""
@callback insert(
struct_or_changeset :: Ecto.Schema.t() | Ecto.Changeset.t(),
opts :: Keyword.t()
) :: {:ok, Ecto.Schema.t()} | {:error, Ecto.Changeset.t()}
@doc """
Updates a changeset using its primary key.
A changeset is required as it is the only mechanism for
tracking dirty changes. Only the fields present in the `changes` part
of the changeset are sent to the database. Any other, in-memory
changes done to the schema are ignored.
If the struct has no primary key, `Ecto.NoPrimaryKeyFieldError`
will be raised.
If the struct cannot be found, `Ecto.StaleEntryError` will be raised.
It returns `{:ok, struct}` if the struct has been successfully
updated or `{:error, changeset}` if there was a validation
or a known constraint error.
## Options
* `:returning` - selects which fields to return. It accepts a list
of fields to be returned from the database. When `true`, returns
all fields. When `false`, no extra fields are returned. It will
always include all fields in `read_after_writes`. Not all
databases support this option.
* `:force` - By default, if there are no changes in the changeset,
`c:update/2` is a no-op. By setting this option to true, update
callbacks will always be executed, even if there are no changes
(including timestamps).
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This overrides the prefix set
in the query and any `@schema_prefix` set any schemas. Also, the
`@schema_prefix` for the parent record will override all default
`@schema_prefix`s set in any child schemas for associations.
* `:stale_error_field` - The field where stale errors will be added in
the returning changeset. This option can be used to avoid raising
`Ecto.StaleEntryError`.
* `:stale_error_message` - The message to add to the configured
`:stale_error_field` when stale errors happen, defaults to "is stale".
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Example
post = MyRepo.get!(Post, 42)
post = Ecto.Changeset.change post, title: "New title"
case MyRepo.update post do
{:ok, struct} -> # Updated with success
{:error, changeset} -> # Something went wrong
end
"""
@callback update(changeset :: Ecto.Changeset.t(), opts :: Keyword.t()) ::
{:ok, Ecto.Schema.t()} | {:error, Ecto.Changeset.t()}
@doc """
Inserts or updates a changeset depending on whether the struct is persisted
or not.
The distinction whether to insert or update will be made on the
`Ecto.Schema.Metadata` field `:state`. The `:state` is automatically set by
Ecto when loading or building a schema.
Please note that for this to work, you will have to load existing structs from
the database. So even if the struct exists, this won't work:
struct = %Post{id: "existing_id", ...}
MyRepo.insert_or_update changeset
# => {:error, changeset} # id already exists
## Options
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This overrides the prefix set
in the query and any `@schema_prefix` set any schemas. Also, the
`@schema_prefix` for the parent record will override all default
`@schema_prefix`s set in any child schemas for associations.
* `:stale_error_field` - The field where stale errors will be added in
the returning changeset. This option can be used to avoid raising
`Ecto.StaleEntryError`. Only applies to updates.
* `:stale_error_message` - The message to add to the configured
`:stale_error_field` when stale errors happen, defaults to "is stale".
Only applies to updates.
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Example
result =
case MyRepo.get(Post, id) do
nil -> %Post{id: id} # Post not found, we build one
post -> post # Post exists, let's use it
end
|> Post.changeset(changes)
|> MyRepo.insert_or_update
case result do
{:ok, struct} -> # Inserted or updated with success
{:error, changeset} -> # Something went wrong
end
"""
@callback insert_or_update(changeset :: Ecto.Changeset.t(), opts :: Keyword.t()) ::
{:ok, Ecto.Schema.t()} | {:error, Ecto.Changeset.t()}
@doc """
Deletes a struct using its primary key.
If the struct has no primary key, `Ecto.NoPrimaryKeyFieldError`
will be raised. If the struct has been removed from db prior to
call, `Ecto.StaleEntryError` will be raised.
It returns `{:ok, struct}` if the struct has been successfully
deleted or `{:error, changeset}` if there was a validation
or a known constraint error.
## Options
* `:prefix` - The prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). This overrides the prefix set
in the query and any `@schema_prefix` set in the schema.
* `:stale_error_field` - The field where stale errors will be added in
the returning changeset. This option can be used to avoid raising
`Ecto.StaleEntryError`.
* `:stale_error_message` - The message to add to the configured
`:stale_error_field` when stale errors happen, defaults to "is stale".
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Example
post = MyRepo.get!(Post, 42)
case MyRepo.delete post do
{:ok, struct} -> # Deleted with success
{:error, changeset} -> # Something went wrong
end
"""
@callback delete(
struct_or_changeset :: Ecto.Schema.t() | Ecto.Changeset.t(),
opts :: Keyword.t()
) :: {:ok, Ecto.Schema.t()} | {:error, Ecto.Changeset.t()}
@doc """
Same as `c:insert/2` but returns the struct or raises if the changeset is invalid.
"""
@callback insert!(
struct_or_changeset :: Ecto.Schema.t() | Ecto.Changeset.t(),
opts :: Keyword.t()
) :: Ecto.Schema.t()
@doc """
Same as `c:update/2` but returns the struct or raises if the changeset is invalid.
"""
@callback update!(changeset :: Ecto.Changeset.t(), opts :: Keyword.t()) ::
Ecto.Schema.t()
@doc """
Same as `c:insert_or_update/2` but returns the struct or raises if the changeset
is invalid.
"""
@callback insert_or_update!(changeset :: Ecto.Changeset.t(), opts :: Keyword.t()) ::
Ecto.Schema.t()
@doc """
Same as `c:delete/2` but returns the struct or raises if the changeset is invalid.
"""
@callback delete!(
struct_or_changeset :: Ecto.Schema.t() | Ecto.Changeset.t(),
opts :: Keyword.t()
) :: Ecto.Schema.t()
## Ecto.Adapter.Transaction
@optional_callbacks transaction: 2, in_transaction?: 0, rollback: 1
@doc """
Runs the given function or `Ecto.Multi` inside a transaction.
## Use with function
`c:transaction/2` can be called with both a function of arity
zero or one. The arity zero function will just be executed as is,
while the arity one function will receive the repo of the transaction
as its first argument, similar to `Ecto.Multi.run/3`.
If an unhandled error occurs the transaction will be rolled back
and the error will bubble up from the transaction function.
If no error occurred the transaction will be committed when the
function returns. A transaction can be explicitly rolled back
by calling `c:rollback/1`, this will immediately leave the function
and return the value given to `rollback` as `{:error, value}`.
A successful transaction returns the value returned by the function
wrapped in a tuple as `{:ok, value}`.
If `c:transaction/2` is called inside another transaction, the function
is simply executed, without wrapping the new transaction call in any
way. If there is an error in the inner transaction and the error is
rescued, or the inner transaction is rolled back, the whole outer
transaction is marked as tainted, guaranteeing nothing will be committed.
## Use with Ecto.Multi
Besides functions, transactions can be used with an `Ecto.Multi` struct.
A transaction will be started, all operations applied and in case of
success committed returning `{:ok, changes}`. In case of any errors
the transaction will be rolled back and
`{:error, failed_operation, failed_value, changes_so_far}` will be
returned.
You can read more about using transactions with `Ecto.Multi` as well as
see some examples in the `Ecto.Multi` documentation.
## Options
See the ["Shared options"](#module-shared-options) section at the module
documentation for more options.
## Examples
import Ecto.Changeset, only: [change: 2]
MyRepo.transaction(fn ->
MyRepo.update!(change(alice, balance: alice.balance - 10))
MyRepo.update!(change(bob, balance: bob.balance + 10))
end)
# When passing a function of arity 1, it receives the repository itself
MyRepo.transaction(fn repo ->
repo.insert!(%Post{})
end)
# Roll back a transaction explicitly
MyRepo.transaction(fn ->
p = MyRepo.insert!(%Post{})
if not Editor.post_allowed?(p) do
MyRepo.rollback(:posting_not_allowed)
end
end)
# With Ecto.Multi
Ecto.Multi.new()
|> Ecto.Multi.insert(:post, %Post{})
|> MyRepo.transaction
"""
@callback transaction(fun_or_multi :: fun | Ecto.Multi.t(), opts :: Keyword.t()) ::
{:ok, any}
| {:error, any}
| {:error, Ecto.Multi.name(), any, %{Ecto.Multi.name() => any}}
@doc """
Returns true if the current process is inside a transaction.
If you are using the `Ecto.Adapters.SQL.Sandbox` in tests, note that even
though each test is inside a transaction, `in_transaction?/0` will only
return true inside transactions explicitly created with `transaction/2`. This
is done so the test environment mimics dev and prod.
If you are trying to debug transaction-related code while using
`Ecto.Adapters.SQL.Sandbox`, it may be more helpful to configure the database
to log all statements and consult those logs.
## Examples
MyRepo.in_transaction?
#=> false
MyRepo.transaction(fn ->
MyRepo.in_transaction? #=> true
end)
"""
@callback in_transaction?() :: boolean
@doc """
Rolls back the current transaction.
The transaction will return the value given as `{:error, value}`.
Note that calling `rollback` causes the code in the transaction to stop executing.
"""
@callback rollback(value :: any) :: no_return
end
| 37.527088 | 137 | 0.673945 |
9e51928d530aac50f11bd93d3eaeae9b74be228e | 5,554 | ex | Elixir | lib/faktory_worker/batch.ex | vitalsource/faktory_worker | 03633c0ddcd2752aa771afbe0757f6e03352254b | [
"MIT"
] | null | null | null | lib/faktory_worker/batch.ex | vitalsource/faktory_worker | 03633c0ddcd2752aa771afbe0757f6e03352254b | [
"MIT"
] | null | null | null | lib/faktory_worker/batch.ex | vitalsource/faktory_worker | 03633c0ddcd2752aa771afbe0757f6e03352254b | [
"MIT"
] | null | null | null | defmodule FaktoryWorker.Batch do
@moduledoc """
Supports Faktory Batch operations
[Batch support](https://github.com/contribsys/faktory/wiki/Ent-Batches) is a
Faktory Enterprise feature. It allows jobs to pushed as part of a batch. When
all jobs in a batch have completed, Faktory will queue a callback job. This
allows building complex job workflows with dependencies.
Jobs pushed as part of a batch _must_ be pushed synchronously. This can be
done using the `skip_pipeline: true` option when calling `perform_async/2`. If
a job isn't pushed synchronously, you may encounter a race condition where the
batch is committed before all jobs have been pushed.
## Creating a batch
A batch is created using `new!/1` and must provide a description and declare
one of the success or complete callbacks. The `new!/1` function returns the
batch ID (or `bid`) which identifies the batch for future commands.
Once created, jobs can be pushed to the batch by providing the `bid` in the
`custom` payload. These jobs must be pushed synchronously.
```
alias FaktoryWorker.Batch
{:ok, bid} = Batch.new!(on_success: {MyApp.EmailReportJob, [], []})
MyApp.Job.perform_async([1, 2], custom: %{"bid" => bid}, skip_pipeline: true)
MyApp.Job.perform_async([3, 4], custom: %{"bid" => bid}, skip_pipeline: true)
MyApp.Job.perform_async([5, 6], custom: %{"bid" => bid}, skip_pipeline: true)
Batch.commit(bid)
```
## Opening a batch
In order to open a batch, you must know the batch ID. Since FaktoryWorker
doesn't currently pass the job itself as a parameter to `perform` functions,
you must explicitly pass it as an argument in order to open the batch as part
of a job.
```
defmodule MyApp.Job do
use FaktoryWorker.Job
def perform(arg1, arg2, bid) do
Batch.open(bid)
MyApp.OtherJob.perform_async([1, 2], custom: %{"bid" => bid}, skip_pipeline: true)
Batch.commit(bid)
end
end
```
"""
alias FaktoryWorker.{ConnectionManager, Job, Pool}
@type bid :: String.t()
@default_timeout 5000
@doc """
Creates a new Faktory batch
Returns the batch ID (`bid`) which needs to be passed in the `:custom`
parameters of every job that should be part of this batch as well as to commit
the batch.
## Opts
Batch jobs must define a success or complete callback (or both). These
callbacks are passed as tuples to the `:on_success` and `:on_complete` opts.
They are defined as a tuple consisting of `{mod, args, opts}` where `mod` is a
module with a `perform` function that corresponds in arity to the length of `args`.
Any `opts` that can be passed to `perform_async/2` can be provided as `opts`
to the callback except for `:faktory_worker`.
If neither callback is provided, an error will be raised.
### `:on_success`
See above.
### `:on_complete`
See above.
### `:description`
The description, if provided, is shown in Faktory's Web UI on the batch
listing tab.
### `:parent_bid`
The parent batch ID--only used if you are creating a child batch.
### `:faktory_worker`
The name of the `FaktoryWorker` instance (determines which connection pool
will be used).
"""
@spec new!(Keyword.t()) :: {:ok, bid()} | {:error, any()}
def new!(opts \\ []) do
success = Keyword.get(opts, :on_success)
complete = Keyword.get(opts, :on_complete)
bid = Keyword.get(opts, :parent_id)
description = Keyword.get(opts, :description)
payload =
%{}
|> maybe_put_description(description)
|> maybe_put_parent_id(bid)
|> maybe_put_callback(:success, success)
|> maybe_put_callback(:complete, complete)
|> validate!()
send_command({:batch_new, payload}, opts)
end
@doc """
Commits the batch identified by `bid`
Faktory will begin scheduling jobs that are part of the batch before the batch
is committed, but
"""
def commit(bid, opts \\ []) do
send_command({:batch_commit, bid}, opts)
end
@doc """
Opens the batch identified by `bid`
An existing batch needs to be re-opened in order to add more jobs to it or to
add a child batch.
After opening the batch, it must be committed again using `commit/2`.
"""
def open(bid, opts \\ []) do
send_command({:batch_open, bid}, opts)
end
@doc """
Gets the status of a batch
Returns a map representing the status
"""
def status(bid, opts \\ []) do
send_command({:batch_status, bid}, opts)
end
defp send_command(command, opts) do
opts
|> Keyword.get(:faktory_name, FaktoryWorker)
|> Pool.format_pool_name()
|> :poolboy.transaction(
&ConnectionManager.Server.send_command(&1, command),
@default_timeout
)
end
defp maybe_put_description(payload, nil), do: payload
defp maybe_put_description(payload, description),
do: Map.put_new(payload, :description, description)
defp maybe_put_parent_id(payload, nil), do: payload
defp maybe_put_parent_id(payload, bid), do: Map.put_new(payload, :parent_bid, bid)
defp maybe_put_callback(payload, _type, nil), do: payload
defp maybe_put_callback(payload, type, {mod, job, opts}) do
job_payload = Job.build_payload(mod, job, opts)
Map.put_new(payload, type, job_payload)
end
defp validate!(payload) do
success = Map.get(payload, :success)
complete = Map.get(payload, :complete)
case {success, complete} do
{nil, nil} ->
raise("Faktory batch jobs must declare a success or complete callback")
{_, _} ->
payload
end
end
end
| 29.078534 | 88 | 0.688873 |
9e51a83e1eede902a415b7fa1eed9bce0ae188c2 | 2,043 | ex | Elixir | lib/raygun.ex | sreecodeslayer/raygun | 3dfc2b0ddad6c2f3424a05727b0ad98303f1846c | [
"Apache-2.0"
] | 23 | 2015-08-16T18:17:12.000Z | 2020-08-14T09:45:26.000Z | lib/raygun.ex | sreecodeslayer/raygun | 3dfc2b0ddad6c2f3424a05727b0ad98303f1846c | [
"Apache-2.0"
] | 23 | 2015-08-18T15:21:12.000Z | 2020-08-30T05:31:36.000Z | lib/raygun.ex | sreecodeslayer/raygun | 3dfc2b0ddad6c2f3424a05727b0ad98303f1846c | [
"Apache-2.0"
] | 22 | 2015-08-23T16:50:12.000Z | 2021-02-15T02:19:47.000Z | defmodule Raygun do
@moduledoc """
Send errors to Raygun. Errors can be captured in three different ways.
1. Any errors that are logged
2. Any exceptions that occur in a Plug
3. Programmatically
All the functions will return `:ok` or `{:error, reason}`
"""
@api_endpoint "https://api.raygun.io/entries"
@doc """
Reports a string message. This function is used by the Raygun.Logger but it
can also be used to report any string message.
"""
def report_message(msg, opts \\ []) do
Raygun.Format.message_payload(msg, opts) |> send_report
end
@deprecated "Use report_stacktrace/2 instead"
def report_exception(exception, opts \\ []) do
apply(:erlang, :get_stacktrace, [])
|> report_stacktrace(exception, opts)
end
@doc """
Reports an exception and its corresponding stacktrace to Raygun.
"""
def report_stacktrace(stacktrace, exception, opts \\ []) do
Raygun.Format.stacktrace_payload(stacktrace, exception, opts) |> send_report
end
@doc """
Reports an exception and its corresponding stacktrace to Raygun. Additionally
this captures some additional information about the environment in which
the exception occurred by retrieving some state from the Plug Conn.
"""
def report_plug(conn, stacktrace, exception, opts \\ []) do
Raygun.Format.conn_payload(conn, stacktrace, exception, opts) |> send_report
end
defp send_report(error) do
headers = %{
"Content-Type": "application/json; charset=utf-8",
Accept: "application/json",
"User-Agent": "Elixir Client",
"X-ApiKey": Raygun.Util.get_env(:raygun, :api_key)
}
opts = Application.get_env(:raygun, :httpoison_opts, [])
case HTTPoison.post(@api_endpoint, Jason.encode!(error), headers, opts) do
{:ok, %HTTPoison.Response{status_code: 202}} -> :ok
{:ok, %HTTPoison.Response{status_code: 400}} -> {:error, :bad_message}
{:ok, %HTTPoison.Response{status_code: 403}} -> {:error, :invalid_api_key}
{:error, _} -> {:error, :unexpected}
end
end
end
| 32.951613 | 80 | 0.69114 |
9e51b495eb2b2457efb14d984c4d2dd3d28d500e | 2,369 | ex | Elixir | lib/ambry_web/live/admin/audit_live/index.ex | froseph/ambry | 86c1a8528b9f3cc7e4a7debd8005df4116a7d1b1 | [
"MIT"
] | null | null | null | lib/ambry_web/live/admin/audit_live/index.ex | froseph/ambry | 86c1a8528b9f3cc7e4a7debd8005df4116a7d1b1 | [
"MIT"
] | null | null | null | lib/ambry_web/live/admin/audit_live/index.ex | froseph/ambry | 86c1a8528b9f3cc7e4a7debd8005df4116a7d1b1 | [
"MIT"
] | null | null | null | defmodule AmbryWeb.Admin.AuditLive.Index do
@moduledoc """
LiveView for audit admin interface.
"""
use AmbryWeb, :live_view
alias Ambry.{FileUtils, Media}
alias AmbryWeb.Admin.Components.AdminNav
alias Surface.Components.LiveRedirect
on_mount {AmbryWeb.UserLiveAuth, :ensure_mounted_current_user}
on_mount {AmbryWeb.Admin.Auth, :ensure_mounted_admin_user}
@impl Phoenix.LiveView
def mount(_params, _session, socket) do
{:ok, assign(socket, :page_title, "Auditing Media")}
end
@impl Phoenix.LiveView
def handle_params(_params, _url, socket) do
{:noreply, load_audit(socket)}
end
defp load_audit(socket) do
audit = Media.orphaned_files_audit()
deletable_files =
Map.new(audit.orphaned_media_files, fn file ->
{file.id, file.path}
end)
deletable_folders =
Map.new(audit.orphaned_source_folders, fn folder ->
{folder.id, folder.path}
end)
socket
|> assign(:audit, audit)
|> assign(:deletable_files, deletable_files)
|> assign(:deletable_folders, deletable_folders)
end
@impl Phoenix.LiveView
def handle_event("reload", _params, socket) do
{:noreply, load_audit(socket)}
end
def handle_event("delete-file", %{"id" => file_id}, socket) do
disk_path = Map.fetch!(socket.assigns.deletable_files, file_id)
case FileUtils.try_delete_file(disk_path) do
:ok ->
{:noreply,
socket
|> load_audit()
|> put_flash(:info, "File deleted.")}
{:error, posix} ->
{:noreply, put_flash(socket, :error, "Unable to delete file: #{posix}")}
end
end
def handle_event("delete-folder", %{"id" => folder_id}, socket) do
disk_path = Map.fetch!(socket.assigns.deletable_folders, folder_id)
case FileUtils.try_delete_folder(disk_path) do
:ok ->
{:noreply,
socket
|> load_audit()
|> put_flash(:info, "Folder deleted.")}
{:error, posix, path} ->
{:noreply, put_flash(socket, :error, "Unable to delete file/folder (#{posix}): #{path}")}
end
end
defp format_filesize(size) do
size |> FileSize.scale() |> FileSize.format()
end
defp no_problems(audit) do
case audit do
%{broken_media: [], orphaned_media_files: [], orphaned_source_folders: []} ->
true
_else ->
false
end
end
end
| 24.936842 | 97 | 0.650485 |
9e51bb137cfada04bb16221715a1085b45e7148f | 646 | exs | Elixir | test/mux/data/incidents_test.exs | mcrumm/mux-elixir | 07b8a655299474560906ae5ef278cfc7ffdd46d6 | [
"MIT"
] | 53 | 2018-06-29T22:49:29.000Z | 2022-03-23T00:01:44.000Z | test/mux/data/incidents_test.exs | mcrumm/mux-elixir | 07b8a655299474560906ae5ef278cfc7ffdd46d6 | [
"MIT"
] | 18 | 2019-05-12T00:41:57.000Z | 2022-01-09T13:46:59.000Z | test/mux/data/incidents_test.exs | mcrumm/mux-elixir | 07b8a655299474560906ae5ef278cfc7ffdd46d6 | [
"MIT"
] | 14 | 2019-01-24T23:44:34.000Z | 2022-03-21T15:28:42.000Z | defmodule Mux.Data.IncidentsTest do
use ExUnit.Case
import Tesla.Mock
doctest Mux.Data.Incidents
@base_url "https://api.mux.com/data/v1/incidents"
setup do
client = Mux.Base.new("token_id", "token_secret")
mock(fn
%{method: :get, url: @base_url} ->
%Tesla.Env{status: 200, body: Mux.Fixtures.incidents()}
%{method: :get, url: @base_url <> "/ABCD1234"} ->
%Tesla.Env{status: 200, body: Mux.Fixtures.incident()}
%{method: :get, url: @base_url <> "/ABCD1234/related"} ->
%Tesla.Env{status: 200, body: Mux.Fixtures.related_incidents()}
end)
{:ok, %{client: client}}
end
end
| 25.84 | 71 | 0.625387 |
9e51f59358bb489411c0b8d40f70d9526a2563f2 | 6,849 | exs | Elixir | test/timber_exceptions/translator_test.exs | mitchellhenke/timber-elixir-exceptions | 6749489a1b964ec23b01152d81dbccfff20cb518 | [
"0BSD"
] | null | null | null | test/timber_exceptions/translator_test.exs | mitchellhenke/timber-elixir-exceptions | 6749489a1b964ec23b01152d81dbccfff20cb518 | [
"0BSD"
] | null | null | null | test/timber_exceptions/translator_test.exs | mitchellhenke/timber-elixir-exceptions | 6749489a1b964ec23b01152d81dbccfff20cb518 | [
"0BSD"
] | null | null | null | defmodule Timber.Exceptions.TranslatorTest do
use ExUnit.Case
import Timber.Exceptions.TestHelpers
alias Timber.HTTPClients.Fake, as: FakeHTTPClient
alias Timber.LoggerBackends.InMemory
alias Timber.Exceptions.{TestGenServer, SimpleTestGenServer}
alias Timber.LoggerBackends.HTTP
alias Timber.Events.ErrorEvent
defp add_timber_logger_translator() do
:ok = Logger.add_translator({Timber.Exceptions.Translator, :translate})
ExUnit.Callbacks.on_exit(fn ->
Logger.remove_translator({Timber.Exceptions.Translator, :translate})
end)
end
test "logs errors from crashed Task" do
add_timber_logger_translator()
add_in_memory_logger_backend(self())
{:ok, pid} =
Task.start(fn ->
raise "Task Error"
end)
assert_receive :ok
[{:error, _pid, {Logger, _msg, _ts, metadata}}] = :gen_event.call(Logger, InMemory, :get)
assert %ErrorEvent{
name: "RuntimeError",
message: "Task Error",
backtrace: [_line1, _line2, _line3]
} = Keyword.get(metadata, :event)
assert Keyword.get(metadata, :pid) == pid
end
test "logs errors from GenServer throw" do
Process.flag(:trap_exit, true)
add_timber_logger_translator()
add_in_memory_logger_backend(self())
{:ok, pid} = TestGenServer.start_link(self())
TestGenServer.do_throw(pid)
assert_receive :ok
[{:error, _pid, {Logger, _msg, _ts, metadata}}] = :gen_event.call(Logger, InMemory, :get)
assert %ErrorEvent{
name: "ErlangError",
message: message
} = Keyword.get(metadata, :event)
assert message =~ ~r/Erlang error: "I am throwing"/i
assert Keyword.get(metadata, :pid) == pid
end
test "logs errors from GenServer abnormal exit" do
Process.flag(:trap_exit, true)
add_timber_logger_translator()
add_in_memory_logger_backend(self())
{:ok, pid} = TestGenServer.start_link(self())
TestGenServer.bad_exit(pid)
assert_receive :ok
[{:error, _pid, {Logger, _msg, _ts, metadata}}] = :gen_event.call(Logger, InMemory, :get)
assert %ErrorEvent{
name: "ErlangError",
message: message
} = Keyword.get(metadata, :event)
assert message =~ ~r/Erlang error: :bad_exit/i
assert Keyword.get(metadata, :pid) == pid
end
test "logs errors from GenServer handle_call crash" do
add_timber_logger_translator()
add_in_memory_logger_backend(self())
Process.flag(:trap_exit, true)
{:ok, pid} = TestGenServer.start_link(self())
assert catch_exit(TestGenServer.divide_call(pid, 1, 0))
assert_receive :ok
[{:error, _pid, {Logger, _msg, _ts, metadata}}] = :gen_event.call(Logger, InMemory, :get)
assert %ErrorEvent{
message: "bad argument in arithmetic expression",
name: "ArithmeticError",
backtrace: [_, _, _, _]
} = Keyword.get(metadata, :event)
assert Keyword.get(metadata, :pid) == pid
end
test "logs errors from GenServer handle_info crash" do
add_timber_logger_translator()
add_in_memory_logger_backend(self())
Process.flag(:trap_exit, true)
{:ok, pid} = TestGenServer.start_link(self())
TestGenServer.divide(pid, 1, 0)
assert_receive :ok
[{:error, _pid, {Logger, _msg, _ts, metadata}}] = :gen_event.call(Logger, InMemory, :get)
assert %ErrorEvent{
message: "bad argument in arithmetic expression",
name: "ArithmeticError",
backtrace: [_, _, _, _]
} = Keyword.get(metadata, :event)
assert Keyword.get(metadata, :pid) == pid
end
test "logs errors from GenServer raise" do
Process.flag(:trap_exit, true)
add_timber_logger_translator()
add_in_memory_logger_backend(self())
{:ok, pid} = TestGenServer.start_link(self())
TestGenServer.raise(pid)
assert_receive :ok
[{:error, _pid, {Logger, _msg, _ts, metadata}}] = :gen_event.call(Logger, InMemory, :get)
assert %ErrorEvent{
message: "raised error",
name: "RuntimeError",
backtrace: [_, _, _, _]
} = Keyword.get(metadata, :event)
assert Keyword.get(metadata, :pid) == pid
end
skip_min_elixir_version("1.4")
test "logs errors from GenServer unexpected message in handle_info/2" do
add_timber_logger_translator()
add_in_memory_logger_backend(self())
{:ok, pid} = SimpleTestGenServer.start_link(self())
send(pid, :unexpected)
assert_receive :ok
[{_level, _pid, {Logger, _msg, _ts, metadata}}] = :gen_event.call(Logger, InMemory, :get)
assert Keyword.get(metadata, :pid) == pid
end
test "logs errors arbitrary errors received by :error_logger" do
add_timber_logger_translator()
add_in_memory_logger_backend(self())
:error_logger.error_msg("Failed to start Ranch listener ~p", [self()])
assert_receive :ok
[{:error, _pid, {Logger, _msg, _ts, metadata}}] = :gen_event.call(Logger, InMemory, :get)
assert Keyword.get(metadata, :pid) == self()
end
test "logs errors from spawned process crash" do
add_timber_logger_translator()
add_in_memory_logger_backend(self())
spawn(fn ->
raise "Error"
end)
assert_receive :ok
[{:error, _pid, {Logger, _msg, _ts, metadata}}] = :gen_event.call(Logger, InMemory, :get)
assert %ErrorEvent{
backtrace: [
%{
file: "test/timber_exceptions/translator_test.exs",
function: _,
line: _
}
],
message: "Error",
name: "RuntimeError"
} = Keyword.get(metadata, :event)
end
test "Logger events are encodable by the HTTP backend" do
{:ok, state} = HTTP.init(HTTP, http_client: FakeHTTPClient)
add_timber_logger_translator()
add_in_memory_logger_backend(self())
Task.start(fn ->
Timber.Context.add(%{}, %{a: :b})
|> Timber.CurrentContext.save()
raise "Task Error"
end)
assert_receive :ok
[{:error, pid, {Logger, msg, ts, metadata}}] = :gen_event.call(Logger, InMemory, :get)
entry = {:error, pid, {Logger, msg, ts, metadata}}
{:ok, state} = HTTP.handle_event(entry, state)
HTTP.handle_event(:flush, state)
calls = FakeHTTPClient.get_async_request_calls()
assert length(calls) == 1
call = Enum.at(calls, 0)
assert elem(call, 0) == :post
assert elem(call, 1) == "https://logs.timber.io/frames"
vsn = Application.spec(:timber, :vsn)
assert elem(call, 2) == %{
"Authorization" => "Basic YXBpX2tleQ==",
"Content-Type" => "application/msgpack",
"User-Agent" => "timber-elixir/#{vsn}"
}
encoded_body = event_entry_to_msgpack(entry)
assert elem(call, 3) == encoded_body
end
end
| 28.5375 | 93 | 0.640385 |
9e522e3e9ad017c60ab89df690c5b1315c89d2e4 | 399 | exs | Elixir | priv/repo/migrations/20160801144208_create_chat.exs | luca-apostoli/ruru-chat | d6c62000b78592f03cf56f63acd23a6404df1924 | [
"MIT"
] | 1 | 2017-04-22T12:36:19.000Z | 2017-04-22T12:36:19.000Z | priv/repo/migrations/20160801144208_create_chat.exs | luca-apostoli/ruru-chat | d6c62000b78592f03cf56f63acd23a6404df1924 | [
"MIT"
] | null | null | null | priv/repo/migrations/20160801144208_create_chat.exs | luca-apostoli/ruru-chat | d6c62000b78592f03cf56f63acd23a6404df1924 | [
"MIT"
] | null | null | null | defmodule Ruru.Repo.Migrations.CreateChat do
use Ecto.Migration
def change do
create table(:chats) do
add :user_id, references(:users, on_delete: :nothing)
add :operator_id, references(:operators, on_delete: :nothing)
add :status, :boolean, default: false
timestamps()
end
create index(:chats, [:user_id])
create index(:chats, [:operator_id])
end
end
| 24.9375 | 67 | 0.679198 |
9e522fe7835777690152a23c35d6b43a50b1a9b0 | 3,846 | ex | Elixir | lib/chat_api_web/controllers/gmail_controller.ex | daskycodes/papercups | e716d94372b452d7545d9db79a23a44aee53676e | [
"MIT"
] | null | null | null | lib/chat_api_web/controllers/gmail_controller.ex | daskycodes/papercups | e716d94372b452d7545d9db79a23a44aee53676e | [
"MIT"
] | null | null | null | lib/chat_api_web/controllers/gmail_controller.ex | daskycodes/papercups | e716d94372b452d7545d9db79a23a44aee53676e | [
"MIT"
] | null | null | null | # TODO: rename to GmailController?
defmodule ChatApiWeb.GmailController do
  @moduledoc """
  Handles the Gmail OAuth2 flow for an account and sends email on the
  user's behalf through the Gmail API.
  """

  use ChatApiWeb, :controller

  require Logger

  alias ChatApi.Google

  @spec callback(Plug.Conn.t(), map()) :: Plug.Conn.t()
  @doc """
  This action is reached via `/api/gmail/oauth` and is the callback URL that
  Google's OAuth2 provider will redirect the user back to with a `code` that will
  be used to request an access token. The access token will then be used to
  access protected resources on behalf of the user.
  """
  def callback(conn, %{"code" => code}) do
    # Exchange the one-time authorization code for an access/refresh token
    # pair. No `else` clause: if either step fails, the failing value is
    # returned from the `with` as-is.
    with %{account_id: account_id, id: user_id} <- conn.assigns.current_user,
         client <- Google.Auth.get_token!(code: code) do
      Logger.debug("Gmail access token: #{inspect(client.token)}")

      # Upsert, so re-authorizing replaces any previously stored credentials.
      case Google.create_or_update_authorization(account_id, %{
             account_id: account_id,
             user_id: user_id,
             access_token: client.token.access_token,
             refresh_token: client.token.refresh_token,
             token_type: client.token.token_type,
             expires_at: client.token.expires_at,
             scope: client.token.other_params["scope"] || "",
             client: "gmail"
           }) do
        {:ok, _result} ->
          json(conn, %{data: %{ok: true}})

        error ->
          Logger.error("Error saving gmail auth: #{inspect(error)}")

          json(conn, %{data: %{ok: false}})
      end
    end
  end

  @spec authorization(Plug.Conn.t(), map()) :: Plug.Conn.t()
  # Returns the current account's Gmail authorization metadata (never the
  # tokens themselves), or `nil` when the account has not linked Gmail.
  def authorization(conn, _payload) do
    with %{account_id: account_id} <- conn.assigns.current_user do
      case Google.get_authorization_by_account(account_id) do
        nil ->
          json(conn, %{data: nil})

        auth ->
          json(conn, %{
            data: %{
              created_at: auth.inserted_at,
              account_id: auth.account_id,
              user_id: auth.user_id,
              scope: auth.scope
            }
          })
      end
    end
  end

  @spec send(Plug.Conn.t(), map()) :: Plug.Conn.t()
  # Sends an email through Gmail as the current user. The stored refresh
  # token is exchanged for a fresh access token on every call.
  def send(conn, %{"recipient" => recipient, "subject" => subject, "message" => message}) do
    with %{account_id: account_id, email: email} <- conn.assigns.current_user,
         %{refresh_token: refresh_token} <-
           Google.get_authorization_by_account(account_id),
         %{token: %{access_token: access_token}} <-
           Google.Auth.get_token!(refresh_token: refresh_token) do
      ChatApi.Emails.send_via_gmail(
        to: recipient,
        from: email,
        subject: subject,
        message: message,
        access_token: access_token
      )
      |> case do
        {:ok, result} ->
          conn
          |> notify_slack()
          |> json(%{ok: true, data: result})

        error ->
          Logger.error("Error sending email via gmail: #{inspect(error)}")

          json(conn, %{ok: false})
      end
    end
  end

  @spec index(Plug.Conn.t(), map()) :: Plug.Conn.t()
  # Redirects the browser straight into Google's consent screen.
  # `access_type: "offline"` + `prompt: "consent"` ensure a refresh token
  # is issued.
  def index(conn, _params) do
    scope = "https://www.googleapis.com/auth/gmail.modify"

    redirect(conn,
      external:
        Google.Auth.authorize_url!(scope: scope, prompt: "consent", access_type: "offline")
    )
  end

  @spec auth(Plug.Conn.t(), map()) :: Plug.Conn.t()
  # Same as `index/2`, but returns the consent URL as JSON so the frontend
  # can navigate itself.
  def auth(conn, _params) do
    scope = "https://www.googleapis.com/auth/gmail.modify"
    url = Google.Auth.authorize_url!(scope: scope, prompt: "consent", access_type: "offline")

    json(conn, %{data: %{url: url}})
  end

  # NOTE(review): `Conn.t()` below is unaliased — presumably `Plug.Conn.t()`;
  # the spec will not resolve under Dialyzer. Confirm before changing.
  @spec notify_slack(Conn.t()) :: Conn.t()
  defp notify_slack(conn) do
    with %{email: email} <- conn.assigns.current_user do
      # Putting in an async Task for now, since we don't care if this succeeds
      # or fails (and we also don't want it to block anything)
      Task.start(fn ->
        ChatApi.Slack.log("#{email} successfully linked Gmail!")
      end)
    end

    # Always return the conn unchanged so this can sit in a pipeline.
    conn
  end
end
| 31.785124 | 93 | 0.602964 |
9e52456b9d50f7b40b840fa9fed181108823e289 | 3,191 | ex | Elixir | lib/opentelemetry_ecto.ex | wuunder/opentelemetry_ecto | d4f8898f8eb782359a076355bc4f47adbda183a6 | [
"Apache-2.0"
] | 23 | 2020-03-31T19:41:50.000Z | 2021-10-06T00:02:52.000Z | lib/opentelemetry_ecto.ex | wuunder/opentelemetry_ecto | d4f8898f8eb782359a076355bc4f47adbda183a6 | [
"Apache-2.0"
] | 14 | 2020-03-29T15:52:25.000Z | 2021-10-15T15:51:21.000Z | lib/opentelemetry_ecto.ex | wuunder/opentelemetry_ecto | d4f8898f8eb782359a076355bc4f47adbda183a6 | [
"Apache-2.0"
] | 19 | 2020-04-06T15:11:42.000Z | 2021-09-28T21:51:14.000Z | defmodule OpentelemetryEcto do
@moduledoc """
Telemetry handler for creating OpenTelemetry Spans from Ecto query events.
"""
require OpenTelemetry.Tracer
@doc """
Attaches the OpentelemetryEcto handler to your repo events. This should be called
from your application behaviour on startup.
Example:
OpentelemetryEcto.setup([:blog, :repo])
You may also supply the following options in the second argument:
* `:time_unit` - a time unit used to convert the values of query phase
timings, defaults to `:microsecond`. See `System.convert_time_unit/3`
* `:span_prefix` - the first part of the span name, as a `String.t`,
defaults to the concatenation of the event name with periods, e.g.
`"blog.repo.query"`. This will always be followed with a colon and the
source (the table name for SQL adapters).
"""
def setup(event_prefix, config \\ []) do
# register the tracer. just re-registers if called for multiple repos
_ = OpenTelemetry.register_application_tracer(:opentelemetry_ecto)
event = event_prefix ++ [:query]
:telemetry.attach({__MODULE__, event}, event, &__MODULE__.handle_event/4, config)
end
@doc false
def handle_event(
event,
measurements,
%{query: query, source: source, result: query_result, repo: repo, type: type},
config
) do
# Doing all this even if the span isn't sampled so the sampler
# could technically use the attributes to decide if it should sample or not
total_time = measurements.total_time
end_time = :opentelemetry.timestamp()
start_time = end_time - total_time
database = repo.config()[:database]
url =
case repo.config()[:url] do
nil ->
# TODO: add port
URI.to_string(%URI{scheme: "ecto", host: repo.config()[:hostname]})
url ->
url
end
span_name =
case Keyword.fetch(config, :span_prefix) do
{:ok, prefix} -> prefix
:error -> Enum.join(event, ".")
end <> ":#{source}"
time_unit = Keyword.get(config, :time_unit, :microsecond)
db_type =
case type do
:ecto_sql_query -> :sql
_ -> type
end
result =
case query_result do
{:ok, _} -> []
_ -> [error: true]
end
# TODO: need connection information to complete the required attributes
# net.peer.name or net.peer.ip and net.peer.port
base_attributes =
Keyword.merge(result,
"db.type": db_type,
"db.statement": query,
source: source,
"db.instance": database,
"db.url": url,
"total_time_#{time_unit}s": System.convert_time_unit(total_time, :native, time_unit)
)
attributes =
measurements
|> Enum.into(%{})
|> Map.take(~w(decode_time query_time queue_time)a)
|> Enum.reject(&is_nil(elem(&1, 1)))
|> Enum.map(fn {k, v} ->
{String.to_atom("#{k}_#{time_unit}s"), System.convert_time_unit(v, :native, time_unit)}
end)
s = OpenTelemetry.Tracer.start_span(span_name, %{start_time: start_time, attributes: attributes ++ base_attributes})
OpenTelemetry.Span.end_span(s)
end
end
| 30.390476 | 120 | 0.640552 |
9e527eba006562334e352c534da0d22b60de5581 | 1,393 | ex | Elixir | test/support/data_case.ex | drapadubok/multiauth | 9ca9294402311c06a082e6972144a2eb8935a797 | [
"MIT"
] | 1 | 2018-06-01T02:08:12.000Z | 2018-06-01T02:08:12.000Z | test/support/data_case.ex | drapadubok/multiauth | 9ca9294402311c06a082e6972144a2eb8935a797 | [
"MIT"
] | null | null | null | test/support/data_case.ex | drapadubok/multiauth | 9ca9294402311c06a082e6972144a2eb8935a797 | [
"MIT"
] | null | null | null | defmodule Multiauth.DataCase do
@moduledoc """
This module defines the setup for tests requiring
access to the application's data layer.
You may define functions here to be used as helpers in
your tests.
Finally, if the test case interacts with the database,
it cannot be async. For this reason, every test runs
inside a transaction which is reset at the beginning
of the test unless the test case is marked as async.
"""
use ExUnit.CaseTemplate
using do
quote do
alias Multiauth.Repo
import Ecto
import Ecto.Changeset
import Ecto.Query
import Multiauth.DataCase
end
end
setup tags do
:ok = Ecto.Adapters.SQL.Sandbox.checkout(Multiauth.Repo)
unless tags[:async] do
Ecto.Adapters.SQL.Sandbox.mode(Multiauth.Repo, {:shared, self()})
end
:ok
end
@doc """
A helper that transform changeset errors to a map of messages.
assert {:error, changeset} = Accounts.create_user(%{password: "short"})
assert "password is too short" in errors_on(changeset).password
assert %{password: ["password is too short"]} = errors_on(changeset)
"""
def errors_on(changeset) do
Ecto.Changeset.traverse_errors(changeset, fn {message, opts} ->
Enum.reduce(opts, message, fn {key, value}, acc ->
String.replace(acc, "%{#{key}}", to_string(value))
end)
end)
end
end
| 25.796296 | 77 | 0.681981 |
9e52a57a24a643ff7a91c4244564416d1fc678cc | 5,343 | ex | Elixir | clients/ad_mob/lib/google_api/ad_mob/v1/model/mediation_report_spec.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | clients/ad_mob/lib/google_api/ad_mob/v1/model/mediation_report_spec.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | clients/ad_mob/lib/google_api/ad_mob/v1/model/mediation_report_spec.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.AdMob.V1.Model.MediationReportSpec do
  @moduledoc """
  The specification for generating an AdMob Mediation report.
  For example, the specification to get observed ECPM sliced by ad source and
  app for the 'US' and 'CN' countries can look like the following example:

      {
        "date_range": {
          "start_date": {"year": 2018, "month": 9, "day": 1},
          "end_date": {"year": 2018, "month": 9, "day": 30}
        },
        "dimensions": ["AD_SOURCE", "APP", "COUNTRY"],
        "metrics": ["OBSERVED_ECPM"],
        "dimension_filters": [
          {
            "dimension": "COUNTRY",
            "matches_any": {"values": [{"value": "US", "value": "CN"}]}
          }
        ],
        "sort_conditions": [
          {"dimension":"APP", order: "ASCENDING"}
        ],
        "localization_settings": {
          "currency_code": "USD",
          "language_code": "en-US"
        }
      }

  For a better understanding, you can treat the preceding specification like
  the following pseudo SQL:

      SELECT AD_SOURCE, APP, COUNTRY, OBSERVED_ECPM
      FROM MEDIATION_REPORT
      WHERE DATE >= '2018-09-01' AND DATE <= '2018-09-30'
          AND COUNTRY IN ('US', 'CN')
      GROUP BY AD_SOURCE, APP, COUNTRY
      ORDER BY APP ASC;

  ## Attributes

  *   `dateRange` (*type:* `GoogleApi.AdMob.V1.Model.DateRange.t`, *default:* `nil`) - The date range for which the report is generated.
  *   `dimensionFilters` (*type:* `list(GoogleApi.AdMob.V1.Model.MediationReportSpecDimensionFilter.t)`, *default:* `nil`) - Describes which report rows to match based on their dimension values.
  *   `dimensions` (*type:* `list(String.t)`, *default:* `nil`) - List of dimensions of the report. The value combination of these dimensions
      determines the row of the report. If no dimensions are specified, the
      report returns a single row of requested metrics for the entire account.
  *   `localizationSettings` (*type:* `GoogleApi.AdMob.V1.Model.LocalizationSettings.t`, *default:* `nil`) - Localization settings of the report.
  *   `maxReportRows` (*type:* `integer()`, *default:* `nil`) - Maximum number of report data rows to return. If the value is not set, the
      API returns as many rows as possible, up to 100000. Acceptable values are
      1-100000, inclusive. Any other values are treated as 100000.
  *   `metrics` (*type:* `list(String.t)`, *default:* `nil`) - List of metrics of the report. A report must specify at least one metric.
  *   `sortConditions` (*type:* `list(GoogleApi.AdMob.V1.Model.MediationReportSpecSortCondition.t)`, *default:* `nil`) - Describes the sorting of report rows. The order of the condition in the
      list defines its precedence; the earlier the condition, the higher its
      precedence. If no sort conditions are specified, the row ordering is
      undefined.
  *   `timeZone` (*type:* `String.t`, *default:* `nil`) - A report time zone. Accepts an IANA TZ name values, such as
      "America/Los_Angeles." If no time zone is defined, the account default
      takes effect. Check default value by the get account action.

      **Warning:** The "America/Los_Angeles" is the only supported value at
      the moment.
  """

  # Auto-generated model (see file header): do not hand-edit the field list.
  # `field/1,2` macros below come from GoogleApi.Gax.ModelBase and define the
  # struct keys plus JSON (de)serialization metadata.
  use GoogleApi.Gax.ModelBase

  @type t :: %__MODULE__{
          :dateRange => GoogleApi.AdMob.V1.Model.DateRange.t(),
          :dimensionFilters =>
            list(GoogleApi.AdMob.V1.Model.MediationReportSpecDimensionFilter.t()),
          :dimensions => list(String.t()),
          :localizationSettings => GoogleApi.AdMob.V1.Model.LocalizationSettings.t(),
          :maxReportRows => integer(),
          :metrics => list(String.t()),
          :sortConditions => list(GoogleApi.AdMob.V1.Model.MediationReportSpecSortCondition.t()),
          :timeZone => String.t()
        }

  field(:dateRange, as: GoogleApi.AdMob.V1.Model.DateRange)

  field(:dimensionFilters,
    as: GoogleApi.AdMob.V1.Model.MediationReportSpecDimensionFilter,
    type: :list
  )

  field(:dimensions, type: :list)
  field(:localizationSettings, as: GoogleApi.AdMob.V1.Model.LocalizationSettings)
  field(:maxReportRows)
  field(:metrics, type: :list)

  field(:sortConditions,
    as: GoogleApi.AdMob.V1.Model.MediationReportSpecSortCondition,
    type: :list
  )

  field(:timeZone)
end
# Delegate Poison decoding to the generated model's own `decode/2`.
defimpl Poison.Decoder, for: GoogleApi.AdMob.V1.Model.MediationReportSpec do
  def decode(value, options) do
    GoogleApi.AdMob.V1.Model.MediationReportSpec.decode(value, options)
  end
end
# Encode via the shared Gax base encoder, which honours the field metadata.
defimpl Poison.Encoder, for: GoogleApi.AdMob.V1.Model.MediationReportSpec do
  def encode(value, options) do
    GoogleApi.Gax.ModelBase.encode(value, options)
  end
end
| 42.744 | 194 | 0.679206 |
9e52a7d5bf5f7e667c592954287811c628cec21e | 1,768 | ex | Elixir | lib/absinthe_generator/schema_builder.ex | MikaAK/absinthe_generator | 2025daeac092c5a2fa8030bcddf770fd3104cd0f | [
"MIT"
] | 1 | 2022-02-28T14:58:08.000Z | 2022-02-28T14:58:08.000Z | lib/absinthe_generator/schema_builder.ex | MikaAK/absinthe_generator | 2025daeac092c5a2fa8030bcddf770fd3104cd0f | [
"MIT"
] | null | null | null | lib/absinthe_generator/schema_builder.ex | MikaAK/absinthe_generator | 2025daeac092c5a2fa8030bcddf770fd3104cd0f | [
"MIT"
] | null | null | null | defmodule AbsintheGenerator.SchemaBuilder do
alias AbsintheGenerator.{Type, Mutation, Query, Schema}
@dataloader_regex ~r/^dataloader\(([^,]+)/
def generate(app_name, schema_items) do
{type_items, schema_items} = Enum.split_with(schema_items, &is_struct(&1, Type))
{mutation_items, schema_items} = Enum.split_with(schema_items, &is_struct(&1, Mutation))
query_items = Enum.filter(schema_items, &is_struct(&1, Query))
%Schema{
app_name: app_name,
types: Enum.map(type_items, &("Types.#{Macro.camelize(&1.type_name)}")),
mutations: Enum.map(mutation_items, &(&1.mutation_name)),
queries: Enum.map(query_items, &(&1.query_name)),
data_sources: extract_data_sources(type_items)
}
end
defp extract_data_sources(type_items) do
type_items
|> Stream.flat_map(&filter_fields_with_dataloader/1)
|> Stream.map(&dataloader_source_module/1)
|> Stream.uniq()
|> Enum.map(&build_data_source_struct/1)
end
defp filter_fields_with_dataloader(%Type{objects: objects}) do
objects
|> Stream.flat_map(&(&1.fields))
|> Enum.filter(&(not is_nil(&1.resolver) and &1.resolver =~ "dataloader"))
end
defp dataloader_source_module(%Type.Object.Field{resolver: resolver_string}) do
case Regex.run(@dataloader_regex, resolver_string, [capture: :all_but_first]) do
[data_source] -> data_source
_ -> raise "Data source could not be found from #{inspect resolver_string}"
end
end
defp build_data_source_struct(data_source) do
%Schema.DataSource{
source: data_source,
query: """
Dataloader.Ecto.new(
BlitzPG.Repo.Apex,
query: &EctoShorts.CommonFilters.convert_params_to_filter/2
)
"""
}
end
end
| 33.358491 | 92 | 0.686652 |
9e52af97e14f83b6f46dfd2e7354734475be2726 | 385 | ex | Elixir | platform/target/bootstrap/protocols.ex | defcon201/farmbot_os | acc22702afbb13be461c9d80591604958117ff75 | [
"MIT"
] | null | null | null | platform/target/bootstrap/protocols.ex | defcon201/farmbot_os | acc22702afbb13be461c9d80591604958117ff75 | [
"MIT"
] | null | null | null | platform/target/bootstrap/protocols.ex | defcon201/farmbot_os | acc22702afbb13be461c9d80591604958117ff75 | [
"MIT"
] | 1 | 2020-12-16T16:39:32.000Z | 2020-12-16T16:39:32.000Z | defmodule Farmbot.Target.Protocols do
@moduledoc false
use Farmbot.Logger
def start_link(_, _) do
Logger.busy(3, "Loading consolidated protocols.")
for beamfile <- Path.wildcard("/srv/erlang/lib/farmbot-*/consolidated/*.beam") do
beamfile
|> String.replace_suffix(".beam", "")
|> to_charlist()
|> :code.load_abs()
end
:ignore
end
end
| 21.388889 | 85 | 0.646753 |
9e52d068ad1753a6879a0bcb71e26d782488f47f | 346 | exs | Elixir | priv/repo/seeds.exs | appdojolabs/myapp | 41887a60f7ea86db3c70470631b703455c865042 | [
"MIT"
] | 41 | 2017-05-21T14:33:28.000Z | 2022-03-28T22:29:56.000Z | priv/repo/seeds.exs | DMeechan/deploy-elixir-docker-example | b63a46453629ced5134aba039575a3c010a6f2a1 | [
"MIT"
] | 2 | 2017-07-23T07:06:38.000Z | 2018-07-16T23:53:12.000Z | priv/repo/seeds.exs | appdojolabs/myapp | 41887a60f7ea86db3c70470631b703455c865042 | [
"MIT"
] | 16 | 2017-05-21T22:35:10.000Z | 2022-03-28T22:30:04.000Z | # Script for populating the database. You can run it as:
#
# mix run priv/repo/seeds.exs
#
# Inside the script, you can read and write to any of your
# repositories directly:
#
# Myapp.Repo.insert!(%Myapp.SomeModel{})
#
# We recommend using the bang functions (`insert!`, `update!`
# and so on) as they will fail if something goes wrong.
| 28.833333 | 61 | 0.702312 |
9e52f766154562bd22cf291603454d997d9d6874 | 2,966 | ex | Elixir | lib/day_02_bathroom_code.ex | scmx/advent-of-code-2016-elixir | 774145cfee448998d3c35314872bff9afad4d287 | [
"MIT"
] | 1 | 2016-12-07T07:48:01.000Z | 2016-12-07T07:48:01.000Z | lib/day_02_bathroom_code.ex | scmx/advent-of-code-2016-elixir | 774145cfee448998d3c35314872bff9afad4d287 | [
"MIT"
] | null | null | null | lib/day_02_bathroom_code.ex | scmx/advent-of-code-2016-elixir | 774145cfee448998d3c35314872bff9afad4d287 | [
"MIT"
] | null | null | null | defmodule Adventofcode.Day02BathroomCode do
# Position as a grid
# 1 2 3 {0,0} {1,0} {2,0}
# 4 5 6 {0,1} {1,1} {2,1}
# 7 8 9 {0,2} {1,2} {2,2}
# Thus the starting position "5" is...
@start_pos {1, 1}
@grid_width 3
@grid_height 3
@max_x @grid_width - 1
@max_y @grid_height - 1
def bathroom_code(instructions) do
instructions
|> String.strip
|> String.split("\n")
|> Enum.map(&String.to_charlist/1)
|> do_bathroom_code
end
defp do_bathroom_code(instructions, position \\ @start_pos, result \\ "")
defp do_bathroom_code([], _, result), do: result
defp do_bathroom_code([[] | tail], position, result),
do: do_bathroom_code(tail, position, result <> button_from_pos(position))
defp do_bathroom_code([[direction | rest] | tail], position, result),
do: do_bathroom_code([rest | tail], move(direction, position), result)
defp button_from_pos({x, y}), do: "#{@grid_height * y + x + 1}"
defp move(?D, {x, @max_y}), do: {x, @max_y}
defp move(?D, {x, y}), do: {x, y + 1}
defp move(?R, {@max_x, y}), do: {@max_x, y}
defp move(?R, {x, y}), do: {x + 1, y}
defp move(?L, {0, y}), do: {0, y}
defp move(?L, {x, y}), do: {x - 1, y}
defp move(?U, {x, 0}), do: {x, 0}
defp move(?U, {x, y}), do: {x, y - 1}
# Position as a grid
# 1 {0,-2}
# 2 3 4 {-1,-1} {0,-1} {1,-1}
# 5 6 7 8 9 {-2,0} {-1, 0} {0, 0} {1, 0} {2,0}
# A B C {-1, 1} {0, 1} {1, 1}
# D {0, 2}
# Thus the starting position "5" is...
@start_pos {-2, 0}
def insane_code(instructions) do
instructions
|> String.strip
|> String.split("\n")
|> Enum.map(&String.to_charlist/1)
|> do_insane_code
end
defp do_insane_code(instructions, position \\ @start_pos, result \\ "")
defp do_insane_code([], _, result), do: result
defp do_insane_code([[] | tail], position, result),
do: do_insane_code(tail, position, result <> insane_button_pos(position))
defp do_insane_code([[direction | rest] | tail], position, result),
do: do_insane_code([rest | tail], insane_move(direction, position), result)
defp insane_move(?D, {x, y}) when abs(x) + abs(y + 1) > 2, do: {x, y}
defp insane_move(?D, {x, y}), do: {x, y + 1}
defp insane_move(?R, {x, y}) when abs(x + 1) + abs(y) > 2, do: {x, y}
defp insane_move(?R, {x, y}), do: {x + 1, y}
defp insane_move(?L, {x, y}) when abs(x - 1) + abs(y) > 2, do: {x, y}
defp insane_move(?L, {x, y}), do: {x - 1, y}
defp insane_move(?U, {x, y}) when abs(x) + abs(y - 1) > 2, do: {x, y}
defp insane_move(?U, {x, y}), do: {x, y - 1}
defp insane_button_pos({x, y}) do
case {x, y} do
{0, -2} -> "1"
{-1, -1} -> "2"
{0, -1} -> "3"
{1, -1} -> "4"
{-2, 0} -> "5"
{-1, 0} -> "6"
{0, 0} -> "7"
{1, 0} -> "8"
{2, 0} -> "9"
{-1, 1} -> "A"
{0, 1} -> "B"
{1, 1} -> "C"
{0, 2} -> "D"
end
end
end
| 32.593407 | 79 | 0.523601 |
9e5369238ed739398612f0614eff7238abf6a996 | 213 | exs | Elixir | config/test.exs | davidwebster48/mnesiac | 1ca8e52d843fc68cf351212b65d116df6ee6bac6 | [
"MIT"
] | 97 | 2018-07-28T01:54:23.000Z | 2022-03-20T06:28:39.000Z | config/test.exs | astutecat/mnesiac | b547c45dca79861d7c54b9b7a86af8fcb8950966 | [
"MIT"
] | 71 | 2018-07-30T05:46:26.000Z | 2021-12-08T08:25:16.000Z | config/test.exs | astutecat/mnesiac | b547c45dca79861d7c54b9b7a86af8fcb8950966 | [
"MIT"
] | 17 | 2018-10-30T14:59:38.000Z | 2022-02-22T08:42:58.000Z | use Mix.Config
config :mnesia,
dir: to_charlist(Path.join(File.cwd!(), to_string(node())))
config :mnesiac,
stores: [Mnesiac.Support.ExampleStore],
schema_type: :disc_copies,
table_load_timeout: 600_000
| 21.3 | 61 | 0.741784 |
9e538b59f88b882610d0cbb8b43f0d6ef058bf01 | 4,165 | ex | Elixir | lib/lcd_display/driver/hd44780_util.ex | philipgiuliani/lcd_display | 41ec73eb554d97c02d7ff16489fa4bec88618f3e | [
"MIT"
] | 8 | 2021-01-18T21:24:56.000Z | 2022-02-04T08:14:31.000Z | lib/lcd_display/driver/hd44780_util.ex | philipgiuliani/lcd_display | 41ec73eb554d97c02d7ff16489fa4bec88618f3e | [
"MIT"
] | null | null | null | lib/lcd_display/driver/hd44780_util.ex | philipgiuliani/lcd_display | 41ec73eb554d97c02d7ff16489fa4bec88618f3e | [
"MIT"
] | 1 | 2021-01-06T15:39:56.000Z | 2021-01-06T15:39:56.000Z | defmodule LcdDisplay.HD44780.Util do
@moduledoc """
A collection of utility functions that are used for display drivers.
"""
@type row_col_pos :: {non_neg_integer, non_neg_integer}
@typedoc """
Typically 2x16 or 4x20.
"""
@type display_config :: %{
required(:rows) => LcdDisplay.HD44780.Driver.num_rows(),
required(:cols) => LcdDisplay.HD44780.Driver.num_cols(),
any => any
}
@doc """
Determines a Display Data RAM (DDRAM) address based on the display configuration (rows and columns)
and the zero-indexed cursor position (row and column).
## Examples
iex> LcdDisplay.HD44780.Util.determine_ddram_address({0,0}, %{rows: 2, cols: 16})
0
iex> LcdDisplay.HD44780.Util.determine_ddram_address({0,15}, %{rows: 2, cols: 16})
15
iex> LcdDisplay.HD44780.Util.determine_ddram_address({1,0}, %{rows: 2, cols: 16})
64
iex> LcdDisplay.HD44780.Util.determine_ddram_address({1,15}, %{rows: 2, cols: 16})
79
"""
@spec determine_ddram_address(row_col_pos, display_config) :: non_neg_integer
def determine_ddram_address({row_pos, col_pos} = _row_col_pos, %{rows: num_rows, cols: num_cols} = _display_config)
when is_number(num_rows) and is_number(num_cols) and
is_number(row_pos) and is_number(col_pos) and
num_rows >= 1 and num_rows >= 1 and
row_pos >= 0 and col_pos >= 0 do
col_pos = min(col_pos, num_cols - 1)
row_pos = min(row_pos, num_rows - 1)
num_cols
|> ddram_row_offsets()
|> elem(row_pos)
|> Kernel.+(col_pos)
end
@doc """
Determine a list of row offsets based on how many columns the display has.
```
0x00: | ROW 0 | ROW 2 |
0x40: | ROW 1 | ROW 3 |
```
For more info, please refer to [Hitachi HD44780 datasheet](https://cdn-shop.adafruit.com/datasheets/HD44780.pdf) page 10.
## Examples
iex> LcdDisplay.HD44780.Util.ddram_row_offsets(8)
{0, 64, 8, 72}
iex> LcdDisplay.HD44780.Util.ddram_row_offsets(16)
{0, 64, 16, 80}
iex> LcdDisplay.HD44780.Util.ddram_row_offsets(20)
{0, 64, 20, 84}
"""
@spec ddram_row_offsets(LcdDisplay.HD44780.Driver.num_cols()) :: {0, 64, pos_integer, pos_integer}
def ddram_row_offsets(num_cols) when is_number(num_cols) and num_cols >= 1 do
{
0x00,
0x40,
0x00 + num_cols,
0x40 + num_cols
}
end
@doc """
Adjusts the backlight-related values in the display driver state.
## Examples
# Default to the white LED when no color is specified.
iex> LcdDisplay.HD44780.Util.adjust_backlight_config(%{backlight: true, red: false, green: false, blue: false})
%{backlight: true, blue: true, green: true, red: true}
# Turn off all colors when the backlight is turned off.
iex> LcdDisplay.HD44780.Util.adjust_backlight_config(%{backlight: false, red: true, green: true, blue: true})
%{backlight: false, blue: false, green: false, red: false}
# Else do nothing
iex> LcdDisplay.HD44780.Util.adjust_backlight_config(%{backlight: true, red: true, green: false, blue: false})
%{backlight: true, blue: false, green: false, red: true}
"""
@spec adjust_backlight_config(map) :: map
def adjust_backlight_config(%{backlight: backlight, red: red, green: green, blue: blue} = display) do
display
|> Map.merge(
# Step 1: Default to the white LED when no color is specified.
if(!red && !green && !blue, do: %{red: true, green: true, blue: true}, else: %{})
)
|> Map.merge(
# Step 2: Turn off all colors when the backlight is turned off.
if(backlight, do: %{}, else: %{red: false, green: false, blue: false})
)
end
@doc """
Shuffles the RGB boolean values in the display driver state.
"""
@spec shuffle_color(map) :: map
def shuffle_color(display) do
display
|> Map.merge(
~w(red green blue)a
|> Enum.zip(
# Exclude white and none
[[true, false, false], [true, true, false]]
|> Enum.shuffle()
|> Enum.at(0)
|> Enum.shuffle()
)
|> Enum.into(%{})
)
end
end
| 32.038462 | 123 | 0.636255 |
9e53c5611cfedb3526312fed46841241cdfc2fd1 | 382 | ex | Elixir | lib/ex_diet_web/graphql/queries/accounts.ex | mugimaru/ex_diet | 9602c07af27255decbb32fd7ae0c12b3ffe662a3 | [
"Apache-2.0"
] | 2 | 2020-06-25T11:51:46.000Z | 2020-09-30T14:00:40.000Z | lib/ex_diet_web/graphql/queries/accounts.ex | mugimaru/ex_diet | 9602c07af27255decbb32fd7ae0c12b3ffe662a3 | [
"Apache-2.0"
] | null | null | null | lib/ex_diet_web/graphql/queries/accounts.ex | mugimaru/ex_diet | 9602c07af27255decbb32fd7ae0c12b3ffe662a3 | [
"Apache-2.0"
] | 1 | 2020-01-29T08:43:07.000Z | 2020-01-29T08:43:07.000Z | defmodule ExDietWeb.GraphQL.Queries.Accounts do
@moduledoc false
use Absinthe.Schema.Notation
use Absinthe.Relay.Schema.Notation, :modern
alias ExDietWeb.GraphQL.Middleware.RequireAuth
object :accounts_queries do
field :me, :user do
middleware(RequireAuth)
resolve(fn _, _, %{context: %{user: user}} ->
{:ok, user}
end)
end
end
end
| 20.105263 | 51 | 0.683246 |
9e53cdf6a6cef836454c643178d6825dffca0710 | 593 | exs | Elixir | mix.exs | wombatsecurity/airbrakex | aca900123b658797110033d3523c094d262684c0 | [
"MIT"
] | null | null | null | mix.exs | wombatsecurity/airbrakex | aca900123b658797110033d3523c094d262684c0 | [
"MIT"
] | 1 | 2017-07-25T20:14:30.000Z | 2017-07-25T20:14:30.000Z | mix.exs | wombatsecurity/airbrakex | aca900123b658797110033d3523c094d262684c0 | [
"MIT"
] | null | null | null | defmodule Airbrakex.Mixfile do
use Mix.Project
def project do
[
app: :airbrakex,
version: "0.0.6",
elixir: "~> 1.0",
description: "Airbrake Elixir Notifier",
package: package,
deps: deps
]
end
def package do
[
maintainers: ["Michał Kalbarczyk"],
licenses: ["MIT"],
links: %{github: "https://github.com/fazibear/airbrakex"}
]
end
def application do
[
applications: [:idna, :hackney, :httpoison]
]
end
defp deps do
[
{:httpoison, "~> 0.7"},
{:poison, "~> 1.5"}
]
end
end
| 16.472222 | 63 | 0.541315 |
9e540941d70320b4e662951b027f4b42223675ff | 1,107 | ex | Elixir | lib/find_a_fluff_web/controllers/region_controller.ex | SylvainBigonneau/FindAFluff | c88c18cc085254e15445312f41b06b826449b251 | [
"MIT"
] | 1 | 2018-06-03T15:37:23.000Z | 2018-06-03T15:37:23.000Z | lib/find_a_fluff_web/controllers/region_controller.ex | SylvainBigonneau/FindAFluff | c88c18cc085254e15445312f41b06b826449b251 | [
"MIT"
] | null | null | null | lib/find_a_fluff_web/controllers/region_controller.ex | SylvainBigonneau/FindAFluff | c88c18cc085254e15445312f41b06b826449b251 | [
"MIT"
] | null | null | null | defmodule FindAFluffWeb.RegionController do
use FindAFluffWeb, :controller
alias FindAFluff.Region
def index(conn, params) do
species = Dict.get(params, "species", "%")
race = Dict.get(params, "race", "%")
photo = Dict.get(params, "photo", nil)
age = Dict.get(params, "age", nil)
query = from r in Region,
join: sh in assoc(r, :shelters),
join: p in assoc(sh, :pets),
join: sp in assoc(p, :species),
join: ra in assoc(p, :race),
where: like(fragment("to_char(?, 'FM999999999999')", sp.id), ^species)
and like(fragment("to_char(?, 'FM999999999999')", ra.id), ^race)
if age do
{age, _} = Integer.parse(age)
query = query
|> where([r, sh, p], p.birthdate > ago(^age, "year"))
end
if photo == "true" do
query = query
|> where([r, sh, p], p.img_url != "")
end
query = query
|> group_by([r], r.id)
|> select([r, sh, p], %{id: r.id, name: r.name, pet_count: count(p.id)})
regions = Repo.all(query)
render(conn, "index.json", regions: regions)
end
end
| 27.675 | 77 | 0.564589 |
9e541cb4644d9bc3b39040849d120be75fc7c648 | 1,366 | exs | Elixir | config/config.exs | francieleportugal/banking-api | 846c81dde3816042f32c2182985a2060485c5e7c | [
"Apache-2.0"
] | null | null | null | config/config.exs | francieleportugal/banking-api | 846c81dde3816042f32c2182985a2060485c5e7c | [
"Apache-2.0"
] | null | null | null | config/config.exs | francieleportugal/banking-api | 846c81dde3816042f32c2182985a2060485c5e7c | [
"Apache-2.0"
] | null | null | null | # This file is responsible for configuring your umbrella
# and **all applications** and their dependencies with the
# help of Mix.Config.
#
# Note that all applications in your umbrella share the
# same configuration and dependencies, which is why they
# all use the same configuration file. If you want different
# configurations or dependencies per app, it is best to
# move said applications out of the umbrella.
import Config
# Configure Mix tasks and generators
config :banking_api,
ecto_repos: [BankingApi.Repo]
config :banking_api_web,
ecto_repos: [BankingApi.Repo],
generators: [context_app: :banking_api, binary_id: true]
# Configures the endpoint
config :banking_api_web, BankingApiWeb.Endpoint,
url: [host: "localhost"],
secret_key_base: "fEIwylZ4T2oLlyX1OMq/aI40/a/vJzeQRBrQX1XAZWWTEw5uZU7EF1LHiA9+bbnU",
render_errors: [view: BankingApiWeb.ErrorView, accepts: ~w(json), layout: false],
pubsub_server: BankingApi.PubSub,
live_view: [signing_salt: "XrzYzcmc"]
# Configures Elixir's Logger
config :logger, :console,
format: "$time $metadata[$level] $message\n",
metadata: [:request_id]
# Use Jason for JSON parsing in Phoenix
config :phoenix, :json_library, Jason
# Import environment specific config. This must remain at the bottom
# of this file so it overrides the configuration defined above.
import_config "#{Mix.env()}.exs"
| 35.025641 | 86 | 0.771596 |
9e5428f88222c8df9e367cf1ab3269ceb8e73f28 | 2,924 | ex | Elixir | lib/yatapp/translations_downloader.ex | JeneaVranceanu/yatapp-elixir | 7297936ab2d53a28d7b0cd6b94ad2ab6206928ca | [
"MIT"
] | 2 | 2019-08-23T09:42:30.000Z | 2019-10-29T14:38:36.000Z | lib/yatapp/translations_downloader.ex | JeneaVranceanu/yatapp-elixir | 7297936ab2d53a28d7b0cd6b94ad2ab6206928ca | [
"MIT"
] | 6 | 2019-03-27T13:30:05.000Z | 2022-01-14T21:12:29.000Z | lib/yatapp/translations_downloader.ex | JeneaVranceanu/yatapp-elixir | 7297936ab2d53a28d7b0cd6b94ad2ab6206928ca | [
"MIT"
] | 2 | 2019-11-14T13:51:42.000Z | 2022-01-14T09:18:03.000Z | defmodule Yatapp.TranslationsDownloader do
@moduledoc """
TranslationsDownloader module.
"""
require Logger
alias Yatapp.Config
@api_end_point_url "https://api.yatapp.net/api/v1/project/:project_id/:lang/:format"
@doc """
Downloads all translations and saves all locales.
Returns `:ok`.
## Examples
iex> Yatapp.TranslationsDownloader.download()
:ok
"""
@spec download() :: :ok
def download() do
Enum.each(Config.get(:locales), fn lang ->
Logger.info("Getting translation for #{lang}")
save_file(lang)
Logger.info("#{lang}.#{Config.get(:translations_format)} saved")
end)
end
@spec download_and_store() :: :ok
def download_and_store() do
Enum.each(Config.get(:locales), fn locale ->
%HTTPoison.Response{body: body} = get_response(locale, "json", false, false)
parser = Config.get(:json_parser)
body
|> parser.decode!()
|> create_translations(locale)
end)
end
defp create_translations(translations, locale) do
Enum.each(translations, fn {key, value} -> create_translation("#{locale}.#{key}", value) end)
end
defp create_translation(key, value) when is_map(value) do
Enum.each(value, fn {k, v} -> create_translation("#{key}.#{k}", v) end)
end
defp create_translation(key, value) do
store = Config.store()
store.put(key, value)
end
# Builds the per-locale download URL from @api_end_point_url.
#
# `lang` and `format` fill the :lang / :format placeholders; `root` and
# `strip_empty` are passed through as query flags. The API token comes
# from configuration and is embedded in the query string.
defp download_url(lang, format, root, strip_empty) do
  url =
    @api_end_point_url
    |> String.replace(":project_id", Config.get(:project_id))
    |> String.replace(":lang", lang)
    |> String.replace(":format", format)

  # Plain concatenation replaces the previous String.replace_suffix("", …)
  # trick, which abused suffix replacement just to append the query string.
  url <> "?apiToken=#{Config.get(:api_key)}&root=#{root}&strip_empty=#{strip_empty}"
end
# Performs the HTTP GET for one locale and returns the raw
# %HTTPoison.Response{}. Raises on transport errors (HTTPoison.get!/3).
defp get_response(lang, format, root, strip_empty) do
  HTTPoison.start()

  url = download_url(lang, format, root, strip_empty)
  http = Config.get(:http)

  HTTPoison.get!(url, [], timeout: http.timeout, recv_timeout: http.recv_timeout)
end
# Downloads one locale in the configured format and writes it to
# `save_to_path`, creating the target directory first if needed.
defp save_file(lang) do
  format = Config.get(:translations_format)

  %HTTPoison.Response{body: body} =
    get_response(lang, format, Config.get(:root), Config.get(:strip_empty))

  save_to_path = Config.get(:save_to_path)

  # mkdir_p!/1 replaces the previous exists?/mkdir pair: no TOCTOU race,
  # missing parent directories are created, and a failure raises instead
  # of being silently discarded (mkdir's return value was ignored before).
  File.mkdir_p!(save_to_path)

  File.write!("#{save_to_path}#{lang}.#{format}", body)
end
@doc """
Re-loads previously downloaded translation files from the application's
priv directory into the configured store. Locales whose file does not
exist are skipped silently.
"""
@spec load_from_files() :: :ok
def load_from_files do
  app = Config.get(:otp_app)
  decoder = Config.get(:translation_file_parser)

  Config.get(:locales)
  |> Enum.each(fn locale ->
    relative = "#{Config.get(:save_to_path)}#{locale}.#{Config.get(:translations_format)}"
    absolute = Application.app_dir(app, relative)

    if File.exists?(absolute) do
      absolute
      |> File.read!()
      |> decoder.decode!()
      |> create_translations(locale)
    end
  end)
end
end
| 26.581818 | 97 | 0.647743 |
9e544f184c7a90f25fb3631a0e83a842d87fe4cf | 7,063 | ex | Elixir | lib/phoenix/endpoint/instrument.ex | arkgil/phoenix | b5d82814154b5fb87c0870e25c1c2243c9384d9e | [
"MIT"
] | null | null | null | lib/phoenix/endpoint/instrument.ex | arkgil/phoenix | b5d82814154b5fb87c0870e25c1c2243c9384d9e | [
"MIT"
] | null | null | null | lib/phoenix/endpoint/instrument.ex | arkgil/phoenix | b5d82814154b5fb87c0870e25c1c2243c9384d9e | [
"MIT"
] | null | null | null | defmodule Phoenix.Endpoint.Instrument do
@moduledoc false

# Compile-time support for endpoint instrumentation: scans the configured
# instrumenter modules and generates the `instrument/4` clauses that get
# injected into each endpoint (see definstrument/2).

# Event callbacks in instrumenter modules must have exactly this arity
# (phase, compile-time metadata, runtime metadata) to be treated as events.
@event_callback_arity 3
@doc false
def definstrument(otp_app, endpoint) do
  # Resolve {event, instrumenters} pairs at compile time; they drive the
  # generation of one instrument/4 clause per event below.
  app_instrumenters = app_instrumenters(otp_app, endpoint)

  # Everything inside this quote is injected into the user's endpoint
  # module; bind_quoted: freezes app_instrumenters as a literal there.
  quote bind_quoted: [app_instrumenters: app_instrumenters] do
    require Logger

    @doc """
    Instruments the given function.

    `event` is the event identifier (usually an atom) that specifies which
    instrumenting function to call in the instrumenter modules. `runtime` is
    metadata to be associated with the event at runtime (e.g., the query being
    issued if the event to instrument is a DB query).

    ## Examples

        instrument :render_view, %{view: "index.html"}, fn ->
          render(conn, "index.html")
        end

    """
    defmacro instrument(event, runtime \\ Macro.escape(%{}), fun) do
      # Capture compile-time metadata (module/function/file/line) from the
      # macro caller so instrumenters can report where the event came from.
      compile = Macro.escape(Phoenix.Endpoint.Instrument.strip_caller(__CALLER__))

      quote do
        unquote(__MODULE__).instrument(
          unquote(event),
          unquote(compile),
          unquote(runtime),
          unquote(fun)
        )
      end
    end

    # For each event in any of the instrumenters, we must generate a
    # clause of the `instrument/4` function. It'll look like this:
    #
    #     def instrument(:my_event, compile, runtime, fun) do
    #       res0 = Inst0.my_event(:start, compile, runtime)
    #       ...
    #
    #       start = :erlang.monotonic_time
    #       try do
    #         fun.()
    #       after
    #         diff = ...
    #         Inst0.my_event(:stop, diff, res0)
    #         ...
    #       end
    #     end
    #
    @doc false
    def instrument(event, compile, runtime, fun)

    for {event, instrumenters} <- app_instrumenters do
      def instrument(unquote(event), var!(compile), var!(runtime), fun)
          when is_map(var!(compile)) and is_map(var!(runtime)) and is_function(fun, 0) do
        # Start callbacks run first; their results (res0, res1, ...) are
        # threaded into the matching stop callbacks in the after block.
        unquote(Phoenix.Endpoint.Instrument.compile_start_callbacks(event, instrumenters))

        start = :erlang.monotonic_time()

        try do
          fun.()
        after
          # `after` (not rescue): stop callbacks fire whether fun returns
          # or raises, and the original result/exception is preserved.
          var!(diff) = :erlang.monotonic_time() - start
          unquote(Phoenix.Endpoint.Instrument.compile_stop_callbacks(event, instrumenters))
        end
      end
    end

    # Catch-all clause: events no instrumenter cares about just run the
    # function directly, with no measurement overhead.
    def instrument(event, compile, runtime, fun)
        when is_atom(event) and is_map(compile) and is_map(runtime) and is_function(fun, 0) do
      fun.()
    end
  end
end
# Resolves the instrumenter modules configured for `endpoint` under
# `otp_app` and maps them to `{event, instrumenters}` pairs — one pair per
# distinct event, with the list of modules interested in that event.
# `Phoenix.Logger` is always included as the first instrumenter.
defp app_instrumenters(otp_app, endpoint) do
  config = Application.get_env(otp_app, endpoint, [])
  instrumenters = config[:instrumenters] || []

  valid? = is_list(instrumenters) and Enum.all?(instrumenters, &is_atom/1)

  if not valid? do
    raise ":instrumenters must be a list of instrumenter modules"
  end

  events_to_instrumenters([Phoenix.Logger | instrumenters])
end
# Strips a `Macro.Env` struct, leaving only interesting compile-time
# metadata: module, "fun/arity" string, file and line — plus :application
# when the Logger compile-time application is configured.
@doc false
@spec strip_caller(Macro.Env.t()) :: map()
def strip_caller(%Macro.Env{module: mod, function: fun, file: file, line: line}) do
  caller = %{module: mod, function: form_fa(fun), file: file, line: line}

  if app = Application.get_env(:logger, :compile_time_application) do
    Map.put(caller, :application, app)
  else
    caller
  end
end
# Formats a `{name, arity}` pair as "name/arity"; `nil` (no enclosing
# function, e.g. a module-level call site) passes through unchanged.
defp form_fa({name, arity}), do: "#{name}/#{arity}"
defp form_fa(nil), do: nil
# Called by Phoenix.Endpoint.instrument/4, see docs there. Resolves the
# endpoint module out of a conn or socket, or returns the argument as-is
# when it is already an endpoint module; conns/sockets that carry no
# endpoint yield nil. Clause order matters and mirrors the original case.
@doc false
@spec extract_endpoint(Plug.Conn.t() | Phoenix.Socket.t() | module) :: module | nil
def extract_endpoint(%Plug.Conn{private: %{phoenix_endpoint: endpoint}}), do: endpoint
def extract_endpoint(%Phoenix.Socket{endpoint: endpoint}), do: endpoint
def extract_endpoint(%{__struct__: struct}) when struct in [Plug.Conn, Phoenix.Socket], do: nil
def extract_endpoint(endpoint), do: endpoint
# Returns the AST for all the calls to the "start event" callbacks in the
# given list of `instrumenters`.
# Each generated call looks like this:
#
#     res0 = Instr0.my_event(:start, compile, runtime)
#
# Exceptions/throws/exits raised by an instrumenter are caught and logged
# so they can never break the instrumented code path; in that case the
# bound resN is Logger.error/1's return value.
@doc false
@spec compile_start_callbacks(term, [module]) :: Macro.t()
def compile_start_callbacks(event, instrumenters) do
  Enum.map(Enum.with_index(instrumenters), fn {inst, index} ->
    error_prefix = "Instrumenter #{inspect(inst)}.#{event}/3 failed.\n"

    quote do
      unquote(build_result_variable(index)) =
        try do
          unquote(inst).unquote(event)(:start, var!(compile), var!(runtime))
        catch
          kind, error ->
            # __STACKTRACE__ replaces the deprecated System.stacktrace();
            # it is only valid inside rescue/catch, which is where this
            # generated code runs.
            Logger.error([
              unquote(error_prefix),
              Exception.format(kind, error, __STACKTRACE__)
            ])
        end
    end
  end)
end
# Returns the AST for all the calls to the "stop event" callbacks in the given
# list of `instrumenters`.
# Each function call looks like this:
#
#     Instr0.my_event(:stop, diff, res0)
#
# `diff` is bound by the generated `instrument/4` clause (the elapsed time
# in native units) and res0/res1/... carry whatever the matching start
# callbacks returned. Exceptions/throws/exits from an instrumenter are
# caught and logged so they cannot break the instrumented code path.
# NOTE(review): unlike compile_start_callbacks/2, the logged message here
# does not include a stacktrace — confirm whether that is intentional.
@doc false
@spec compile_stop_callbacks(term, [module]) :: Macro.t()
def compile_stop_callbacks(event, instrumenters) do
  Enum.map(Enum.with_index(instrumenters), fn {inst, index} ->
    error_prefix = "Instrumenter #{inspect(inst)}.#{event}/3 failed.\n"

    quote do
      try do
        unquote(inst).unquote(event)(:stop, var!(diff), unquote(build_result_variable(index)))
      catch
        kind, error ->
          Logger.error(unquote(error_prefix) <> Exception.format(kind, error))
      end
    end
  end)
end
# Takes a list of instrumenter modules and returns `{event, instrumenters}`
# tuples — one per distinct event, each paired with every module that
# defines a callback for it.
defp events_to_instrumenters(instrumenters) do
  grouped =
    instrumenters
    |> instrumenters_and_events()
    |> Enum.group_by(fn {_inst, event} -> event end)

  for {event, pairs} <- grouped, do: {event, strip_events(pairs)}
end
# Pairs each instrumenter module with every function it exports at
# @event_callback_arity; those function names are the events it handles.
defp instrumenters_and_events(instrumenters) do
  Enum.flat_map(instrumenters, fn inst ->
    for {event, arity} <- inst.__info__(:functions),
        arity == @event_callback_arity,
        do: {inst, event}
  end)
end
# Drops the event from `{instrumenter, event}` pairs, keeping the modules.
defp strip_events(instrumenters) do
  Enum.map(instrumenters, fn {inst, _event} -> inst end)
end
# Builds the AST variable (res0, res1, ...) that carries a start
# callback's result over to the matching stop callback.
defp build_result_variable(index) when is_integer(index) do
  Macro.var(String.to_atom("res" <> Integer.to_string(index)), nil)
end
end
| 33.794258 | 96 | 0.636274 |
9e5476ec7693496e3633d32fd9b08809bec1543d | 4,239 | ex | Elixir | test/support.ex | noizu/que | 065b069c3dafe0f90bf858ae77080af7b310ae62 | [
"MIT"
] | null | null | null | test/support.ex | noizu/que | 065b069c3dafe0f90bf858ae77080af7b310ae62 | [
"MIT"
] | null | null | null | test/support.ex | noizu/que | 065b069c3dafe0f90bf858ae77080af7b310ae62 | [
"MIT"
] | null | null | null | ## Namespace all test related modules under Que.Test.Meta
## ======================================================
defmodule Que.Test.Meta do
require Logger
# Test workers for handling Jobs
# ==============================
# Minimal worker: just logs its arguments when performed.
defmodule TestWorker do
  use Que.Worker

  def perform(args) do
    Logger.debug("#{__MODULE__} - perform: #{inspect(args)}")
  end
end
# Sharded worker (10 shards); sleeps briefly to simulate work.
defmodule TestShardWorker do
  use Que.ShardWorker, shards: 10

  def perform(args) do
    # 20ms of simulated work so shard scheduling is observable in tests.
    Process.sleep(20)
    Logger.debug("#{__MODULE__} - perform: #{inspect(args)}")
  end
end
# Worker configured to process up to 4 jobs concurrently.
defmodule ConcurrentWorker do
  use Que.Worker, concurrency: 4

  def perform(args) do
    Logger.debug("#{__MODULE__} - perform: #{inspect(args)}")
  end
end
# Worker that logs on perform and again from its on_success callback.
defmodule SuccessWorker do
  use Que.Worker

  def perform(args) do
    Logger.debug("#{__MODULE__} - perform: #{inspect(args)}")
  end

  def on_success(args) do
    Logger.debug("#{__MODULE__} - success: #{inspect(args)}")
  end
end
# Worker whose perform/1 always raises, exercising the failure path;
# on_failure/2 logs the original arguments.
defmodule FailureWorker do
  use Que.Worker

  def perform(args) do
    Logger.debug("#{__MODULE__} - perform: #{inspect(args)}")
    raise "some error"
  end

  def on_failure(args, _err) do
    Logger.debug("#{__MODULE__} - failure: #{inspect(args)}")
  end
end
# Worker simulating a slow (1s) job; logs on perform, success and failure.
defmodule SleepWorker do
  use Que.Worker

  def perform(args) do
    Process.sleep(1000)
    Logger.debug("#{__MODULE__} - perform: #{inspect(args)}")
  end

  def on_success(args) do
    Logger.debug("#{__MODULE__} - success: #{inspect(args)}")
  end

  def on_failure(args, _err) do
    Logger.debug("#{__MODULE__} - failure: #{inspect(args)}")
  end
end
# Worker with lifecycle hooks: logs from on_setup and on_teardown around
# perform/1.
defmodule SetupAndTeardownWorker do
  use Que.Worker

  def perform(args) do
    Logger.debug("#{__MODULE__} - perform: #{inspect(args)}")
  end

  def on_setup(job) do
    Logger.debug("#{__MODULE__} - on_setup: #{inspect(job)}")
  end

  def on_teardown(job) do
    Logger.debug("#{__MODULE__} - on_teardown: #{inspect(job)}")
  end
end
# Helper Module for Tests
# =======================

defmodule Helpers do
  # Sleeps for `ms` milliseconds (defaults to 3) so asynchronous work can
  # make progress before the test asserts on it.
  # (Previous comment said "2ms" — the default is 3.)
  def wait(ms \\ 3) do
    :timer.sleep(ms)
  end

  # Blocks until every task currently running under Que.TaskSupervisor has
  # terminated, by monitoring each child and waiting for its :DOWN message.
  def wait_for_children do
    Task.Supervisor.children(Que.TaskSupervisor)
    |> Enum.map(&Process.monitor/1)
    |> Enum.each(fn ref ->
      receive do
        {:DOWN, ^ref, _, _, _} -> nil
      end
    end)
  end

  # Captures IO output produced by `fun` and returns it as a string.
  def capture_io(fun) do
    ExUnit.CaptureIO.capture_io(fun)
  end

  # Captures Logger output produced by `fun` and returns it as a string.
  def capture_log(fun) do
    ExUnit.CaptureLog.capture_log(fun)
  end

  # Captures both regular IO and Logger output produced by `fun`,
  # funneling everything into a single captured string.
  def capture_all(fun) do
    capture_io(fn ->
      IO.puts capture_log(fn -> fun |> capture_io |> IO.puts end)
    end)
  end
end
# Application lifecycle helpers
# =============================

defmodule Helpers.App do
  # Full restart: stop :que, wipe persisted state, start it again.
  def reset do
    stop()
    Helpers.Mnesia.reset
    start()
    :ok
  end

  # Starts the :que application.
  def start, do: Application.start(:que)

  # Stops the :que application, capturing the shutdown log noise.
  def stop do
    Helpers.capture_log(fn -> Application.stop(:que) end)
  end
end
# Persistence (Mnesia) helpers
# ============================

defmodule Helpers.Mnesia do
  # Resolved once at compile time; falls back to the Mnesia adapter.
  @adapter Application.get_env(:que, :persistence_strategy) || Que.Persistence.Mnesia

  # Cleans up the persisted job data via the configured adapter.
  def reset, do: @adapter.reset()

  # Deletes the Mnesia DB from disk and creates a fresh one in memory,
  # capturing the log output the operation produces.
  def reset! do
    Helpers.capture_log(fn -> @adapter.reset!() end)
  end

  # Inserts a fixed set of six jobs covering completed/failed/started/
  # queued statuses and returns the adapter's insert results.
  def create_sample_jobs do
    jobs = [
      %Que.Job{id: 1, node: node(), priority: :pri0, status: :completed, worker: TestWorker},
      %Que.Job{id: 2, node: node(), priority: :pri0, status: :completed, worker: SuccessWorker},
      %Que.Job{id: 3, node: node(), priority: :pri0, status: :failed, worker: FailureWorker},
      %Que.Job{id: 4, node: node(), priority: :pri0, status: :started, worker: TestWorker},
      %Que.Job{id: 5, node: node(), priority: :pri0, status: :queued, worker: SuccessWorker},
      %Que.Job{id: 6, node: node(), priority: :pri0, status: :queued, worker: FailureWorker}
    ]

    Enum.map(jobs, &@adapter.insert/1)
  end
end
end
| 23.949153 | 99 | 0.597547 |
9e54988b6b97df95fc599107939fc9ab12f8395b | 41,197 | ex | Elixir | lib/phoenix/endpoint.ex | shritesh/phoenix | 4bf53ecaae5a9057ea57c248964490dfdee312af | [
"MIT"
] | null | null | null | lib/phoenix/endpoint.ex | shritesh/phoenix | 4bf53ecaae5a9057ea57c248964490dfdee312af | [
"MIT"
] | null | null | null | lib/phoenix/endpoint.ex | shritesh/phoenix | 4bf53ecaae5a9057ea57c248964490dfdee312af | [
"MIT"
] | null | null | null | defmodule Phoenix.Endpoint do
@moduledoc ~S"""
Defines a Phoenix endpoint.
The endpoint is the boundary where all requests to your
web application start. It is also the interface your
application provides to the underlying web servers.
Overall, an endpoint has three responsibilities:
* to provide a wrapper for starting and stopping the
endpoint as part of a supervision tree
* to define an initial plug pipeline for requests
to pass through
* to host web specific configuration for your
application
## Endpoints
An endpoint is simply a module defined with the help
of `Phoenix.Endpoint`. If you have used the `mix phx.new`
generator, an endpoint was automatically generated as
part of your application:
defmodule YourApp.Endpoint do
use Phoenix.Endpoint, otp_app: :your_app
# plug ...
# plug ...
plug YourApp.Router
end
Endpoints must be explicitly started as part of your application
supervision tree. Endpoints are added by default
to the supervision tree in generated applications. Endpoints can be
added to the supervision tree as follows:
children = [
YourApp.Endpoint
]
## Endpoint configuration
All endpoints are configured in your application environment.
For example:
config :your_app, YourApp.Endpoint,
secret_key_base: "kjoy3o1zeidquwy1398juxzldjlksahdk3"
Endpoint configuration is split into two categories. Compile-time
configuration means the configuration is read during compilation
and changing it at runtime has no effect. The compile-time
configuration is mostly related to error handling.
Runtime configuration, instead, is accessed during or
after your application is started and can be read through the
`c:config/2` function:
YourApp.Endpoint.config(:port)
YourApp.Endpoint.config(:some_config, :default_value)
### Dynamic configuration
For dynamically configuring the endpoint, such as loading data
from environment variables or configuration files, Phoenix invokes
the `init/2` callback on the endpoint, passing the atom `:supervisor`
as the first argument and the endpoint configuration as second.
All of Phoenix configuration, except the Compile-time configuration
below can be set dynamically from the `c:init/2` callback.
### Compile-time configuration
* `:code_reloader` - when `true`, enables code reloading functionality.
For code the list of code reloader configuration options see
`Phoenix.CodeReloader.reload!/1`
* `:debug_errors` - when `true`, uses `Plug.Debugger` functionality for
debugging failures in the application. Recommended to be set to `true`
only in development as it allows listing of the application source
code during debugging. Defaults to `false`
* `:render_errors` - responsible for rendering templates whenever there
is a failure in the application. For example, if the application crashes
with a 500 error during a HTML request, `render("500.html", assigns)`
will be called in the view given to `:render_errors`. Defaults to:
[view: MyApp.ErrorView, accepts: ~w(html), layout: false]
The default format is used when none is set in the connection
### Runtime configuration
* `:adapter` - which webserver adapter to use for serving web requests.
See the "Adapter configuration" section below
* `:cache_static_manifest` - a path to a json manifest file that contains
static files and their digested version. This is typically set to
"priv/static/cache_manifest.json" which is the file automatically generated
by `mix phx.digest`
* `:check_origin` - configure transports to check `origin` header or not. May
be `false`, `true`, a list of hosts that are allowed, or a function provided as
MFA tuple. Hosts also support wildcards.
For example, using a list of hosts:
check_origin: ["//phoenixframework.org", "//*.example.com"]
or a custom MFA function:
check_origin: {MyAppWeb.Auth, :my_check_origin?, []}
The MFA is invoked with the request `%URI{}` as the first argument,
followed by arguments in the MFA list
Defaults to `true`.
* `:force_ssl` - ensures no data is ever sent via HTTP, always redirecting
to HTTPS. It expects a list of options which are forwarded to `Plug.SSL`.
By default it sets the "strict-transport-security" header in HTTPS requests,
forcing browsers to always use HTTPS. If an unsafe request (HTTP) is sent,
it redirects to the HTTPS version using the `:host` specified in the `:url`
configuration. To dynamically redirect to the `host` of the current request,
set `:host` in the `:force_ssl` configuration to `nil`
* `:secret_key_base` - a secret key used as a base to generate secrets
for encrypting and signing data. For example, cookies and tokens
are signed by default, but they may also be encrypted if desired.
Defaults to `nil` as it must be set per application
* `:server` - when `true`, starts the web server when the endpoint
supervision tree starts. Defaults to `false`. The `mix phx.server`
task automatically sets this to `true`
* `:url` - configuration for generating URLs throughout the app.
Accepts the `:host`, `:scheme`, `:path` and `:port` options. All
keys except `:path` can be changed at runtime. Defaults to:
[host: "localhost", path: "/"]
The `:port` option requires either an integer, string, or
`{:system, "ENV_VAR"}`. When given a tuple like `{:system, "PORT"}`,
the port will be referenced from `System.get_env("PORT")` at runtime
as a workaround for releases where environment specific information
is loaded only at compile-time.
The `:host` option requires a string or `{:system, "ENV_VAR"}`. Similar
to `:port`, when given a tuple like `{:system, "HOST"}`, the host
will be referenced from `System.get_env("HOST")` at runtime.
The `:scheme` option accepts `"http"` and `"https"` values. Default value
is infered from top level `:http` or `:https` option. It is useful
when hosting Phoenix behind a load balancer or reverse proxy and
terminating SSL there.
The `:path` option can be used to override root path. Useful when hosting
Phoenix behind a reverse proxy with URL rewrite rules
* `:static_url` - configuration for generating URLs for static files.
It will fallback to `url` if no option is provided. Accepts the same
options as `url`
* `:watchers` - a set of watchers to run alongside your server. It
expects a list of tuples containing the executable and its arguments.
Watchers are guaranteed to run in the application directory, but only
when the server is enabled. For example, the watcher below will run
the "watch" mode of the webpack build tool when the server starts.
You can configure it to whatever build tool or command you want:
[node: ["node_modules/webpack/bin/webpack.js", "--mode", "development",
"--watch-stdin"]]
The `:cd` option can be used on a watcher to override the folder from
which the watcher will run. By default this will be the project's root:
`File.cwd!()`
[node: ["node_modules/webpack/bin/webpack.js", "--mode", "development",
"--watch-stdin"], cd: "my_frontend"]
* `:live_reload` - configuration for the live reload option.
Configuration requires a `:patterns` option which should be a list of
file patterns to watch. When these files change, it will trigger a reload.
If you are using a tool like [pow](http://pow.cx) in development,
you may need to set the `:url` option appropriately.
live_reload: [
url: "ws://localhost:4000",
patterns: [
~r{priv/static/.*(js|css|png|jpeg|jpg|gif)$},
~r{web/views/.*(ex)$},
~r{web/templates/.*(eex)$}
]
]
* `:pubsub_server` - the name of the pubsub server to use in channels
and via the Endpoint broadcast funtions. The PubSub server is typically
started in your supervision tree.
### Adapter configuration
Phoenix allows you to choose which webserver adapter to use. The default
is `Phoenix.Endpoint.Cowboy2Adapter` which can be configured via the
following options.
* `:http` - the configuration for the HTTP server. It accepts all options
as defined by [`Plug.Cowboy`](https://hexdocs.pm/plug_cowboy/). Defaults
to `false`
* `:https` - the configuration for the HTTPS server. It accepts all options
as defined by [`Plug.Cowboy`](https://hexdocs.pm/plug_cowboy/). Defaults
to `false`
* `:drainer` - a drainer process that triggers when your application is
shutting to wait for any on-going request to finish. It accepts all
options as defined by [`Plug.Cowboy`](https://hexdocs.pm/plug_cowboy/Plug.Cowboy.Drainer.httml).
Defaults to `[]` and can be disabled by setting it to false.
## Endpoint API
In the previous section, we have used the `c:config/2` function that is
automatically generated in your endpoint. Here's a list of all the functions
that are automatically defined in your endpoint:
* for handling paths and URLs: `c:struct_url/0`, `c:url/0`, `c:path/1`,
`c:static_url/0`,`c:static_path/1`, and `c:static_integrity/1`
* for broadcasting to channels: `c:broadcast/3`, `c:broadcast!/3`,
`c:broadcast_from/4`, `c:broadcast_from!/4`, `c:local_broadcast/3`,
and `c:local_broadcast_from/4`
* for configuration: `c:start_link/0`, `c:config/2`, and `c:config_change/2`
* as required by the `Plug` behaviour: `c:Plug.init/1` and `c:Plug.call/2`
## Instrumentation
Phoenix uses the `:telemetry` library for instrumentation. The following events
are published by Phoenix with the following measurements and metadata:
* `[:phoenix, :endpoint, :start]` - dispatched by `Plug.Telemetry` in your
endpoint at the beginning of every request.
* Measurement: `%{time: System.monotonic_time}`
* Metadata: `%{conn: Plug.Conn.t}`
* `[:phoenix, :endpoint, :stop]` - dispatched by `Plug.Telemetry` in your
endpoint whenever the response is sent
* Measurement: `%{duration: native_time}`
* Metadata: `%{conn: Plug.Conn.t}`
* `[:phoenix, :router_dispatch, :start]` - dispatched by `Phoenix.Router`
before dispatching to a matched route
* Measurement: `%{time: System.monotonic_time}`
* Metadata: `%{conn: Plug.Conn.t, route: binary, plug: module, plug_opts: term, path_params: map, pipe_through: [atom]}`
* `[:phoenix, :router_dispatch, :stop]` - dispatched by `Phoenix.Router`
after successfully dispatching to a matched route
* Measurement: `%{duration: native_time}`
* Metadata: `%{conn: Plug.Conn.t, route: binary, plug: module, plug_opts: term, path_params: map, pipe_through: [atom]}`
* `[:phoenix, :error_rendered]` - dispatched at the end of an error view being rendered
* Measurement: `%{duration: native_time}`
* Metadata: `%{status: Plug.Conn.status, kind: Exception.kind, reason: term, stacktrace: Exception.stacktrace}`
* `[:phoenix, :socket_connected]` - dispatched at the end of a socket connection
* Measurement: `%{duration: native_time}`
* Metadata: `%{endpoint: atom, transport: atom, params: term, connect_info: map, vsn: binary, user_socket: atom, result: :ok | :error, serializer: atom}`
* `[:phoenix, :channel_joined]` - dispatched at the end of a channel join
* Measurement: `%{duration: native_time}`
* Metadata: `%{params: term, socket: Phoenix.Socket.t}`
* `[:phoenix, :channel_handled_in]` - dispatched at the end of a channel handle in
* Measurement: `%{duration: native_time}`
* Metadata: `%{event: binary, params: term, socket: Phoenix.Socket.t}`
"""
@type topic :: String.t
@type event :: String.t
@type msg :: map
require Logger
# Configuration
@doc """
Starts the endpoint supervision tree.
Starts endpoint's configuration cache and possibly the servers for
handling requests.
"""
@callback start_link() :: Supervisor.on_start
@doc """
Access the endpoint configuration given by key.
"""
@callback config(key :: atom, default :: term) :: term
@doc """
Reload the endpoint configuration on application upgrades.
"""
@callback config_change(changed :: term, removed :: term) :: term
@doc """
Initialize the endpoint configuration.
Invoked when the endpoint supervisor starts, allows dynamically
configuring the endpoint from system environment or other runtime sources.
"""
@callback init(:supervisor, config :: Keyword.t) :: {:ok, Keyword.t}
# Paths and URLs
@doc """
Generates the endpoint base URL, but as a `URI` struct.
"""
@callback struct_url() :: URI.t
@doc """
Generates the endpoint base URL without any path information.
"""
@callback url() :: String.t
@doc """
Generates the path information when routing to this endpoint.
"""
@callback path(path :: String.t) :: String.t
@doc """
Generates the static URL without any path information.
"""
@callback static_url() :: String.t
@doc """
Generates a route to a static file in `priv/static`.
"""
@callback static_path(path :: String.t) :: String.t
@doc """
Generates an integrity hash to a static file in `priv/static`.
"""
@callback static_integrity(path :: String.t) :: String.t | nil
@doc """
Generates a two item tuple containing the `static_path` and `static_integrity`.
"""
@callback static_lookup(path :: String.t) :: {String.t, String.t} | {String.t, nil}
# Channels
@doc """
Broadcasts a `msg` as `event` in the given `topic` to all nodes.
"""
@callback broadcast(topic, event, msg) :: :ok | {:error, term}
@doc """
Broadcasts a `msg` as `event` in the given `topic` to all nodes.
Raises in case of failures.
"""
@callback broadcast!(topic, event, msg) :: :ok | no_return
@doc """
Broadcasts a `msg` from the given `from` as `event` in the given `topic` to all nodes.
"""
@callback broadcast_from(from :: pid, topic, event, msg) :: :ok | {:error, term}
@doc """
Broadcasts a `msg` from the given `from` as `event` in the given `topic` to all nodes.
Raises in case of failures.
"""
@callback broadcast_from!(from :: pid, topic, event, msg) :: :ok | no_return
@doc """
Broadcasts a `msg` as `event` in the given `topic` within the current node.
"""
@callback local_broadcast(topic, event, msg) :: :ok
@doc """
Broadcasts a `msg` from the given `from` as `event` in the given `topic` within the current node.
"""
@callback local_broadcast_from(from :: pid, topic, event, msg) :: :ok
@doc false
defmacro __using__(opts) do
quote do
@behaviour Phoenix.Endpoint
unquote(config(opts))
unquote(pubsub())
unquote(plug())
unquote(server())
end
end
defp config(opts) do
quote do
@otp_app unquote(opts)[:otp_app] || raise "endpoint expects :otp_app to be given"
var!(config) = Phoenix.Endpoint.Supervisor.config(@otp_app, __MODULE__)
var!(code_reloading?) = var!(config)[:code_reloader]
# Avoid unused variable warnings
_ = var!(code_reloading?)
@doc false
def init(_key, config) do
{:ok, config}
end
defoverridable init: 2
end
end
defp pubsub() do
quote do
@deprecated "#{inspect(__MODULE__)}.subscribe/2 is deprecated, please call Phoenix.PubSub directly instead"
def subscribe(topic, opts \\ []) when is_binary(topic) do
Phoenix.PubSub.subscribe(pubsub_server!(), topic, [])
end
@deprecated "#{inspect(__MODULE__)}.unsubscribe/1 is deprecated, please call Phoenix.PubSub directly instead"
def unsubscribe(topic) do
Phoenix.PubSub.unsubscribe(pubsub_server!(), topic)
end
def broadcast_from(from, topic, event, msg) do
Phoenix.Channel.Server.broadcast_from(pubsub_server!(), from, topic, event, msg)
end
def broadcast_from!(from, topic, event, msg) do
Phoenix.Channel.Server.broadcast_from!(pubsub_server!(), from, topic, event, msg)
end
def broadcast(topic, event, msg) do
Phoenix.Channel.Server.broadcast(pubsub_server!(), topic, event, msg)
end
def broadcast!(topic, event, msg) do
Phoenix.Channel.Server.broadcast!(pubsub_server!(), topic, event, msg)
end
def local_broadcast(topic, event, msg) do
Phoenix.Channel.Server.local_broadcast(pubsub_server!(), topic, event, msg)
end
def local_broadcast_from(from, topic, event, msg) do
Phoenix.Channel.Server.local_broadcast_from(pubsub_server!(), from, topic, event, msg)
end
defp pubsub_server! do
config(:pubsub_server) ||
raise ArgumentError, "no :pubsub_server configured for #{inspect(__MODULE__)}"
end
end
end
# Returns the quoted Plug pipeline skeleton injected into the endpoint:
# Plug.Builder setup, optional SSL enforcement, optional Plug.Debugger with
# Phoenix branding, and the before_compile hook that wires socket dispatch
# and error rendering.
defp plug() do
quote location: :keep do
use Plug.Builder, init_mode: Phoenix.plug_init_mode()
import Phoenix.Endpoint
# Accumulates every socket/3 declaration made in the endpoint module.
Module.register_attribute(__MODULE__, :phoenix_sockets, accumulate: true)
# SSL enforcement runs first in the pipeline when :force_ssl is configured.
if force_ssl = Phoenix.Endpoint.__force_ssl__(__MODULE__, var!(config)) do
plug Plug.SSL, force_ssl
end
# With :debug_errors enabled, render Plug.Debugger error pages styled with
# the Phoenix primary color and inline base64 PNG logo.
if var!(config)[:debug_errors] do
use Plug.Debugger,
otp_app: @otp_app,
banner: {Phoenix.Endpoint.RenderErrors, :__debugger_banner__, []},
style: [
primary: "#EB532D",
logo: "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAJEAAABjCAYAAACbguIxAAAAAXNSR0IArs4c6QAAAAlwSFlzAAALEwAACxMBAJqcGAAAHThJREFUeAHtPWlgVOW197vbLNkTFoFQlixAwpIVQZ8ooE+tRaBWdoK4VF5tfe2r1tb2ta611r6n9b1Xd4GETRGxIuJSoKACAlkIkD0hsiRoIHtmey98nu3E3ijDWs6BV5nhZ37dUCguecPuXhXXZPasJ5WlnRFxmugqLS1XMSg1FXF9zpRfZF9n59ZdsQc+kkpetVjZQDGVDmnRFrWnSQOwdzqhuPS6XIZEnBhNS6SzYOzXVqi89PwvXGmVg38gNKVkN5VX3o0ZJ6eyipRPLu4X7uAn09VzecWfv6u8vIrPmq8QTGXDmnRF6WlXtrWdY6yjlDOTAqCfmDCm1HX915pTdG2rUWgdpUUdpUUf50X9qxc5x20b/mbSx8NV+4a2yZJjg9T1hXf5G8/6La3M6Fo6YuZ7fe71QwWeN+nEN3hdaPz0jbVPiw/HcVZtfuCMT6GeLNo5XNrWPHmDAIEBr9Jqcyi5RPLu4X7uAn09VzecWfv6u8vIrPmq8QTGXDmnRF6WlXtrWdY6yjlDOTAqCfmDCm1HX915pTdG2rUWgdpUUdpUUf50X9qxc5x24a/mfTxcNW+oW2y5Jjg9T1hXf5G8/6La3M6Fo6YuZ7fe71QwWeN+nEN3hdaPz0jbVPiw/HcVZtfuCMT6GeLNo5XNrWPHmDAIEBr9Jqcyi5RPLu4X7uAn09VzecWfv6u8vIrPmq8QTGXDmnRF6WlXtrWdY6yjlDOTAqCfmDCm1HX915pTdG2rUWgdpUUdpUUf50X9qxc5x20a/mfTxcNW+oW2y5Jjg9T1hXf5G8/6La3M6Fo6YuZ7fe71QwWeN+nEN3hdaPz0jbVPiw/HcVZtfuCMT6GeLNo5XNrWPHmDAIEBr9JqO4SeLNo5XNrWPHmDAIEBr9JqO4Se"
]
end
# Compile after the debugger so we properly wrap it.
@before_compile Phoenix.Endpoint
@phoenix_render_errors var!(config)[:render_errors]
end
end
# Returns the quoted runtime API injected into the endpoint module:
# supervision entry points, configuration access, and URL/static-path
# helpers. `unquote: false` keeps literal `unquote` usable inside.
defp server() do
quote location: :keep, unquote: false do
@doc """
Returns the child specification to start the endpoint
under a supervision tree.
"""
def child_spec(opts) do
%{
id: __MODULE__,
start: {__MODULE__, :start_link, [opts]},
type: :supervisor
}
end
@doc """
Starts the endpoint supervision tree.
"""
def start_link(_opts \\ []) do
Phoenix.Endpoint.Supervisor.start_link(@otp_app, __MODULE__)
end
@doc """
Returns the endpoint configuration for `key`.
Returns `default` if the key does not exist.
"""
def config(key, default \\ nil) do
# Config lives in an ETS table named after the endpoint module —
# presumably created by the endpoint supervisor; verify in
# Phoenix.Endpoint.Supervisor.
case :ets.lookup(__MODULE__, key) do
[{^key, val}] -> val
[] -> default
end
end
@doc """
Reloads the configuration given the application environment changes.
"""
def config_change(changed, removed) do
Phoenix.Endpoint.Supervisor.config_change(__MODULE__, changed, removed)
end
@doc """
Generates the endpoint base URL without any path information.
It uses the configuration under `:url` to generate such.
"""
def url do
# Phoenix.Config.cache/3 memoizes the computed value under the given key.
Phoenix.Config.cache(__MODULE__,
:__phoenix_url__,
&Phoenix.Endpoint.Supervisor.url/1)
end
@doc """
Generates the static URL without any path information.
It uses the configuration under `:static_url` to generate
such. It falls back to `:url` if `:static_url` is not set.
"""
def static_url do
Phoenix.Config.cache(__MODULE__,
:__phoenix_static_url__,
&Phoenix.Endpoint.Supervisor.static_url/1)
end
@doc """
Generates the endpoint base URL but as a `URI` struct.
It uses the configuration under `:url` to generate such.
Useful for manipulating the URL data and passing it to
URL helpers.
"""
def struct_url do
Phoenix.Config.cache(__MODULE__,
:__phoenix_struct_url__,
&Phoenix.Endpoint.Supervisor.struct_url/1)
end
@doc """
Returns the host for the given endpoint.
"""
def host do
Phoenix.Config.cache(__MODULE__,
:__phoenix_host__,
&Phoenix.Endpoint.Supervisor.host/1)
end
@doc """
Generates the path information when routing to this endpoint.
"""
def path(path) do
Phoenix.Config.cache(__MODULE__,
:__phoenix_path__,
&Phoenix.Endpoint.Supervisor.path/1) <> path
end
@doc """
Generates the script name.
"""
def script_name do
Phoenix.Config.cache(__MODULE__,
:__phoenix_script_name__,
&Phoenix.Endpoint.Supervisor.script_name/1)
end
@doc """
Generates a route to a static file in `priv/static`.
"""
def static_path(path) do
# Static prefix is cached per endpoint; the per-file suffix comes from
# static_lookup/1 below.
Phoenix.Config.cache(__MODULE__, :__phoenix_static__,
&Phoenix.Endpoint.Supervisor.static_path/1) <>
elem(static_lookup(path), 0)
end
@doc """
Generates a base64-encoded cryptographic hash (sha512) to a static file
in `priv/static`. Meant to be used for Subresource Integrity with CDNs.
"""
def static_integrity(path) do
elem(static_lookup(path), 1)
end
@doc """
Returns a two item tuple with the first item being the `static_path`
and the second item being the `static_integrity`.
"""
def static_lookup(path) do
# Cached per {key, path} so each static asset is resolved once.
Phoenix.Config.cache(__MODULE__, {:__phoenix_static__, path},
&Phoenix.Endpoint.Supervisor.static_lookup(&1, path))
end
end
end
@doc false
# Merges the endpoint's :force_ssl options with a default :host entry.
# Returns nil when :force_ssl is not configured (or set to a falsy value).
def __force_ssl__(module, config) do
  # Access returns nil when the key is absent from the keyword list.
  force_ssl_opts = config[:force_ssl]

  if force_ssl_opts do
    # Default the SSL host to an MFA resolved lazily from the endpoint,
    # without overriding an explicitly configured :host.
    Keyword.put_new(force_ssl_opts, :host, {module, :host, []})
  else
    nil
  end
end
@doc false
# Injected at the end of every endpoint module: wraps Plug.Builder's call/2
# with endpoint bookkeeping and error rendering, and compiles one
# do_handler/3 dispatch clause per declared socket transport path.
defmacro __before_compile__(%{module: module}) do
sockets = Module.get_attribute(module, :phoenix_sockets)
# One quoted do_handler/3 clause per {path, transport} pair.
dispatches =
for {path, socket, socket_opts} <- sockets,
{path, type, conn_ast, socket, opts} <- socket_paths(module, path, socket, socket_opts) do
quote do
defp do_handler(unquote(path), conn, _opts) do
{unquote(type), unquote(conn_ast), unquote(socket), unquote(Macro.escape(opts))}
end
end
end
quote do
defoverridable [call: 2]
# Inline render errors so we set the endpoint before calling it.
def call(conn, opts) do
conn = put_in conn.secret_key_base, config(:secret_key_base)
conn = put_in conn.script_name, script_name()
conn = Plug.Conn.put_private(conn, :phoenix_endpoint, __MODULE__)
try do
super(conn, opts)
rescue
# Plug wraps exceptions raised downstream; unwrap to the original conn.
e in Plug.Conn.WrapperError ->
%{conn: conn, kind: kind, reason: reason, stack: stack} = e
Phoenix.Endpoint.RenderErrors.__catch__(conn, kind, reason, stack, @phoenix_render_errors)
catch
kind, reason ->
# NOTE(review): System.stacktrace/0 is deprecated since Elixir 1.7 in
# favor of __STACKTRACE__ — consider migrating if the supported Elixir
# baseline allows it.
stack = System.stacktrace()
Phoenix.Endpoint.RenderErrors.__catch__(conn, kind, reason, stack, @phoenix_render_errors)
end
end
@doc false
def __sockets__, do: unquote(Macro.escape(sockets))
@doc false
def __handler__(%{path_info: path} = conn, opts), do: do_handler(path, conn, opts)
unquote(dispatches)
# Fallback: anything not matching a socket path is handled as a plain plug.
defp do_handler(_path, conn, opts), do: {:plug, conn, __MODULE__, opts}
end
end
# Builds the dispatch entries for one `socket` declaration: one entry per
# enabled transport. When both transports are enabled, the longpoll entry
# precedes the websocket entry (same accumulation order as before).
defp socket_paths(endpoint, path, socket, opts) do
  websocket = Keyword.get(opts, :websocket, true)
  longpoll = Keyword.get(opts, :longpoll, false)

  websocket_entries =
    if websocket do
      config = Phoenix.Socket.Transport.load_config(websocket, Phoenix.Transports.WebSocket)
      {conn_ast, match_path} = socket_path(path, config)
      [{match_path, :websocket, conn_ast, socket, config}]
    else
      []
    end

  longpoll_entries =
    if longpoll do
      config = Phoenix.Socket.Transport.load_config(longpoll, Phoenix.Transports.LongPoll)
      plug_init = {endpoint, socket, config}
      {conn_ast, match_path} = socket_path(path, config)
      [{match_path, :plug, conn_ast, Phoenix.Transports.LongPoll, plug_init}]
    else
      []
    end

  longpoll_entries ++ websocket_entries
end
# Computes the match path and the quoted conn expression for one transport
# mount point. Returns {conn_ast, match_path}; conn_ast rebinds `conn` with
# extracted path params when the route declares variables.
defp socket_path(path, config) do
  transport_path = Keyword.fetch!(config, :path)

  normalized =
    (path <> "/" <> transport_path)
    |> String.split("/", trim: true)
    |> Enum.join("/")

  {vars, match_path} = Plug.Router.Utils.build_path_match(normalized)

  conn_ast =
    case vars do
      [] ->
        # No path variables: hand the conn through untouched.
        quote do
          conn
        end

      _ ->
        # Bind the extracted path variables into params/path_params.
        params_map = {:%{}, [], Plug.Router.Utils.build_path_params_match(vars)}

        quote do
          params = unquote(params_map)
          %Plug.Conn{conn | path_params: params, params: params}
        end
    end

  {conn_ast, match_path}
end
## API
@doc """
Defines a websocket/longpoll mount-point for a socket.
Note: for backwards compatibility purposes, the `:websocket`
and `:longpoll` options only have an effect if the socket
given as argument has no `transport` declarations in it.
## Options
* `:websocket` - controls the websocket configuration.
Defaults to `true`. May be false or a keyword list
of options. See "Shared configuration" and
"WebSocket configuration" for the whole list
* `:longpoll` - controls the longpoll configuration.
Defaults to `false`. May be true or a keyword list
of options. See "Shared configuration" and
"Longpoll configuration" for the whole list
If your socket is implemented using `Phoenix.Socket`,
you can also pass here all options accepted on
`use Phoenix.Socket`. An option given here will override
the value in `Phoenix.Socket`.
## Examples
socket "/ws", MyApp.UserSocket
socket "/ws/admin", MyApp.AdminUserSocket,
longpoll: true,
websocket: [compress: true]
## Path params
It is possible to include variables in the path, these will be
available in the `params` that are passed to the socket.
socket "/ws/:user_id", MyApp.UserSocket,
websocket: [path: "/project/:project_id"]
Note: This feature is not supported with the Cowboy 1 adapter.
## Common configuration
The configuration below can be given to both `:websocket` and
`:longpoll` keys:
* `:path` - the path to use for the transport. Will default
to the transport name ("/websocket" or "/longpoll")
* `:serializer` - a list of serializers for messages. See
`Phoenix.Socket` for more information
* `:transport_log` - if the transport layer itself should log and,
if so, the level
* `:check_origin` - if we should check the origin of requests when the
origin header is present. It defaults to true and, in such cases,
it will check against the host value in `YourApp.Endpoint.config(:url)[:host]`.
It may be set to `false` (not recommended) or to a list of explicitly
allowed origins.
check_origin: ["https://example.com",
"//another.com:888", "//other.com"]
Note: To connect from a native app be sure to either have the native app
set an origin or allow any origin via `check_origin: false`
* `:code_reloader` - enable or disable the code reloader. Defaults to your
endpoint configuration
* `:connect_info` - a list of keys that represent data to be copied from
the transport to be made available in the user socket `connect/3` callback
The valid keys are:
* `:peer_data` - the result of `Plug.Conn.get_peer_data/1`
* `:x_headers` - all request headers that have an "x-" prefix
* `:uri` - a `%URI{}` with information from the conn
* `{:session, session_config}` - the session information from `Plug.Conn`.
The `session_config` is an exact copy of the arguments given to `Plug.Session`.
This requires the "_csrf_token" to be given as request parameter with
the value of `URI.encode_www_form(Plug.CSRFProtection.get_csrf_token())`
when connecting to the socket. Otherwise the session will be `nil`.
Arbitrary keywords may also appear following the above valid keys, which
is useful for passing custom connection information to the socket.
For example:
socket "/socket", AppWeb.UserSocket,
websocket: [
connect_info: [:peer_data, :x_headers, :uri, session: [store: :cookie]]
]
With arbitrary keywords:
socket "/socket", AppWeb.UserSocket,
websocket: [
connect_info: [:uri, custom_value: "abcdef"]
]
## Websocket configuration
The following configuration applies only to `:websocket`.
* `:timeout` - the timeout for keeping websocket connections
open after it last received data, defaults to 60_000ms
* `:max_frame_size` - the maximum allowed frame size in bytes.
Supported from Cowboy 2.3 onwards, defaults to "infinity"
* `:compress` - whether to enable per message compression on
all data frames, defaults to false
## Longpoll configuration
The following configuration applies only to `:longpoll`:
* `:window_ms` - how long the client can wait for new messages
in its poll request
* `:pubsub_timeout_ms` - how long a request can wait for the
pubsub layer to respond
* `:crypto` - options for verifying and signing the token, accepted
by `Phoenix.Token`. By default tokens are valid for 2 weeks
"""
defmacro socket(path, module, opts \\ []) do
  # Store only the root of the alias in the endpoint's attribute. Keeping
  # the full alias would make the compiler track an endpoint <-> socket
  # dependency and recompile the endpoint (alongside most of the project)
  # whenever the socket module changes.
  socket_module = tear_alias(module)

  quote do
    @phoenix_sockets {unquote(path), unquote(socket_module), unquote(opts)}
  end
end
@doc false
# Deprecated no-op kept for backwards compatibility: warns once per call
# site (pointing at the caller's location) and expands to the literal :ok.
defmacro instrument(_endpoint_or_conn_or_socket, _event, _runtime, _fun) do
  IO.warn(
    "Phoenix.Endpoint.instrument/4 is deprecated and has no effect. Use :telemetry instead",
    Macro.Env.stacktrace(__CALLER__)
  )

  :ok
end
@doc """
Checks if Endpoint's web server has been configured to start.
* `otp_app` - The OTP app running the endpoint, for example `:my_app`
* `endpoint` - The endpoint module, for example `MyApp.Endpoint`
## Examples
iex> Phoenix.Endpoint.server?(:my_app, MyApp.Endpoint)
true
"""
def server?(otp_app, endpoint) when is_atom(otp_app) and is_atom(endpoint) do
Phoenix.Endpoint.Supervisor.server?(otp_app, endpoint)
end
# For `{:__aliases__, _, parts}` AST nodes, keeps only the root segment as a
# compile-time alias and defers joining the remaining segments to runtime via
# Module.concat/1, so the compiler does not record a dependency on the full
# module name. Any other AST shape is returned unchanged.
defp tear_alias({:__aliases__, meta, [root | rest]}) do
  root_alias = {:__aliases__, meta, [root]}

  quote do
    Module.concat([unquote(root_alias) | unquote(rest)])
  end
end

defp tear_alias(other), do: other
end
| 45.927536 | 10,138 | 0.735952 |
9e54c7f8ec1092818a4ab018ee0476fcd9b26e66 | 325 | exs | Elixir | config/config.exs | datapio/opencore | 6e766c3b3a2ad8b07295c7fd27cffc0923284197 | [
"Apache-2.0"
] | 5 | 2021-05-14T22:01:08.000Z | 2021-09-21T16:28:09.000Z | config/config.exs | datapio/opencore | 6e766c3b3a2ad8b07295c7fd27cffc0923284197 | [
"Apache-2.0"
] | 198 | 2019-10-17T12:22:25.000Z | 2022-03-16T02:14:14.000Z | config/config.exs | datapio/opencore | 6e766c3b3a2ad8b07295c7fd27cffc0923284197 | [
"Apache-2.0"
] | 1 | 2022-03-10T08:54:36.000Z | 2022-03-10T08:54:36.000Z | import Config
# Console logger: info level and up, single-line format carrying :user_id
# metadata when present.
config :logger, :console,
  level: :info,
  format: "$date $time [$level] $metadata$message\n",
  metadata: [:user_id]

# Cluster settings as {env var, default} keyword pairs. NOTE(review): how
# these [env:/default:] keywords are consumed is defined elsewhere in
# datapio_cluster — verify against its config loader.
config :datapio_cluster,
  service_name: [env: "DATAPIO_SERVICE_NAME", default: nil],
  app_name: [env: "DATAPIO_APP_NAME", default: "datapio-opencore"]

# Layer the environment-specific config (dev/test/prod) on top.
import_config "#{config_env()}.exs"
| 25 | 66 | 0.710769 |
9e54cac3ba267f053646bf37b5e15d1df43e8797 | 321 | exs | Elixir | lists-and-recursion/weather2.exs | sfat/programming-elixir-exercises | 19e62e3f3344ec044e1eb1b39b195f4dad3dff1c | [
"Apache-2.0"
] | 1 | 2019-02-17T11:54:17.000Z | 2019-02-17T11:54:17.000Z | lists-and-recursion/weather2.exs | sfat/programming-elixir-exercises | 19e62e3f3344ec044e1eb1b39b195f4dad3dff1c | [
"Apache-2.0"
] | null | null | null | lists-and-recursion/weather2.exs | sfat/programming-elixir-exercises | 19e62e3f3344ec044e1eb1b39b195f4dad3dff1c | [
"Apache-2.0"
] | null | null | null | defmodule WeatherHistory do
# Returns only the readings taken at `target_loc`. Each reading is a
# 4-element list [time, location, temp, rain]; list entries with any other
# shape are dropped.
def for_location([], _target_loc), do: []

def for_location([[_time, target_loc, _temp, _rain] = reading | rest], target_loc) do
  [reading | for_location(rest, target_loc)]
end

def for_location([_other | rest], target_loc), do: for_location(rest, target_loc)
end
| 40.125 | 80 | 0.679128 |
9e54dc52c98724be905ff429e65f532c0edf5516 | 1,084 | ex | Elixir | test/support/conn_case.ex | lucianosousa/gears | a59a93dd28ccd82e6cd1e3db809535a4dc10d007 | [
"MIT"
] | null | null | null | test/support/conn_case.ex | lucianosousa/gears | a59a93dd28ccd82e6cd1e3db809535a4dc10d007 | [
"MIT"
] | null | null | null | test/support/conn_case.ex | lucianosousa/gears | a59a93dd28ccd82e6cd1e3db809535a4dc10d007 | [
"MIT"
] | null | null | null | defmodule Gears.ConnCase do
@moduledoc """
This module defines the test case to be used by
tests that require setting up a connection.
Such tests rely on `Phoenix.ConnTest` and also
import other functionality to make it easier
to build and query models.
Finally, if the test case interacts with the database,
it cannot be async. For this reason, every test runs
inside a transaction which is reset at the beginning
of the test unless the test case is marked as async.
"""
use ExUnit.CaseTemplate
# Injected into every test module that does `use Gears.ConnCase`: brings in
# Phoenix connection test helpers, Ecto query/changeset functions, and the
# router path helpers.
using do
quote do
# Import conveniences for testing with connections
use Phoenix.ConnTest
alias Gears.Repo
import Ecto
import Ecto.Changeset
import Ecto.Query
import Gears.Router.Helpers
# The default endpoint for testing
@endpoint Gears.Endpoint
end
end
# Per-test setup: checks out a sandboxed DB connection; for non-async tests
# switches the sandbox to shared mode so processes spawned by the test reuse
# this connection. Returns a fresh test conn in the context.
setup tags do
:ok = Ecto.Adapters.SQL.Sandbox.checkout(Gears.Repo)
unless tags[:async] do
Ecto.Adapters.SQL.Sandbox.mode(Gears.Repo, {:shared, self()})
end
{:ok, conn: Phoenix.ConnTest.build_conn()}
end
end
| 24.088889 | 67 | 0.701107 |
9e54dfda7883122da3be124f7e0f2b3b98ca16ee | 624 | exs | Elixir | day_1/puzzle_two.exs | Ian-GL/advent_of_code_2021 | a54424d46572735833105107f9d4298aa1533f37 | [
"MIT"
] | null | null | null | day_1/puzzle_two.exs | Ian-GL/advent_of_code_2021 | a54424d46572735833105107f9d4298aa1533f37 | [
"MIT"
] | null | null | null | day_1/puzzle_two.exs | Ian-GL/advent_of_code_2021 | a54424d46572735833105107f9d4298aa1533f37 | [
"MIT"
] | null | null | null | "input.txt"
|> File.read!()
|> String.split("\n")
# Counts how often the sum of a sliding 3-measurement window increases
# versus the previous window (Advent of Code 2021, day 1 part 2).
# Accumulator: {increase_count, last_three_values}; nils mark the warm-up
# phase before three measurements have been seen.
|> Enum.reduce({0, [nil, nil, nil]}, fn val, {acc, prev_triade} ->
  # NOTE(review): Integer.parse("") returns :error, so a trailing empty
  # line in input.txt would raise MatchError here — confirm the input has
  # no trailing newline.
  {current_val, _} = Integer.parse(val)
  # Slide the window: drop the oldest value, append the new one.
  new_triade =
    prev_triade
    |> List.delete_at(0)
    |> List.insert_at(2, current_val)
  # Only compare once the previous window is fully populated.
  unless Enum.any?(prev_triade, &is_nil/1) do
    prev_sum = Enum.reduce(prev_triade, 0, &Kernel.+(&1, &2))
    new_triade_sum = Enum.reduce(new_triade, 0, &Kernel.+(&1, &2))
    new_acc =
      if new_triade_sum > prev_sum do
        acc + 1
      else
        acc
      end
    {new_acc, new_triade}
  else
    {acc, new_triade}
  end
end)
# Keep only the increase count, discarding the final window.
|> elem(0)
|> IO.inspect()
| 20.8 | 66 | 0.591346 |
9e54ffd485c521d9892673731986b7b95d6022c8 | 6,078 | ex | Elixir | clients/service_control/lib/google_api/service_control/v1/model/audit_log.ex | mocknen/elixir-google-api | dac4877b5da2694eca6a0b07b3bd0e179e5f3b70 | [
"Apache-2.0"
] | null | null | null | clients/service_control/lib/google_api/service_control/v1/model/audit_log.ex | mocknen/elixir-google-api | dac4877b5da2694eca6a0b07b3bd0e179e5f3b70 | [
"Apache-2.0"
] | null | null | null | clients/service_control/lib/google_api/service_control/v1/model/audit_log.ex | mocknen/elixir-google-api | dac4877b5da2694eca6a0b07b3bd0e179e5f3b70 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This class is auto generated by the swagger code generator program.
# https://github.com/swagger-api/swagger-codegen.git
# Do not edit the class manually.
defmodule GoogleApi.ServiceControl.V1.Model.AuditLog do
@moduledoc """
Common audit log format for Google Cloud Platform API operations.
## Attributes
- authenticationInfo (AuthenticationInfo): Authentication information. Defaults to: `null`.
- authorizationInfo ([AuthorizationInfo]): Authorization information. If there are multiple resources or permissions involved, then there is one AuthorizationInfo element for each {resource, permission} tuple. Defaults to: `null`.
- metadata (%{optional(String.t) => String.t}): Other service-specific data about the request, response, and other information associated with the current audited event. Defaults to: `null`.
- methodName (String.t): The name of the service method or operation. For API calls, this should be the name of the API method. For example, \"google.datastore.v1.Datastore.RunQuery\" \"google.logging.v1.LoggingService.DeleteLog\" Defaults to: `null`.
- numResponseItems (String.t): The number of items returned from a List or Query API method, if applicable. Defaults to: `null`.
- request (%{optional(String.t) => String.t}): The operation request. This may not include all request parameters, such as those that are too large, privacy-sensitive, or duplicated elsewhere in the log record. It should never include user-generated data, such as file contents. When the JSON object represented here has a proto equivalent, the proto name will be indicated in the `@type` property. Defaults to: `null`.
- requestMetadata (RequestMetadata): Metadata about the operation. Defaults to: `null`.
- resourceLocation (ResourceLocation): The resource location information. Defaults to: `null`.
- resourceName (String.t): The resource or collection that is the target of the operation. The name is a scheme-less URI, not including the API service name. For example: \"shelves/SHELF_ID/books\" \"shelves/SHELF_ID/books/BOOK_ID\" Defaults to: `null`.
- resourceOriginalState (%{optional(String.t) => String.t}): The resource's original state before mutation. Present only for operations which have successfully modified the targeted resource(s). In general, this field should contain all changed fields, except those that are already been included in `request`, `response`, `metadata` or `service_data` fields. When the JSON object represented here has a proto equivalent, the proto name will be indicated in the `@type` property. Defaults to: `null`.
- response (%{optional(String.t) => String.t}): The operation response. This may not include all response elements, such as those that are too large, privacy-sensitive, or duplicated elsewhere in the log record. It should never include user-generated data, such as file contents. When the JSON object represented here has a proto equivalent, the proto name will be indicated in the `@type` property. Defaults to: `null`.
- serviceData (%{optional(String.t) => String.t}): Deprecated, use `metadata` field instead. Other service-specific data about the request, response, and other activities. Defaults to: `null`.
- serviceName (String.t): The name of the API service performing the operation. For example, `\"datastore.googleapis.com\"`. Defaults to: `null`.
- status (Status): The status of the overall operation. Defaults to: `null`.
"""
# Auto-generated model (swagger codegen); do not edit by hand.
use GoogleApi.Gax.ModelBase
# Struct type mirroring the attributes documented above.
@type t :: %__MODULE__{
:authenticationInfo => GoogleApi.ServiceControl.V1.Model.AuthenticationInfo.t(),
:authorizationInfo => list(GoogleApi.ServiceControl.V1.Model.AuthorizationInfo.t()),
:metadata => map(),
:methodName => any(),
:numResponseItems => any(),
:request => map(),
:requestMetadata => GoogleApi.ServiceControl.V1.Model.RequestMetadata.t(),
:resourceLocation => GoogleApi.ServiceControl.V1.Model.ResourceLocation.t(),
:resourceName => any(),
:resourceOriginalState => map(),
:response => map(),
:serviceData => map(),
:serviceName => any(),
:status => GoogleApi.ServiceControl.V1.Model.Status.t()
}
# field/2,3 comes from GoogleApi.Gax.ModelBase — presumably it defines the
# struct keys and the decode metadata for nested models; verify in Gax.
field(:authenticationInfo, as: GoogleApi.ServiceControl.V1.Model.AuthenticationInfo)
field(:authorizationInfo, as: GoogleApi.ServiceControl.V1.Model.AuthorizationInfo, type: :list)
field(:metadata, type: :map)
field(:methodName)
field(:numResponseItems)
field(:request, type: :map)
field(:requestMetadata, as: GoogleApi.ServiceControl.V1.Model.RequestMetadata)
field(:resourceLocation, as: GoogleApi.ServiceControl.V1.Model.ResourceLocation)
field(:resourceName)
field(:resourceOriginalState, type: :map)
field(:response, type: :map)
field(:serviceData, type: :map)
field(:serviceName)
field(:status, as: GoogleApi.ServiceControl.V1.Model.Status)
end
# JSON decoding: delegate to the model's decode/2 — presumably generated by
# GoogleApi.Gax.ModelBase; verify there.
defimpl Poison.Decoder, for: GoogleApi.ServiceControl.V1.Model.AuditLog do
def decode(value, options) do
GoogleApi.ServiceControl.V1.Model.AuditLog.decode(value, options)
end
end
# JSON encoding: all generated models share the Gax base encoder.
defimpl Poison.Encoder, for: GoogleApi.ServiceControl.V1.Model.AuditLog do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
| 69.862069 | 564 | 0.739388 |
9e5505552d679f4609c4331a1a36e22ba3897860 | 107 | ex | Elixir | lib/aph/mailer.ex | tometoproject/tometo | ed91069b11a020723edb9a143de29d9bac86a2b0 | [
"BlueOak-1.0.0",
"Apache-2.0"
] | 8 | 2019-09-26T13:59:25.000Z | 2020-03-30T21:26:48.000Z | lib/aph/mailer.ex | tometoproject/tometo | ed91069b11a020723edb9a143de29d9bac86a2b0 | [
"BlueOak-1.0.0",
"Apache-2.0"
] | 39 | 2019-11-16T02:24:28.000Z | 2020-01-14T16:40:28.000Z | lib/aph/mailer.ex | tometoproject/tometo | ed91069b11a020723edb9a143de29d9bac86a2b0 | [
"BlueOak-1.0.0",
"Apache-2.0"
] | 2 | 2019-12-16T07:55:14.000Z | 2020-06-11T04:14:00.000Z | defmodule Aph.Mailer do
  @moduledoc """
  Outgoing-mail entry point for the application.

  Mail delivery behaviour is injected by `Bamboo.Mailer`; its configuration is
  read from the `:aph` OTP application environment.
  """
  # Bamboo generates the mailer API for this module at compile time.
  use Bamboo.Mailer, otp_app: :aph
end
| 21.4 | 43 | 0.757009 |
9e5521b7db843b77df32af0a0108df0206fc5e38 | 5,782 | ex | Elixir | deps/credo/lib/credo/config_file.ex | BandanaPandey/nary_tree | fb1eeb69e38e43c9f9ffb54297cef52dff5c928d | [
"MIT"
] | 13 | 2018-09-19T21:03:29.000Z | 2022-01-27T04:06:32.000Z | deps/credo/lib/credo/config_file.ex | BandanaPandey/nary_tree | fb1eeb69e38e43c9f9ffb54297cef52dff5c928d | [
"MIT"
] | 1 | 2020-05-26T04:16:57.000Z | 2020-05-26T04:16:57.000Z | deps/credo/lib/credo/config_file.ex | BandanaPandey/nary_tree | fb1eeb69e38e43c9f9ffb54297cef52dff5c928d | [
"MIT"
] | 3 | 2020-05-21T04:32:08.000Z | 2021-07-28T05:14:01.000Z | defmodule Credo.ConfigFile do
@doc """
`ConfigFile` structs represent all loaded and merged config files in a run.
"""
defstruct files: nil,
color: true,
checks: nil,
requires: [],
strict: false,
check_for_updates: true # checks if there is a new version of Credo
@config_filename ".credo.exs"
@default_config_name "default"
@default_config_file File.read!(@config_filename)
@default_glob "**/*.{ex,exs}"
@default_files_included [@default_glob]
@default_files_excluded []
@doc """
Returns Execution struct representing a consolidated Execution for all `.credo.exs`
files in `relevant_directories/1` merged into the default configuration.
- `config_name`: name of the configuration to load
- `safe`: if +true+, the config files are loaded using static analysis rather
than `Code.eval_string/1`
"""
def read_or_default(dir, config_name \\ nil, safe \\ false) do
dir
|> relevant_config_files
|> Enum.filter(&File.exists?/1)
|> Enum.map(&File.read!/1)
|> List.insert_at(0, @default_config_file)
|> Enum.map(&from_exs(dir, config_name || @default_config_name, &1, safe))
|> merge
|> add_given_directory_to_files(dir)
end
defp relevant_config_files(dir) do
dir
|> relevant_directories
|> add_config_files
end
@doc """
Returns all parent directories of the given `dir` as well as each `./config`
sub-directory.
"""
def relevant_directories(dir) do
dir
|> Path.expand
|> Path.split
|> Enum.reverse
|> get_dir_paths
|> add_config_dirs
end
defp get_dir_paths(dirs), do: do_get_dir_paths(dirs, [])
defp do_get_dir_paths(dirs, acc) when length(dirs) < 2, do: acc
defp do_get_dir_paths([dir | tail], acc) do
expanded_path =
tail
|> Enum.reverse
|> Path.join
|> Path.join(dir)
do_get_dir_paths(tail, [expanded_path | acc])
end
defp add_config_dirs(paths) do
Enum.flat_map(paths, fn(path) -> [path, Path.join(path, "config")] end)
end
defp add_config_files(paths) do
for path <- paths, do: Path.join(path, @config_filename)
end
defp from_exs(dir, config_name, exs_string, safe) do
exs_string
|> Credo.ExsLoader.parse(safe)
|> from_data(dir, config_name)
end
defp from_data(data, dir, config_name) do
data =
data[:configs]
|> List.wrap
|> Enum.find(&(&1[:name] == config_name))
%__MODULE__{
check_for_updates: data[:check_for_updates] || false,
requires: data[:requires] || [],
files: files_from_data(data, dir),
checks: checks_from_data(data),
strict: data[:strict] || false,
color: data[:color] || false
}
end
defp files_from_data(data, dir) do
files = data[:files] || %{}
included_files = files[:included] || dir
included_dir =
included_files
|> List.wrap
|> Enum.map(&join_default_files_if_directory/1)
%{
included: included_dir,
excluded: files[:excluded] || @default_files_excluded,
}
end
defp checks_from_data(data) do
case data[:checks] do
checks when is_list(checks) ->
checks
_ ->
[]
end
end
@doc """
Merges the given structs from left to right, meaning that later entries
overwrites earlier ones.
merge(base, other)
Any options in `other` will overwrite those in `base`.
The `files:` field is merged, meaning that you can define `included` and/or
`excluded` and only override the given one.
The `checks:` field is merged.
"""
def merge(list) when is_list(list) do
base = List.first(list)
tail = List.delete_at(list, 0)
merge(tail, base)
end
def merge([], config), do: config
def merge([other|tail], base) do
new_base = merge(base, other)
merge(tail, new_base)
end
def merge(base, other) do
%__MODULE__{
check_for_updates: other.check_for_updates,
requires: base.requires ++ other.requires,
files: merge_files(base, other),
checks: merge_checks(base, other),
strict: other.strict,
color: other.color,
}
end
def merge_checks(%__MODULE__{checks: checks_base}, %__MODULE__{checks: checks_other}) do
base = normalize_check_tuples(checks_base)
other = normalize_check_tuples(checks_other)
Keyword.merge(base, other)
end
def merge_files(%__MODULE__{files: files_base}, %__MODULE__{files: files_other}) do
%{
included: files_other[:included] || files_base[:included],
excluded: files_other[:excluded] || files_base[:excluded],
}
end
defp normalize_check_tuples(nil), do: []
defp normalize_check_tuples(list) when is_list(list) do
Enum.map(list, &normalize_check_tuple/1)
end
defp normalize_check_tuple({name}), do: {name, []}
defp normalize_check_tuple(tuple), do: tuple
  # Expands a directory into a glob matching all .ex/.exs files inside it;
  # anything that is not an existing directory is passed through untouched.
  # NOTE(review): @default_files_included is a single-element *list*, so this
  # relies on Path.join/2 accepting chardata — presumably intentional; verify.
  defp join_default_files_if_directory(dir) do
    if File.dir?(dir) do
      Path.join(dir, @default_files_included)
    else
      dir
    end
  end
defp add_given_directory_to_files(%__MODULE__{files: files} = config, dir) do
files = %{
included:
files[:included]
|> Enum.map(&add_directory_to_file(&1, dir))
|> Enum.uniq,
excluded:
files[:excluded]
|> Enum.map(&add_directory_to_file(&1, dir))
|> Enum.uniq
}
%__MODULE__{config | files: files}
end
defp add_directory_to_file(file_or_glob, dir) when is_binary(file_or_glob) do
if File.dir?(dir) do
if dir == "." || file_or_glob =~ ~r/^\// do
file_or_glob
else
Path.join(dir, file_or_glob)
end
else
dir
end
end
defp add_directory_to_file(regex, _), do: regex
end
| 26.768519 | 90 | 0.643549 |
9e554721073c2a955bed1a71a686b23f25480f96 | 11,937 | ex | Elixir | clients/service_consumer_management/lib/google_api/service_consumer_management/v1/model/service.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | clients/service_consumer_management/lib/google_api/service_consumer_management/v1/model/service.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | clients/service_consumer_management/lib/google_api/service_consumer_management/v1/model/service.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.ServiceConsumerManagement.V1.Model.Service do
  @moduledoc """
  `Service` is the root object of Google service configuration schema. It
  describes basic information about a service, such as the name and the
  title, and delegates other aspects to sub-sections. Each sub-section is
  either a proto message or a repeated proto message that configures a
  specific aspect, such as auth. See each proto message definition for details.

  Example:

      type: google.api.Service
      config_version: 3
      name: calendar.googleapis.com
      title: Google Calendar API
      apis:
      - name: google.calendar.v3.Calendar
      authentication:
        providers:
        - id: google_calendar_auth
          jwks_uri: https://www.googleapis.com/oauth2/v1/certs
          issuer: https://securetoken.google.com
        rules:
        - selector: "*"
          requirements:
            provider_id: google_calendar_auth

  ## Attributes

  *   `apis` (*type:* `list(GoogleApi.ServiceConsumerManagement.V1.Model.Api.t)`, *default:* `nil`) -
      A list of API interfaces exported by this service. Only the `name` field
      of the google.protobuf.Api needs to be provided by the configuration
      author, as the remaining fields will be derived from the IDL during the
      normalization process. It is an error to specify an API interface here
      which cannot be resolved against the associated IDL files.
  *   `authentication` (*type:* `GoogleApi.ServiceConsumerManagement.V1.Model.Authentication.t`, *default:* `nil`) -
      Auth configuration.
  *   `backend` (*type:* `GoogleApi.ServiceConsumerManagement.V1.Model.Backend.t`, *default:* `nil`) -
      API backend configuration.
  *   `billing` (*type:* `GoogleApi.ServiceConsumerManagement.V1.Model.Billing.t`, *default:* `nil`) -
      Billing configuration.
  *   `configVersion` (*type:* `integer()`, *default:* `nil`) -
      The semantic version of the service configuration. The config version
      affects the interpretation of the service configuration. For example,
      certain features are enabled by default for certain config versions.
      The latest config version is `3`.
  *   `context` (*type:* `GoogleApi.ServiceConsumerManagement.V1.Model.Context.t`, *default:* `nil`) -
      Context configuration.
  *   `control` (*type:* `GoogleApi.ServiceConsumerManagement.V1.Model.Control.t`, *default:* `nil`) -
      Configuration for the service control plane.
  *   `customError` (*type:* `GoogleApi.ServiceConsumerManagement.V1.Model.CustomError.t`, *default:* `nil`) -
      Custom error configuration.
  *   `documentation` (*type:* `GoogleApi.ServiceConsumerManagement.V1.Model.Documentation.t`, *default:* `nil`) -
      Additional API documentation.
  *   `endpoints` (*type:* `list(GoogleApi.ServiceConsumerManagement.V1.Model.Endpoint.t)`, *default:* `nil`) -
      Configuration for network endpoints. If this is empty, then an endpoint
      with the same name as the service is automatically generated to service
      all defined APIs.
  *   `enums` (*type:* `list(GoogleApi.ServiceConsumerManagement.V1.Model.Enum.t)`, *default:* `nil`) -
      A list of all enum types included in this API service. Enums referenced
      directly or indirectly by the `apis` are automatically included. Enums
      which are not referenced but shall be included should be listed here by
      name. Example:

          enums:
          - name: google.someapi.v1.SomeEnum

  *   `http` (*type:* `GoogleApi.ServiceConsumerManagement.V1.Model.Http.t`, *default:* `nil`) -
      HTTP configuration.
  *   `id` (*type:* `String.t`, *default:* `nil`) -
      A unique ID for a specific instance of this message, typically assigned
      by the client for tracking purpose. Must be no longer than 63 characters
      and only lower case letters, digits, '.', '_' and '-' are allowed. If
      empty, the server may choose to generate one instead.
  *   `logging` (*type:* `GoogleApi.ServiceConsumerManagement.V1.Model.Logging.t`, *default:* `nil`) -
      Logging configuration.
  *   `logs` (*type:* `list(GoogleApi.ServiceConsumerManagement.V1.Model.LogDescriptor.t)`, *default:* `nil`) -
      Defines the logs used by this service.
  *   `metrics` (*type:* `list(GoogleApi.ServiceConsumerManagement.V1.Model.MetricDescriptor.t)`, *default:* `nil`) -
      Defines the metrics used by this service.
  *   `monitoredResources` (*type:* `list(GoogleApi.ServiceConsumerManagement.V1.Model.MonitoredResourceDescriptor.t)`, *default:* `nil`) -
      Defines the monitored resources used by this service. This is required
      by the Service.monitoring and Service.logging configurations.
  *   `monitoring` (*type:* `GoogleApi.ServiceConsumerManagement.V1.Model.Monitoring.t`, *default:* `nil`) -
      Monitoring configuration.
  *   `name` (*type:* `String.t`, *default:* `nil`) -
      The service name, which is a DNS-like logical identifier for the
      service, such as `calendar.googleapis.com`. The service name typically
      goes through DNS verification to make sure the owner of the service
      also owns the DNS name.
  *   `producerProjectId` (*type:* `String.t`, *default:* `nil`) -
      The Google project that owns this service.
  *   `quota` (*type:* `GoogleApi.ServiceConsumerManagement.V1.Model.Quota.t`, *default:* `nil`) -
      Quota configuration.
  *   `sourceInfo` (*type:* `GoogleApi.ServiceConsumerManagement.V1.Model.SourceInfo.t`, *default:* `nil`) -
      Output only. The source information for this configuration if available.
  *   `systemParameters` (*type:* `GoogleApi.ServiceConsumerManagement.V1.Model.SystemParameters.t`, *default:* `nil`) -
      System parameter configuration.
  *   `systemTypes` (*type:* `list(GoogleApi.ServiceConsumerManagement.V1.Model.Type.t)`, *default:* `nil`) -
      A list of all proto message types included in this API service. It
      serves similar purpose as [google.api.Service.types], except that these
      types are not needed by user-defined APIs. Therefore, they will not show
      up in the generated discovery doc. This field should only be used to
      define system APIs in ESF.
  *   `title` (*type:* `String.t`, *default:* `nil`) -
      The product title for this service.
  *   `types` (*type:* `list(GoogleApi.ServiceConsumerManagement.V1.Model.Type.t)`, *default:* `nil`) -
      A list of all proto message types included in this API service. Types
      referenced directly or indirectly by the `apis` are automatically
      included. Messages which are not referenced but shall be included, such
      as types used by the `google.protobuf.Any` type, should be listed here
      by name. Example:

          types:
          - name: google.protobuf.Int32

  *   `usage` (*type:* `GoogleApi.ServiceConsumerManagement.V1.Model.Usage.t`, *default:* `nil`) -
      Configuration controlling usage of this service.
  """

  use GoogleApi.Gax.ModelBase

  # Typespec mirrors the camelCase JSON field names used on the wire.
  @type t :: %__MODULE__{
          :apis => list(GoogleApi.ServiceConsumerManagement.V1.Model.Api.t()),
          :authentication => GoogleApi.ServiceConsumerManagement.V1.Model.Authentication.t(),
          :backend => GoogleApi.ServiceConsumerManagement.V1.Model.Backend.t(),
          :billing => GoogleApi.ServiceConsumerManagement.V1.Model.Billing.t(),
          :configVersion => integer(),
          :context => GoogleApi.ServiceConsumerManagement.V1.Model.Context.t(),
          :control => GoogleApi.ServiceConsumerManagement.V1.Model.Control.t(),
          :customError => GoogleApi.ServiceConsumerManagement.V1.Model.CustomError.t(),
          :documentation => GoogleApi.ServiceConsumerManagement.V1.Model.Documentation.t(),
          :endpoints => list(GoogleApi.ServiceConsumerManagement.V1.Model.Endpoint.t()),
          :enums => list(GoogleApi.ServiceConsumerManagement.V1.Model.Enum.t()),
          :http => GoogleApi.ServiceConsumerManagement.V1.Model.Http.t(),
          :id => String.t(),
          :logging => GoogleApi.ServiceConsumerManagement.V1.Model.Logging.t(),
          :logs => list(GoogleApi.ServiceConsumerManagement.V1.Model.LogDescriptor.t()),
          :metrics => list(GoogleApi.ServiceConsumerManagement.V1.Model.MetricDescriptor.t()),
          :monitoredResources =>
            list(GoogleApi.ServiceConsumerManagement.V1.Model.MonitoredResourceDescriptor.t()),
          :monitoring => GoogleApi.ServiceConsumerManagement.V1.Model.Monitoring.t(),
          :name => String.t(),
          :producerProjectId => String.t(),
          :quota => GoogleApi.ServiceConsumerManagement.V1.Model.Quota.t(),
          :sourceInfo => GoogleApi.ServiceConsumerManagement.V1.Model.SourceInfo.t(),
          :systemParameters => GoogleApi.ServiceConsumerManagement.V1.Model.SystemParameters.t(),
          :systemTypes => list(GoogleApi.ServiceConsumerManagement.V1.Model.Type.t()),
          :title => String.t(),
          :types => list(GoogleApi.ServiceConsumerManagement.V1.Model.Type.t()),
          :usage => GoogleApi.ServiceConsumerManagement.V1.Model.Usage.t()
        }

  # Wire-format field declarations consumed by GoogleApi.Gax.ModelBase.
  field(:apis, as: GoogleApi.ServiceConsumerManagement.V1.Model.Api, type: :list)
  field(:authentication, as: GoogleApi.ServiceConsumerManagement.V1.Model.Authentication)
  field(:backend, as: GoogleApi.ServiceConsumerManagement.V1.Model.Backend)
  field(:billing, as: GoogleApi.ServiceConsumerManagement.V1.Model.Billing)
  field(:configVersion)
  field(:context, as: GoogleApi.ServiceConsumerManagement.V1.Model.Context)
  field(:control, as: GoogleApi.ServiceConsumerManagement.V1.Model.Control)
  field(:customError, as: GoogleApi.ServiceConsumerManagement.V1.Model.CustomError)
  field(:documentation, as: GoogleApi.ServiceConsumerManagement.V1.Model.Documentation)
  field(:endpoints, as: GoogleApi.ServiceConsumerManagement.V1.Model.Endpoint, type: :list)
  field(:enums, as: GoogleApi.ServiceConsumerManagement.V1.Model.Enum, type: :list)
  field(:http, as: GoogleApi.ServiceConsumerManagement.V1.Model.Http)
  field(:id)
  field(:logging, as: GoogleApi.ServiceConsumerManagement.V1.Model.Logging)
  field(:logs, as: GoogleApi.ServiceConsumerManagement.V1.Model.LogDescriptor, type: :list)
  field(:metrics, as: GoogleApi.ServiceConsumerManagement.V1.Model.MetricDescriptor, type: :list)

  field(:monitoredResources,
    as: GoogleApi.ServiceConsumerManagement.V1.Model.MonitoredResourceDescriptor,
    type: :list
  )

  field(:monitoring, as: GoogleApi.ServiceConsumerManagement.V1.Model.Monitoring)
  field(:name)
  field(:producerProjectId)
  field(:quota, as: GoogleApi.ServiceConsumerManagement.V1.Model.Quota)
  field(:sourceInfo, as: GoogleApi.ServiceConsumerManagement.V1.Model.SourceInfo)
  field(:systemParameters, as: GoogleApi.ServiceConsumerManagement.V1.Model.SystemParameters)
  field(:systemTypes, as: GoogleApi.ServiceConsumerManagement.V1.Model.Type, type: :list)
  field(:title)
  field(:types, as: GoogleApi.ServiceConsumerManagement.V1.Model.Type, type: :list)
  field(:usage, as: GoogleApi.ServiceConsumerManagement.V1.Model.Usage)
end
defimpl Poison.Decoder, for: GoogleApi.ServiceConsumerManagement.V1.Model.Service do
  # Route JSON decoding through the model's generated decode/2.
  def decode(json, opts) do
    GoogleApi.ServiceConsumerManagement.V1.Model.Service.decode(json, opts)
  end
end
defimpl Poison.Encoder, for: GoogleApi.ServiceConsumerManagement.V1.Model.Service do
  # Generic struct-to-JSON encoding provided by the Gax base module.
  def encode(model, opts), do: GoogleApi.Gax.ModelBase.encode(model, opts)
end
| 63.834225 | 210 | 0.727067 |
9e5576d9790e3c6d7c40ae08f2c422a9243ea760 | 1,858 | ex | Elixir | lib/graphql/lang/ast/nodes.ex | marvinhagemeister/graphql | 43bccc041438f05d14c8c6f40f193c3d7957ca9d | [
"BSD-3-Clause"
] | 719 | 2016-03-18T03:10:46.000Z | 2022-02-02T10:07:29.000Z | lib/graphql/lang/ast/nodes.ex | marvinhagemeister/graphql | 43bccc041438f05d14c8c6f40f193c3d7957ca9d | [
"BSD-3-Clause"
] | 51 | 2015-08-30T03:15:17.000Z | 2016-03-02T07:13:26.000Z | lib/graphql/lang/ast/nodes.ex | marvinhagemeister/graphql | 43bccc041438f05d14c8c6f40f193c3d7957ca9d | [
"BSD-3-Clause"
] | 34 | 2016-03-30T12:56:11.000Z | 2021-08-30T09:21:54.000Z |
defmodule GraphQL.Lang.AST.Nodes do
@kinds %{
Name: [],
Document: [:definitions],
OperationDefinition: [:name, :variableDefinitions, :directives, :selectionSet],
VariableDefinition: [:variable, :type, :defaultValue],
Variable: [:name],
SelectionSet: [:selections],
Field: [:alias, :name, :arguments, :directives, :selectionSet],
Argument: [:name, :value],
FragmentSpread: [:name, :directives],
InlineFragment: [:typeCondition, :directives, :selectionSet],
FragmentDefinition: [:name, :typeCondition, :directives, :selectionSet],
IntValue: [],
FloatValue: [],
StringValue: [],
BooleanValue: [],
EnumValue: [],
ListValue: [:values],
ObjectValue: [:fields],
ObjectField: [:name, :value],
Directive: [:name, :arguments],
NamedType: [:name],
ListType: [:type],
NonNullType: [:type],
ObjectTypeDefinition: [:name, :interfaces, :fields],
FieldDefinition: [:name, :arguments, :type],
InputValueDefinition: [:name, :type, :defaultValue],
InterfaceTypeDefinition: [:name, :fields],
UnionTypeDefinition: [:name, :types],
ScalarTypeDefinition: [:name],
EnumTypeDefinition: [:name, :values],
EnumValueDefinition: [:name],
InputObjectTypeDefinition: [:name, :fields],
TypeExtensionDefinition: [:definition]
}
def kinds, do: @kinds
@type operation_node :: %{
kind: :OperationDefinition,
operation: atom
}
end
| 39.531915 | 90 | 0.50915 |
9e559351b75faa664890b6099225828c163f8d97 | 8,698 | ex | Elixir | lib/aws/generated/iot1_click_devices.ex | qyon-brazil/aws-elixir | f7f21bebffc6776f95ffe9ef563cf368773438af | [
"Apache-2.0"
] | null | null | null | lib/aws/generated/iot1_click_devices.ex | qyon-brazil/aws-elixir | f7f21bebffc6776f95ffe9ef563cf368773438af | [
"Apache-2.0"
] | null | null | null | lib/aws/generated/iot1_click_devices.ex | qyon-brazil/aws-elixir | f7f21bebffc6776f95ffe9ef563cf368773438af | [
"Apache-2.0"
] | 1 | 2020-10-28T08:56:54.000Z | 2020-10-28T08:56:54.000Z | # WARNING: DO NOT EDIT, AUTO-GENERATED CODE!
# See https://github.com/aws-beam/aws-codegen for more details.
defmodule AWS.IoT1ClickDevices do
  @moduledoc """
  Describes all of the AWS IoT 1-Click device-related API operations for the
  service.

  Also provides sample requests, responses, and errors for the supported web
  services protocols.
  """

  alias AWS.Client
  alias AWS.Request

  # Static service descriptor used by AWS.Request to sign and route every call.
  def metadata do
    %AWS.ServiceMetadata{
      abbreviation: nil,
      api_version: "2018-05-14",
      content_type: "application/x-amz-json-1.1",
      credential_scope: nil,
      endpoint_prefix: "devices.iot1click",
      global?: false,
      protocol: "rest-json",
      service_id: "IoT 1Click Devices Service",
      signature_version: "v4",
      signing_name: "iot1click",
      target_prefix: nil
    }
  end

  @doc """
  Adds device(s) to your account (i.e., claim one or more devices) if and only
  if you received a claim code with the device(s).
  """
  def claim_devices_by_claim_code(%Client{} = client, claim_code, input, options \\ []) do
    path = "/claims/#{URI.encode(claim_code)}"
    Request.request_rest(client, metadata(), :put, path, [], [], input, options, 200)
  end

  @doc """
  Given a device ID, returns a DescribeDeviceResponse object describing the
  details of the device.
  """
  def describe_device(%Client{} = client, device_id, options \\ []) do
    path = "/devices/#{URI.encode(device_id)}"
    Request.request_rest(client, metadata(), :get, path, [], [], nil, options, 200)
  end

  @doc """
  Given a device ID, finalizes the claim request for the associated device.

  Claiming a device consists of initiating a claim, then publishing a device
  event, and finalizing the claim. For a device of type button, a device event
  can be published by simply clicking the device.
  """
  def finalize_device_claim(%Client{} = client, device_id, input, options \\ []) do
    path = "/devices/#{URI.encode(device_id)}/finalize-claim"
    Request.request_rest(client, metadata(), :put, path, [], [], input, options, 200)
  end

  @doc """
  Given a device ID, returns the invokable methods associated with the device.
  """
  def get_device_methods(%Client{} = client, device_id, options \\ []) do
    path = "/devices/#{URI.encode(device_id)}/methods"
    Request.request_rest(client, metadata(), :get, path, [], [], nil, options, 200)
  end

  @doc """
  Given a device ID, initiates a claim request for the associated device.

  Claiming a device consists of initiating a claim, then publishing a device
  event, and finalizing the claim. For a device of type button, a device event
  can be published by simply clicking the device.
  """
  def initiate_device_claim(%Client{} = client, device_id, input, options \\ []) do
    path = "/devices/#{URI.encode(device_id)}/initiate-claim"
    Request.request_rest(client, metadata(), :put, path, [], [], input, options, 200)
  end

  @doc """
  Given a device ID, issues a request to invoke a named device method (with
  possible parameters). See the "Example POST" code snippet below.
  """
  def invoke_device_method(%Client{} = client, device_id, input, options \\ []) do
    path = "/devices/#{URI.encode(device_id)}/methods"
    Request.request_rest(client, metadata(), :post, path, [], [], input, options, 200)
  end

  @doc """
  Using a device ID, returns a DeviceEventsResponse object containing an
  array of events for the device.
  """
  def list_device_events(
        %Client{} = client,
        device_id,
        from_time_stamp,
        max_results \\ nil,
        next_token \\ nil,
        to_time_stamp,
        options \\ []
      ) do
    path = "/devices/#{URI.encode(device_id)}/events"

    # Prepending in this order yields fromTimeStamp, maxResults, nextToken,
    # toTimeStamp — the same final ordering as before.
    query_params =
      []
      |> put_query_param("toTimeStamp", to_time_stamp)
      |> put_query_param("nextToken", next_token)
      |> put_query_param("maxResults", max_results)
      |> put_query_param("fromTimeStamp", from_time_stamp)

    Request.request_rest(client, metadata(), :get, path, query_params, [], nil, options, 200)
  end

  @doc """
  Lists the 1-Click compatible devices associated with your AWS account.
  """
  def list_devices(
        %Client{} = client,
        device_type \\ nil,
        max_results \\ nil,
        next_token \\ nil,
        options \\ []
      ) do
    query_params =
      []
      |> put_query_param("nextToken", next_token)
      |> put_query_param("maxResults", max_results)
      |> put_query_param("deviceType", device_type)

    Request.request_rest(client, metadata(), :get, "/devices", query_params, [], nil, options, 200)
  end

  @doc """
  Lists the tags associated with the specified resource ARN.
  """
  def list_tags_for_resource(%Client{} = client, resource_arn, options \\ []) do
    path = "/tags/#{URI.encode(resource_arn)}"
    Request.request_rest(client, metadata(), :get, path, [], [], nil, options, 200)
  end

  @doc """
  Adds or updates the tags associated with the resource ARN.

  See [AWS IoT 1-Click Service Limits](https://docs.aws.amazon.com/iot-1-click/latest/developerguide/1click-appendix.html#1click-limits)
  for the maximum number of tags allowed per resource.
  """
  def tag_resource(%Client{} = client, resource_arn, input, options \\ []) do
    path = "/tags/#{URI.encode(resource_arn)}"
    Request.request_rest(client, metadata(), :post, path, [], [], input, options, 204)
  end

  @doc """
  Disassociates a device from your AWS account using its device ID.
  """
  def unclaim_device(%Client{} = client, device_id, input, options \\ []) do
    path = "/devices/#{URI.encode(device_id)}/unclaim"
    Request.request_rest(client, metadata(), :put, path, [], [], input, options, 200)
  end

  @doc """
  Using tag keys, deletes the tags (key/value pairs) associated with the
  specified resource ARN.
  """
  def untag_resource(%Client{} = client, resource_arn, input, options \\ []) do
    path = "/tags/#{URI.encode(resource_arn)}"

    # "TagKeys" is lifted out of the input body and sent as the "tagKeys"
    # query parameter.
    {query_params, input} = Request.build_params([{"TagKeys", "tagKeys"}], input)

    Request.request_rest(client, metadata(), :delete, path, query_params, [], input, options, 204)
  end

  @doc """
  Using a Boolean value (true or false), this operation enables or disables
  the device given a device ID.
  """
  def update_device_state(%Client{} = client, device_id, input, options \\ []) do
    path = "/devices/#{URI.encode(device_id)}/state"
    Request.request_rest(client, metadata(), :put, path, [], [], input, options, 200)
  end

  # Prepends {key, value} onto a query-parameter list, skipping nil values.
  defp put_query_param(params, _key, nil), do: params
  defp put_query_param(params, key, value), do: [{key, value} | params]
end
9e55bcc6d98acf8a4537e73562841a13a3337d6f | 9,435 | exs | Elixir | apps/admin_api/test/admin_api/v1/controllers/admin_auth/admin_auth_controller_test.exs | amadeobrands/ewallet | 505b7822721940a7b892a9b35c225e80cc8ac0b4 | [
"Apache-2.0"
] | 1 | 2018-12-07T06:21:21.000Z | 2018-12-07T06:21:21.000Z | apps/admin_api/test/admin_api/v1/controllers/admin_auth/admin_auth_controller_test.exs | amadeobrands/ewallet | 505b7822721940a7b892a9b35c225e80cc8ac0b4 | [
"Apache-2.0"
] | null | null | null | apps/admin_api/test/admin_api/v1/controllers/admin_auth/admin_auth_controller_test.exs | amadeobrands/ewallet | 505b7822721940a7b892a9b35c225e80cc8ac0b4 | [
"Apache-2.0"
] | null | null | null | defmodule AdminAPI.V1.AdminAuth.AdminAuthControllerTest do
use AdminAPI.ConnCase, async: true
alias EWallet.Web.V1.{AccountSerializer, UserSerializer}
alias EWalletConfig.System
alias EWalletDB.{Account, AuthToken, Membership, Repo, Role, User}
describe "/admin.login" do
test "responds with a new auth token if the given email and password are valid" do
response =
unauthenticated_request("/admin.login", %{email: @user_email, password: @password})
auth_token = AuthToken |> get_last_inserted() |> Repo.preload([:user, :account])
expected = %{
"version" => @expected_version,
"success" => true,
"data" => %{
"object" => "authentication_token",
"authentication_token" => auth_token.token,
"user_id" => auth_token.user.id,
"user" => auth_token.user |> UserSerializer.serialize() |> stringify_keys(),
"account_id" => auth_token.account.id,
"account" => auth_token.account |> AccountSerializer.serialize() |> stringify_keys(),
"master_admin" => true,
"role" => "admin"
}
}
assert response == expected
end
test "responds with a new auth token if credentials are valid but user is not master_admin" do
user = get_test_admin() |> Repo.preload([:accounts])
{:ok, _} = Membership.unassign(user, Enum.at(user.accounts, 0))
account = insert(:account)
role = Role.get_by(name: "admin")
_membership = insert(:membership, %{user: user, role: role, account: account})
response =
unauthenticated_request("/admin.login", %{email: @user_email, password: @password})
auth_token = AuthToken |> get_last_inserted() |> Repo.preload([:user, :account])
expected = %{
"version" => @expected_version,
"success" => true,
"data" => %{
"object" => "authentication_token",
"authentication_token" => auth_token.token,
"user_id" => auth_token.user.id,
"user" => auth_token.user |> UserSerializer.serialize() |> stringify_keys(),
"account_id" => auth_token.account.id,
"account" => auth_token.account |> AccountSerializer.serialize() |> stringify_keys(),
"master_admin" => false,
"role" => "admin"
}
}
assert response == expected
end
test "responds with a new auth token if credentials are valid and user is a viewer" do
user = get_test_admin() |> Repo.preload([:accounts])
{:ok, _} = Membership.unassign(user, Enum.at(user.accounts, 0))
account = insert(:account)
role = insert(:role, %{name: "viewer"})
_membership = insert(:membership, %{user: user, role: role, account: account})
response =
unauthenticated_request("/admin.login", %{email: @user_email, password: @password})
auth_token = AuthToken |> get_last_inserted() |> Repo.preload([:user, :account])
expected = %{
"version" => @expected_version,
"success" => true,
"data" => %{
"object" => "authentication_token",
"authentication_token" => auth_token.token,
"user_id" => auth_token.user.id,
"user" => auth_token.user |> UserSerializer.serialize() |> stringify_keys(),
"account_id" => auth_token.account.id,
"account" => auth_token.account |> AccountSerializer.serialize() |> stringify_keys(),
"master_admin" => false,
"role" => "viewer"
}
}
assert response == expected
end
test "returns an error if the credentials are valid but the email invite is not yet accepted" do
{:ok, _user} =
[email: @user_email]
|> User.get_by()
|> User.update(%{
invite_uuid: insert(:invite).uuid,
originator: %System{}
})
response =
unauthenticated_request("/admin.login", %{email: @user_email, password: @password})
assert response["version"] == @expected_version
assert response["success"] == false
assert response["data"]["object"] == "error"
assert response["data"]["code"] == "user:invite_pending"
assert response["data"]["description"] == "The user has not accepted the invite."
end
test "returns an error if the given email does not exist" do
response =
unauthenticated_request("/admin.login", %{
email: "[email protected]",
password: @password
})
expected = %{
"version" => @expected_version,
"success" => false,
"data" => %{
"object" => "error",
"code" => "user:invalid_login_credentials",
"description" => "There is no user corresponding to the provided login credentials.",
"messages" => nil
}
}
assert response == expected
end
test "returns an error if the given password is incorrect" do
response =
unauthenticated_request("/admin.login", %{email: @user_email, password: "wrong_password"})
expected = %{
"version" => @expected_version,
"success" => false,
"data" => %{
"object" => "error",
"code" => "user:invalid_login_credentials",
"description" => "There is no user corresponding to the provided login credentials.",
"messages" => nil
}
}
assert response == expected
end
test "returns :invalid_parameter if email is blank" do
response = unauthenticated_request("/admin.login", %{email: "", password: @password})
refute response["success"]
assert response["data"]["code"] == "user:invalid_login_credentials"
end
test "returns :invalid_parameter if password is blank" do
response = unauthenticated_request("/admin.login", %{email: @user_email, password: ""})
refute response["success"]
assert response["data"]["code"] == "user:invalid_login_credentials"
end
test "returns :invalid_parameter if email is missing" do
response = unauthenticated_request("/admin.login", %{email: nil, password: @password})
refute response["success"]
assert response["data"]["code"] == "client:invalid_parameter"
end
test "returns :invalid_parameter if password is missing" do
response = unauthenticated_request("/admin.login", %{email: @user_email, password: nil})
refute response["success"]
assert response["data"]["code"] == "client:invalid_parameter"
end
test "returns :invalid_parameter if both email and password are missing" do
response = unauthenticated_request("/admin.login", %{foo: "bar"})
refute response["success"]
refute response["success"]
assert response["data"]["code"] == "client:invalid_parameter"
end
end
describe "/auth_token.switch_account" do
test "switches the account" do
user = get_test_admin()
account = insert(:account, parent: Account.get_master_account())
# User belongs to the master account and has access to the sub account
# just created
response =
admin_user_request("/auth_token.switch_account", %{
"account_id" => account.id
})
assert response["success"]
assert response["data"]["user"]["id"] == user.id
assert response["data"]["account"]["id"] == account.id
end
test "returns a permission error when trying to switch to an invalid account" do
user = get_test_admin() |> Repo.preload([:accounts])
{:ok, _} = Membership.unassign(user, Enum.at(user.accounts, 0))
account = insert(:account)
response =
admin_user_request("/auth_token.switch_account", %{
"account_id" => account.id
})
refute response["success"]
assert response["data"]["code"] == "unauthorized"
end
test "returns :unauthorized when the account does not exist" do
response =
admin_user_request("/auth_token.switch_account", %{
"account_id" => "123"
})
refute response["success"]
assert response["data"]["code"] == "unauthorized"
end
test "returns :invalid_parameter when account_id is not sent" do
response =
admin_user_request("/auth_token.switch_account", %{
"fake" => "123"
})
refute response["success"]
assert response["data"]["code"] == "client:invalid_parameter"
end
test "returns :auth_token_not_found if user credentials are invalid" do
response =
admin_user_request(
"/auth_token.switch_account",
%{
"account_id" => "123"
},
auth_token: "bad_auth_token"
)
refute response["success"]
assert response["data"]["code"] == "auth_token:not_found"
end
end
describe "/me.logout" do
test "responds success with empty response when successful" do
response = admin_user_request("/me.logout")
expected = %{
"version" => @expected_version,
"success" => true,
"data" => %{}
}
assert response == expected
end
test "prevents following calls from using the same credentials" do
response1 = admin_user_request("/me.logout")
assert response1["success"]
response2 = admin_user_request("/me.logout")
refute response2["success"]
assert response2["data"]["code"] == "user:auth_token_expired"
end
end
end
| 34.815498 | 100 | 0.613567 |
9e55c799df1c6b1bc35f31d69dc0cbdfa52d082c | 2,736 | ex | Elixir | clients/retail/lib/google_api/retail/v2/model/google_cloud_retail_v2alpha_import_user_events_response.ex | renovate-bot/elixir-google-api | 1da34cd39b670c99f067011e05ab90af93fef1f6 | [
"Apache-2.0"
] | 1 | 2021-12-20T03:40:53.000Z | 2021-12-20T03:40:53.000Z | clients/retail/lib/google_api/retail/v2/model/google_cloud_retail_v2alpha_import_user_events_response.ex | swansoffiee/elixir-google-api | 9ea6d39f273fb430634788c258b3189d3613dde0 | [
"Apache-2.0"
] | 1 | 2020-08-18T00:11:23.000Z | 2020-08-18T00:44:16.000Z | clients/retail/lib/google_api/retail/v2/model/google_cloud_retail_v2alpha_import_user_events_response.ex | dazuma/elixir-google-api | 6a9897168008efe07a6081d2326735fe332e522c | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Retail.V2.Model.GoogleCloudRetailV2alphaImportUserEventsResponse do
  @moduledoc """
  Response of the ImportUserEventsRequest. If the long running operation was successful, then this message is returned by the google.longrunning.Operations.response field if the operation was successful.

  ## Attributes

  *   `errorSamples` (*type:* `list(GoogleApi.Retail.V2.Model.GoogleRpcStatus.t)`, *default:* `nil`) - A sample of errors encountered while processing the request.
  *   `errorsConfig` (*type:* `GoogleApi.Retail.V2.Model.GoogleCloudRetailV2alphaImportErrorsConfig.t`, *default:* `nil`) - Echoes the destination for the complete errors if this field was set in the request.
  *   `importSummary` (*type:* `GoogleApi.Retail.V2.Model.GoogleCloudRetailV2alphaUserEventImportSummary.t`, *default:* `nil`) - Aggregated statistics of user event import status.
  """
  use GoogleApi.Gax.ModelBase
  @type t :: %__MODULE__{
          :errorSamples => list(GoogleApi.Retail.V2.Model.GoogleRpcStatus.t()) | nil,
          :errorsConfig =>
            GoogleApi.Retail.V2.Model.GoogleCloudRetailV2alphaImportErrorsConfig.t() | nil,
          :importSummary =>
            GoogleApi.Retail.V2.Model.GoogleCloudRetailV2alphaUserEventImportSummary.t() | nil
        }
  # NOTE: generated code — the field/3 declarations mirror the @type above and
  # drive JSON (de)serialization via GoogleApi.Gax.ModelBase.
  field(:errorSamples, as: GoogleApi.Retail.V2.Model.GoogleRpcStatus, type: :list)
  field(:errorsConfig, as: GoogleApi.Retail.V2.Model.GoogleCloudRetailV2alphaImportErrorsConfig)
  field(:importSummary,
    as: GoogleApi.Retail.V2.Model.GoogleCloudRetailV2alphaUserEventImportSummary
  )
end
# Delegates Poison decoding to the generated model's decode/2, which resolves
# nested model structs for each declared field.
defimpl Poison.Decoder,
  for: GoogleApi.Retail.V2.Model.GoogleCloudRetailV2alphaImportUserEventsResponse do
  def decode(value, options) do
    GoogleApi.Retail.V2.Model.GoogleCloudRetailV2alphaImportUserEventsResponse.decode(
      value,
      options
    )
  end
end
# Encodes the model via the shared Gax base encoder (skips nil fields per the
# ModelBase conventions — confirm against GoogleApi.Gax.ModelBase).
defimpl Poison.Encoder,
  for: GoogleApi.Retail.V2.Model.GoogleCloudRetailV2alphaImportUserEventsResponse do
  def encode(value, options) do
    GoogleApi.Gax.ModelBase.encode(value, options)
  end
end
| 43.428571 | 208 | 0.761696 |
9e55c7dd6bb73a23c1eb0ac3c7274878efa4645a | 7,265 | ex | Elixir | farmbot_core/lib/farmbot_celery_script/compilers/axis_control_compiler.ex | gdwb/farmbot_os | 0ef2697c580c9fbf37a22daa063a64addfcb778d | [
"MIT"
] | 1 | 2021-08-23T13:36:14.000Z | 2021-08-23T13:36:14.000Z | farmbot_core/lib/farmbot_celery_script/compilers/axis_control_compiler.ex | gdwb/farmbot_os | 0ef2697c580c9fbf37a22daa063a64addfcb778d | [
"MIT"
] | null | null | null | farmbot_core/lib/farmbot_celery_script/compilers/axis_control_compiler.ex | gdwb/farmbot_os | 0ef2697c580c9fbf37a22daa063a64addfcb778d | [
"MIT"
] | null | null | null | defmodule FarmbotCeleryScript.Compiler.AxisControl do
alias FarmbotCeleryScript.Compiler
  # Compiles move_absolute: resolves the location and offset nodes to concrete
  # [x, y, z] values at compile time, then returns a quoted block that adds the
  # offset to the location and performs the move at runtime.
  def move_absolute(%{args: %{location: location,offset: offset,speed: speed}}, cs_scope) do
    [locx , locy , locz] = cs_to_xyz(location, cs_scope)
    [offx, offy, offz] = cs_to_xyz(offset, cs_scope)
    quote location: :keep do
      # Add the offset to the location, component-wise.
      # Note: list syntax here for readability.
      [x, y, z] = [
        unquote(locx) + unquote(offx),
        unquote(locy) + unquote(offy),
        unquote(locz) + unquote(offz)
      ]
      x_str = FarmbotCeleryScript.FormatUtil.format_float(x)
      y_str = FarmbotCeleryScript.FormatUtil.format_float(y)
      z_str = FarmbotCeleryScript.FormatUtil.format_float(z)
      FarmbotCeleryScript.SysCalls.log(
        "Moving to (#{x_str}, #{y_str}, #{z_str})",
        true
      )
      FarmbotCeleryScript.SysCalls.move_absolute(
        x,
        y,
        z,
        unquote(Compiler.celery_to_elixir(speed, cs_scope))
      )
    end
  end
  # compiles move_relative into move absolute: the emitted code reads the
  # current position at runtime, adds the requested deltas, and moves there.
  # The `with` has no else — if any read fails (non-matching value, e.g. an
  # error tuple), that value is returned as-is and the move is skipped.
  def move_relative(%{args: %{x: x, y: y, z: z, speed: speed}}, cs_scope) do
    quote location: :keep do
      with locx when is_number(locx) <- unquote(Compiler.celery_to_elixir(x, cs_scope)),
           locy when is_number(locy) <- unquote(Compiler.celery_to_elixir(y, cs_scope)),
           locz when is_number(locz) <- unquote(Compiler.celery_to_elixir(z, cs_scope)),
           curx when is_number(curx) <-
             FarmbotCeleryScript.SysCalls.get_current_x(),
           cury when is_number(cury) <-
             FarmbotCeleryScript.SysCalls.get_current_y(),
           curz when is_number(curz) <-
             FarmbotCeleryScript.SysCalls.get_current_z() do
        # Combine them
        x = locx + curx
        y = locy + cury
        z = locz + curz
        x_str = FarmbotCeleryScript.FormatUtil.format_float(x)
        y_str = FarmbotCeleryScript.FormatUtil.format_float(y)
        z_str = FarmbotCeleryScript.FormatUtil.format_float(z)
        FarmbotCeleryScript.SysCalls.log(
          "Moving relative to (#{x_str}, #{y_str}, #{z_str})",
          true
        )
        FarmbotCeleryScript.SysCalls.move_absolute(
          x,
          y,
          z,
          unquote(Compiler.celery_to_elixir(speed, cs_scope))
        )
      end
    end
  end
  # Expands find_home(all) into three find_home/1 calls, in z -> y -> x order
  # (presumably so the Z axis is cleared first — confirm with hardware docs).
  # Short-circuits: a non-:ok result from z or y is returned without homing x.
  def find_home(%{args: %{axis: "all"}}, _cs_scope) do
    quote location: :keep do
      FarmbotCeleryScript.SysCalls.log("Finding home on all axes", true)
      with :ok <- FarmbotCeleryScript.SysCalls.find_home("z"),
           :ok <- FarmbotCeleryScript.SysCalls.find_home("y") do
        FarmbotCeleryScript.SysCalls.find_home("x")
      end
    end
  end
  # compiles find_home for a single axis. The emitted code validates the axis
  # name at runtime; the else clause passes through {:error, reason} (any other
  # non-matching value would raise WithClauseError).
  def find_home(%{args: %{axis: axis}}, cs_scope) do
    quote location: :keep do
      with axis when axis in ["x", "y", "z"] <-
             unquote(Compiler.celery_to_elixir(axis, cs_scope)) do
        FarmbotCeleryScript.SysCalls.log(
          "Finding home on the #{String.upcase(axis)} axis",
          true
        )
        FarmbotCeleryScript.SysCalls.find_home(axis)
      else
        {:error, reason} ->
          {:error, reason}
      end
    end
  end
  # Expands home(all) into three home/1 calls (z -> y -> x, like find_home).
  # Speed is resolved once and shared; a non-:ok result short-circuits.
  def home(%{args: %{axis: "all", speed: speed}}, cs_scope) do
    quote location: :keep do
      FarmbotCeleryScript.SysCalls.log("Going to home on all axes", true)
      with speed when is_number(speed) <-
             unquote(Compiler.celery_to_elixir(speed, cs_scope)),
           :ok <- FarmbotCeleryScript.SysCalls.home("z", speed),
           :ok <- FarmbotCeleryScript.SysCalls.home("y", speed) do
        FarmbotCeleryScript.SysCalls.home("x", speed)
      end
    end
  end
  # compiles home for a single axis; validates the axis name and speed at
  # runtime, passing through {:error, reason} via the else clause.
  def home(%{args: %{axis: axis, speed: speed}}, cs_scope) do
    quote location: :keep do
      with axis when axis in ["x", "y", "z"] <-
             unquote(Compiler.celery_to_elixir(axis, cs_scope)),
           speed when is_number(speed) <-
             unquote(Compiler.celery_to_elixir(speed, cs_scope)) do
        FarmbotCeleryScript.SysCalls.log(
          "Going to home on the #{String.upcase(axis)} axis",
          true
        )
        FarmbotCeleryScript.SysCalls.home(axis, speed)
      else
        {:error, reason} ->
          {:error, reason}
      end
    end
  end
  # Expands zero(all) into three zero/1 calls (z -> y -> x); a non-:ok result
  # from z or y is returned without zeroing x.
  def zero(%{args: %{axis: "all"}}, _cs_scope) do
    quote location: :keep do
      FarmbotCeleryScript.SysCalls.log("Setting home for all axes", true)
      with :ok <- FarmbotCeleryScript.SysCalls.zero("z"),
           :ok <- FarmbotCeleryScript.SysCalls.zero("y") do
        FarmbotCeleryScript.SysCalls.zero("x")
      end
    end
  end
  # compiles zero for a single axis; validates the axis name at runtime and
  # passes through {:error, reason} via the else clause.
  def zero(%{args: %{axis: axis}}, cs_scope) do
    quote location: :keep do
      with axis when axis in ["x", "y", "z"] <-
             unquote(Compiler.celery_to_elixir(axis, cs_scope)) do
        FarmbotCeleryScript.SysCalls.log(
          "Setting home for the #{String.upcase(axis)} axis",
          true
        )
        FarmbotCeleryScript.SysCalls.zero(axis)
      else
        {:error, reason} ->
          {:error, reason}
      end
    end
  end
  # Expands calibrate(all) into three calibrate/1 calls (z -> y -> x).
  # NOTE(review): unlike find_home("all")/home("all")/zero("all") above, this
  # `with` carries an explicit error-passthrough else — any non-matching value
  # other than {:error, reason} would raise WithClauseError here; confirm
  # whether the asymmetry is intentional.
  def calibrate(%{args: %{axis: "all"}}, _cs_scope) do
    quote location: :keep do
      FarmbotCeleryScript.SysCalls.log("Finding length of all axes", true)
      with :ok <- FarmbotCeleryScript.SysCalls.calibrate("z"),
           :ok <- FarmbotCeleryScript.SysCalls.calibrate("y") do
        FarmbotCeleryScript.SysCalls.calibrate("x")
      else
        {:error, reason} -> {:error, reason}
      end
    end
  end
  # compiles calibrate for a single axis; validates the axis name at runtime
  # and passes through {:error, reason} via the else clause.
  def calibrate(%{args: %{axis: axis}}, cs_scope) do
    quote location: :keep do
      with axis when axis in ["x", "y", "z"] <-
             unquote(Compiler.celery_to_elixir(axis, cs_scope)) do
        msg = "Determining length of the #{String.upcase(axis)} axis"
        FarmbotCeleryScript.SysCalls.log(msg, true)
        FarmbotCeleryScript.SysCalls.calibrate(axis)
      else
        {:error, reason} -> {:error, reason}
      end
    end
  end
  # Resolves a CeleryScript location node to an [x, y, z] list at compile time.
  # Identifier: look the variable up in the compiler scope and resolve it.
  defp cs_to_xyz(%{kind: :identifier} = ast, cs_scope) do
    label = ast.args.label
    {:ok, variable} = FarmbotCeleryScript.Compiler.Scope.fetch!(cs_scope, label)
    # Prevent circular references.
    # I doubt end users would intentionally do this, so treat
    # it like an error.
    if variable.kind == :identifier, do: raise "Refusing to perform recursion"
    cs_to_xyz(variable, cs_scope)
  end
  # Coordinate: the x/y/z values are carried directly in the node args.
  defp cs_to_xyz(%{kind: :coordinate} = ast, _) do
    vec_map_to_array(ast.args)
  end
  # Tool: resolve to the toolslot that currently holds the tool.
  defp cs_to_xyz(%{kind: :tool, args: args}, _) do
    slot = FarmbotCeleryScript.SysCalls.get_toolslot_for_tool(args.tool_id)
    vec_map_to_array(slot)
  end
  # Point: look the point up by type and id.
  defp cs_to_xyz(%{kind: :point} = ast, _) do
    %{ pointer_type: t, pointer_id: id } = ast.args
    vec_map_to_array(FarmbotCeleryScript.SysCalls.point(t, id))
  end
  # Any other node kind is a compile error.
  defp cs_to_xyz(other, _), do: raise "Unexpected location or offset: #{inspect(other)}"
  # Converts a %{x: _, y: _, z: _} map into an [x, y, z] list.
  defp vec_map_to_array(xyz), do: [ xyz.x, xyz.y, xyz.z ]
end
| 32.873303 | 92 | 0.625465 |
9e55d31498bbe55ef487c60212d6ad04edfe3616 | 2,286 | ex | Elixir | insights/lib/insights_web/telemetry.ex | MillionIntegrals/elixir-mongodb-driver | 96c4cc3f21c4043323b8a9b33ad3a374760864c6 | [
"Apache-2.0"
] | null | null | null | insights/lib/insights_web/telemetry.ex | MillionIntegrals/elixir-mongodb-driver | 96c4cc3f21c4043323b8a9b33ad3a374760864c6 | [
"Apache-2.0"
] | null | null | null | insights/lib/insights_web/telemetry.ex | MillionIntegrals/elixir-mongodb-driver | 96c4cc3f21c4043323b8a9b33ad3a374760864c6 | [
"Apache-2.0"
] | null | null | null | defmodule InsightsWeb.Telemetry do
use Supervisor
import Telemetry.Metrics
def start_link(arg) do
Supervisor.start_link(__MODULE__, arg, name: __MODULE__)
end
  @impl true
  # Supervisor callback: starts the periodic telemetry poller; reporters can
  # be added to the child list as needed.
  def init(_arg) do
    children = [
      # Telemetry poller will execute the given period measurements
      # every 10_000ms. Learn more here: https://hexdocs.pm/telemetry_metrics
      {:telemetry_poller, measurements: periodic_measurements(), period: 10_000}
      # Add reporters as children of your supervision tree.
      # {Telemetry.Metrics.ConsoleReporter, metrics: metrics()}
    ]
    Supervisor.init(children, strategy: :one_for_one)
  end
  # Metric definitions consumed by reporters (e.g. the commented-out console
  # reporter in init/1, or a dashboard). All durations are converted from
  # native time units to milliseconds.
  def metrics do
    [
      # Phoenix Metrics
      summary("phoenix.endpoint.stop.duration",
        unit: {:native, :millisecond}
      ),
      summary("phoenix.router_dispatch.stop.duration",
        tags: [:route],
        unit: {:native, :millisecond}
      ),
      # Database Metrics
      summary("insights.repo.query.total_time",
        unit: {:native, :millisecond},
        description: "The sum of the other measurements"
      ),
      summary("insights.repo.query.decode_time",
        unit: {:native, :millisecond},
        description: "The time spent decoding the data received from the database"
      ),
      summary("insights.repo.query.query_time",
        unit: {:native, :millisecond},
        description: "The time spent executing the query"
      ),
      summary("insights.repo.query.queue_time",
        unit: {:native, :millisecond},
        description: "The time spent waiting for a database connection"
      ),
      summary("insights.repo.query.idle_time",
        unit: {:native, :millisecond},
        description:
          "The time the connection spent waiting before being checked out for the query"
      ),
      # VM Metrics
      summary("vm.memory.total", unit: {:byte, :kilobyte}),
      summary("vm.total_run_queue_lengths.total"),
      summary("vm.total_run_queue_lengths.cpu"),
      summary("vm.total_run_queue_lengths.io")
    ]
  end
  # Measurements polled by :telemetry_poller on the interval set in init/1.
  # Currently empty — add {Module, :fun, args} entries that call
  # :telemetry.execute/3, with matching metrics declared above.
  defp periodic_measurements do
    [
      # A module, function and arguments to be invoked periodically.
      # This function must call :telemetry.execute/3 and a metric must be added above.
      # {InsightsWeb, :count_users, []}
    ]
  end
end
| 31.75 | 88 | 0.654418 |
9e560928504aab74be617723b1adfbfd372e66f1 | 122 | ex | Elixir | debian/mesa.cron.d.ex | SoftReaper/Mesa-Renoir-deb | 8d1de1f66058d62b41fe55d36522efea2bdf996d | [
"MIT"
] | null | null | null | debian/mesa.cron.d.ex | SoftReaper/Mesa-Renoir-deb | 8d1de1f66058d62b41fe55d36522efea2bdf996d | [
"MIT"
] | null | null | null | debian/mesa.cron.d.ex | SoftReaper/Mesa-Renoir-deb | 8d1de1f66058d62b41fe55d36522efea2bdf996d | [
"MIT"
] | null | null | null | #
# Regular cron jobs for the mesa package
#
0 4 * * * root [ -x /usr/bin/mesa_maintenance ] && /usr/bin/mesa_maintenance
| 24.4 | 76 | 0.680328 |
9e56138f23e38b7eaf5bbef374796fe684e293a3 | 7,086 | ex | Elixir | lib/teiserver_web/router.ex | Born2Crawl/teiserver | b89601aea1a338cd41b4eabc4092207cb50110f0 | [
"MIT"
] | null | null | null | lib/teiserver_web/router.ex | Born2Crawl/teiserver | b89601aea1a338cd41b4eabc4092207cb50110f0 | [
"MIT"
] | null | null | null | lib/teiserver_web/router.ex | Born2Crawl/teiserver | b89601aea1a338cd41b4eabc4092207cb50110f0 | [
"MIT"
] | null | null | null | defmodule TeiserverWeb.Router do
  # `use TeiserverWeb.Router` imports this module so the host router can call
  # the teiserver_routes/0 macro below.
  defmacro __using__(_opts \\ []) do
    quote do
      import unquote(__MODULE__)
    end
  end
  @doc """
  Injects all Teiserver routes (account, clans, games, battles, reports,
  engine, JSON API and admin scopes) into the calling Phoenix router.

  The generated code relies on pipelines (`:browser`, `:api`, `:token_api`,
  `:admin_layout`, `:blank_layout`, `:protected`) being defined by the host
  router.
  """
  defmacro teiserver_routes() do
    quote do
      scope "/", TeiserverWeb.General, as: :ts_general do
        pipe_through([:browser, :blank_layout])
        get("/privacy_policy", GeneralController, :gdpr)
        get("/gdpr", GeneralController, :gdpr)
      end
      scope "/teiserver", TeiserverWeb.General, as: :ts_general do
        pipe_through([:browser, :admin_layout, :protected])
        get("/", GeneralController, :index)
      end
      # ts_account_X_path
      scope "/teiserver/account", TeiserverWeb.Account, as: :ts_account do
        pipe_through([:browser, :admin_layout, :protected])
        get("/relationships", RelationshipsController, :index)
        post("/relationships/find/", RelationshipsController, :find)
        post("/relationships/create/:action/:target", RelationshipsController, :create)
        put("/relationships/update/:action/:target", RelationshipsController, :update)
        delete("/relationships/delete/:action/:target", RelationshipsController, :delete)
        resources("/preferences", PreferencesController,
          only: [:index, :edit, :update, :new, :create]
        )
        get("/", GeneralController, :index)
      end
      # ts_clans_X_path
      scope "/teiserver/clans", TeiserverWeb.Clans, as: :ts_clans do
        pipe_through([:browser, :admin_layout, :protected])
        get("/", ClanController, :index)
        get("/:name", ClanController, :show)
        put("/update/:clan_id", ClanController, :update)
        get("/set_default/:id", ClanController, :set_default)
        post("/create_invite", ClanController, :create_invite)
        delete("/delete_invite/:clan_id/:user_id", ClanController, :delete_invite)
        put("/respond_to_invite/:clan_id/:response", ClanController, :respond_to_invite)
        delete("/delete_membership/:clan_id/:user_id", ClanController, :delete_membership)
        put("/promote/:clan_id/:user_id", ClanController, :promote)
        put("/demote/:clan_id/:user_id", ClanController, :demote)
      end
      scope "/teiserver/games", TeiserverWeb.Game, as: :ts_game do
        pipe_through([:browser, :admin_layout, :protected])
        resources("/tournaments", TournamentController)
        resources("/queues", QueueController)
      end
      scope "/teiserver/battle", TeiserverWeb.Battle, as: :ts_battle do
        pipe_through([:browser, :admin_layout, :protected])
        get("/", GeneralController, :index)
      end
      scope "/teiserver/battle", TeiserverWeb.Battle, as: :ts_battle do
        pipe_through([:browser, :admin_layout, :protected])
        resources("/matches", MatchController, only: [:index, :show, :delete])
      end
      # LiveView lobby pages
      scope "/teiserver/battle", TeiserverWeb.Battle.LobbyLive, as: :ts_battle do
        pipe_through([:browser, :admin_layout, :protected])
        live("/lobbies", Index, :index)
        live("/lobbies/:id", Show, :show)
      end
      scope "/teiserver/game_live", TeiserverWeb.Matchmaking.QueueLive, as: :ts_game do
        pipe_through([:browser, :admin_layout, :protected])
        live("/queues", Index, :index)
        live("/queues/:id", Show, :show)
      end
      # REPORTING
      scope "/teiserver/reports", TeiserverWeb.Report, as: :ts_reports do
        pipe_through([:browser, :admin_layout, :protected])
        get("/", GeneralController, :index)
        get("/day_metrics/today", MetricController, :day_metrics_today)
        get("/day_metrics/show/:date", MetricController, :day_metrics_show)
        get("/day_metrics/export/:date", MetricController, :day_metrics_export)
        get("/day_metrics/graph", MetricController, :day_metrics_graph)
        post("/day_metrics/graph", MetricController, :day_metrics_graph)
        get("/day_metrics", MetricController, :day_metrics_list)
        post("/day_metrics", MetricController, :day_metrics_list)
        get("/client_events/export/form", ClientEventController, :export_form)
        post("/client_events/export/post", ClientEventController, :export_post)
        get("/client_events/summary", ClientEventController, :summary)
        get("/client_events/property/:property_name/detail", ClientEventController, :property_detail)
        get("/client_events/event/:event_name/detail", ClientEventController, :event_detail)
        get("/show/:name", ReportController, :show)
        post("/show/:name", ReportController, :show)
      end
      # ts_engine_X_path
      scope "/teiserver/engine", TeiserverWeb.Engine, as: :ts_engine do
        pipe_through([:browser, :admin_layout, :protected])
        resources("/unit", UnitController)
      end
      # API
      scope "/teiserver/api", TeiserverWeb.API do
        pipe_through :api
        post "/login", SessionController, :login
      end
      scope "/teiserver/api", TeiserverWeb.API do
        pipe_through([:token_api])
        post "/battle/create", BattleController, :create
      end
      # ADMIN
      scope "/teiserver/admin", TeiserverWeb.ClientLive, as: :ts_admin do
        pipe_through([:browser, :admin_layout, :protected])
        live("/client", Index, :index)
        live("/client/:id", Show, :show)
      end
      scope "/teiserver/admin", TeiserverWeb.AgentLive, as: :ts_admin do
        pipe_through([:browser, :admin_layout, :protected])
        live("/agent", Index, :index)
        # live("/agent/:id", Show, :show)
      end
      scope "/teiserver/admin", TeiserverWeb.Admin, as: :ts_admin do
        pipe_through([:browser, :admin_layout, :protected])
        get("/", GeneralController, :index)
        get("/metrics", GeneralController, :metrics)
        get("/tools", ToolController, :index)
        get("/tools/convert", ToolController, :convert_form)
        post("/tools/convert_post", ToolController, :convert_post)
        post("/clans/create_membership", ClanController, :create_membership)
        delete("/clans/delete_membership/:clan_id/:user_id", ClanController, :delete_membership)
        delete("/clans/delete_invite/:clan_id/:user_id", ClanController, :delete_invite)
        put("/clans/promote/:clan_id/:user_id", ClanController, :promote)
        put("/clans/demote/:clan_id/:user_id", ClanController, :demote)
        resources("/clans", ClanController)
        resources("/parties", PartyController)
        # resources("/tournaments", TournamentController)
        get("/users/reset_password/:id", UserController, :reset_password)
        get("/users/action/:id/:action", UserController, :perform_action)
        put("/users/action/:id/:action", UserController, :perform_action)
        get("/users/reports/:id/respond", UserController, :respond_form)
        put("/users/reports/:id/respond", UserController, :respond_post)
        get("/users/smurf_search/:id", UserController, :smurf_search)
        get("/users/search", UserController, :index)
        post("/users/search", UserController, :search)
        resources("/user", UserController)
      end
    end
  end
end
| 38.721311 | 101 | 0.658482 |
9e56139251656e26ebe697f39ddce381aa6ff57a | 1,243 | exs | Elixir | apps/chat/mix.exs | mikemorris/elixir-eks-terraform | e1c8a4e7ba26bd85322dfcedd229b3558d9d5844 | [
"MIT"
] | 2 | 2019-06-27T11:51:11.000Z | 2020-06-04T16:00:55.000Z | apps/chat/mix.exs | mikemorris/elixir-eks-terraform | e1c8a4e7ba26bd85322dfcedd229b3558d9d5844 | [
"MIT"
] | null | null | null | apps/chat/mix.exs | mikemorris/elixir-eks-terraform | e1c8a4e7ba26bd85322dfcedd229b3558d9d5844 | [
"MIT"
] | null | null | null | defmodule Chat.MixProject do
use Mix.Project
  # Umbrella app configuration: build/deps/lockfile paths point at the
  # umbrella root two levels up.
  def project do
    [
      app: :chat,
      version: "0.1.0",
      build_path: "../../_build",
      config_path: "../../config/config.exs",
      deps_path: "../../deps",
      lockfile: "../../mix.lock",
      elixir: "~> 1.5",
      elixirc_paths: elixirc_paths(Mix.env()),
      compilers: [:phoenix, :gettext] ++ Mix.compilers(),
      start_permanent: Mix.env() == :prod,
      deps: deps()
    ]
  end
  # Configuration for the OTP application.
  #
  # Type `mix help compile.app` for more information.
  def application do
    [
      # Entry point: Chat.Application.start/2 is invoked on boot.
      mod: {Chat.Application, []},
      extra_applications: [:logger, :runtime_tools]
    ]
  end
# Specifies which paths to compile per environment.
defp elixirc_paths(:test), do: ["lib", "test/support"]
defp elixirc_paths(_), do: ["lib"]
  # Specifies your project dependencies.
  #
  # Type `mix help deps` for examples and options.
  defp deps do
    [
      {:phoenix, "~> 1.4.0"},
      {:phoenix_pubsub, "~> 1.1"},
      {:phoenix_html, "~> 2.11"},
      {:phoenix_live_reload, "~> 1.2", only: :dev},
      {:gettext, "~> 0.11"},
      # Sibling app in the same umbrella.
      {:core, in_umbrella: true},
      {:jason, "~> 1.0"},
      {:plug_cowboy, "~> 2.0"}
    ]
  end
end
| 24.86 | 57 | 0.561545 |
9e565144c507195b0967ea05cf78f205e51215bb | 3,674 | ex | Elixir | lib/membrane/rtp/twcc_receiver/packet_info_store.ex | membraneframework/membrane_rtp | 1df5f0bdb66d5c89bc63122c8b29af6cd4e600aa | [
"Apache-2.0"
] | null | null | null | lib/membrane/rtp/twcc_receiver/packet_info_store.ex | membraneframework/membrane_rtp | 1df5f0bdb66d5c89bc63122c8b29af6cd4e600aa | [
"Apache-2.0"
] | 2 | 2020-04-01T14:06:34.000Z | 2020-04-08T11:48:59.000Z | lib/membrane/rtp/twcc_receiver/packet_info_store.ex | membraneframework/membrane_rtp | 1df5f0bdb66d5c89bc63122c8b29af6cd4e600aa | [
"Apache-2.0"
] | null | null | null | defmodule Membrane.RTP.TWCCReceiver.PacketInfoStore do
@moduledoc false
# The module stores TWCC sequence number along with their arrival timestamps, handling sequence
# number rollovers if necessary. Stored packet info can used for generating statistics used for
# assembling a TWCC feedback packet.
alias Membrane.Time
alias Membrane.RTP.Utils
require Bitwise
  # base_seq_num / max_seq_num: lowest and highest (rollover-extended) TWCC
  # sequence numbers seen; nil until the first packet is inserted.
  # seq_to_timestamp: arrival timestamp per (extended) sequence number.
  defstruct base_seq_num: nil,
            max_seq_num: nil,
            seq_to_timestamp: %{}
  @type t :: %__MODULE__{
          base_seq_num: non_neg_integer(),
          max_seq_num: non_neg_integer(),
          seq_to_timestamp: %{non_neg_integer() => Time.t()}
        }
  @type stats_t :: %{
          base_seq_num: non_neg_integer(),
          packet_status_count: non_neg_integer(),
          receive_deltas: [Time.t() | :not_received],
          reference_time: Time.t()
        }
  # TWCC sequence numbers are 16-bit, so they wrap at 2^16.
  @seq_number_limit Bitwise.bsl(1, 16)
@spec empty?(__MODULE__.t()) :: boolean
def empty?(%__MODULE__{base_seq_num: base_seq_num}), do: base_seq_num == nil
  @spec insert_packet_info(__MODULE__.t(), non_neg_integer()) :: __MODULE__.t()
  # Records the arrival timestamp for a TWCC sequence number, extending the
  # sequence number past 2^16 on rollover and updating the base/max bounds.
  def insert_packet_info(store, seq_num) do
    arrival_ts = Time.vm_time()
    {store, seq_num} = maybe_handle_rollover(store, seq_num)
    # When the store is empty (bounds are nil), the new seq_num becomes both
    # base and max: Erlang term ordering puts numbers before atoms, so
    # min(nil, n) == n, while max(nil, n) == nil and falls back via `||`.
    %{
      store
      | base_seq_num: min(store.base_seq_num, seq_num) || seq_num,
        max_seq_num: max(store.max_seq_num, seq_num) || seq_num,
        seq_to_timestamp: Map.put(store.seq_to_timestamp, seq_num, arrival_ts)
    }
  end
  @spec get_stats(__MODULE__.t()) :: stats_t()
  # Produces the aggregate fields needed to assemble a TWCC feedback packet:
  # the packet status count covers the whole [base, max] range, including
  # gaps, which appear as :not_received in the deltas.
  def get_stats(store) do
    {reference_time, receive_deltas} = make_receive_deltas(store)
    packet_status_count = store.max_seq_num - store.base_seq_num + 1
    %{
      base_seq_num: store.base_seq_num,
      packet_status_count: packet_status_count,
      reference_time: reference_time,
      receive_deltas: receive_deltas
    }
  end
defp maybe_handle_rollover(store, new_seq_num) do
%{
base_seq_num: base_seq_num,
max_seq_num: max_seq_num,
seq_to_timestamp: seq_to_timestamp
} = store
case Utils.from_which_rollover(base_seq_num, new_seq_num, @seq_number_limit) do
:current ->
{store, new_seq_num}
:next ->
{store, new_seq_num + @seq_number_limit}
:previous ->
shifted_seq_to_timestamp =
Map.new(seq_to_timestamp, fn {seq_num, timestamp} ->
{seq_num + @seq_number_limit, timestamp}
end)
store = %{
store
| base_seq_num: new_seq_num,
max_seq_num: max_seq_num + @seq_number_limit,
seq_to_timestamp: shifted_seq_to_timestamp
}
{store, new_seq_num}
end
end
defp make_receive_deltas(store) do
%{
base_seq_num: base_seq_num,
max_seq_num: max_seq_num,
seq_to_timestamp: seq_to_timestamp
} = store
# reference time has to be in 64ms resolution
# https://datatracker.ietf.org/doc/html/draft-holmer-rmcat-transport-wide-cc-extensions-01#section-3.1
reference_time =
seq_to_timestamp
|> Map.fetch!(base_seq_num)
|> make_divisible_by_64ms()
receive_deltas =
base_seq_num..max_seq_num
|> Enum.map_reduce(reference_time, fn seq_num, previous_timestamp ->
case Map.get(seq_to_timestamp, seq_num) do
nil ->
{:not_received, previous_timestamp}
timestamp ->
delta = timestamp - previous_timestamp
{delta, timestamp}
end
end)
|> elem(0)
{reference_time, receive_deltas}
end
defp make_divisible_by_64ms(timestamp) do
timestamp - rem(timestamp, Time.milliseconds(64))
end
end
| 28.929134 | 106 | 0.662221 |
9e56533e910ac5108ff09fc973959bd12882cda3 | 639 | ex | Elixir | apps/tai/lib/tai/venues/fee_info.ex | ihorkatkov/tai | 09f9f15d2c385efe762ae138a8570f1e3fd41f26 | [
"MIT"
] | 1 | 2019-12-19T05:16:26.000Z | 2019-12-19T05:16:26.000Z | apps/tai/lib/tai/venues/fee_info.ex | ihorkatkov/tai | 09f9f15d2c385efe762ae138a8570f1e3fd41f26 | [
"MIT"
] | null | null | null | apps/tai/lib/tai/venues/fee_info.ex | ihorkatkov/tai | 09f9f15d2c385efe762ae138a8570f1e3fd41f26 | [
"MIT"
] | null | null | null | defmodule Tai.Venues.FeeInfo do
@type fee_type :: :percent
@type t :: %Tai.Venues.FeeInfo{
venue_id: Tai.Venues.Adapter.venue_id(),
account_id: Tai.Venues.Adapter.account_id(),
symbol: Tai.Venues.Product.symbol(),
maker: Decimal.t(),
maker_type: fee_type,
taker: Decimal.t(),
taker_type: fee_type
}
@enforce_keys ~w(
venue_id
account_id
symbol
maker
maker_type
taker
taker_type
)a
defstruct ~w(
venue_id
account_id
symbol
maker
maker_type
taker
taker_type
)a
def percent, do: :percent
end
| 18.794118 | 54 | 0.594679 |
9e5655799888e9ab758cf3bd548e3c1214eeb51c | 363 | exs | Elixir | test/angle_test.exs | devstopfix/elixoids | df4f2ba9ddb3d13c3c86e57f100c3d57fa64ed8d | [
"MIT"
] | 5 | 2016-07-05T13:42:33.000Z | 2020-12-07T14:12:16.000Z | test/angle_test.exs | devstopfix/elixoids | df4f2ba9ddb3d13c3c86e57f100c3d57fa64ed8d | [
"MIT"
] | 70 | 2016-06-04T11:31:27.000Z | 2020-11-21T20:00:09.000Z | test/angle_test.exs | devstopfix/elixoids | df4f2ba9ddb3d13c3c86e57f100c3d57fa64ed8d | [
"MIT"
] | 1 | 2016-07-05T17:10:05.000Z | 2016-07-05T17:10:05.000Z | defmodule World.AngleTest do
use ExUnit.Case, async: true
import Elixoids.World.Angle
test "Wrap angle" do
assert 0.0 == normalize_radians(0.0)
end
test "Wrap negative angle" do
assert :math.pi() == normalize_radians(-1 * :math.pi())
end
test "Wrap overflow angle" do
assert :math.pi() == normalize_radians(3 * :math.pi())
end
end
| 20.166667 | 59 | 0.669421 |
9e567b5a14ca5326e7abab675c586a2694edb52e | 11,272 | ex | Elixir | lib/livebook/live_markdown/import.ex | rodrigues/livebook | 9822735bcf0b5bffbbc2bd59a7b942e81276ffe3 | [
"Apache-2.0"
] | null | null | null | lib/livebook/live_markdown/import.ex | rodrigues/livebook | 9822735bcf0b5bffbbc2bd59a7b942e81276ffe3 | [
"Apache-2.0"
] | null | null | null | lib/livebook/live_markdown/import.ex | rodrigues/livebook | 9822735bcf0b5bffbbc2bd59a7b942e81276ffe3 | [
"Apache-2.0"
] | null | null | null | defmodule Livebook.LiveMarkdown.Import do
alias Livebook.Notebook
alias Livebook.LiveMarkdown.MarkdownHelpers
@doc """
Converts the given Markdown document into a notebook data structure.
Returns the notebook structure and a list of informative messages/warnings
related to the imported input.
"""
@spec notebook_from_markdown(String.t()) :: {Notebook.t(), list(String.t())}
def notebook_from_markdown(markdown) do
{_, ast, earmark_messages} = MarkdownHelpers.markdown_to_block_ast(markdown)
earmark_messages = Enum.map(earmark_messages, &earmark_message_to_string/1)
{ast, rewrite_messages} = rewrite_ast(ast)
notebook =
ast
|> group_elements()
|> build_notebook()
|> postprocess_notebook()
{notebook, earmark_messages ++ rewrite_messages}
end
defp earmark_message_to_string({_severity, line_number, message}) do
"Line #{line_number}: #{message}"
end
# Does initial pre-processing of the AST, so that it conforms to the expected form.
# Returns {altered_ast, messages}.
defp rewrite_ast(ast) do
{ast, messages1} = rewrite_multiple_primary_headings(ast)
{ast, messages2} = move_primary_heading_top(ast)
ast = trim_comments(ast)
{ast, messages1 ++ messages2}
end
# There should be only one h1 tag indicating notebook name,
# if there are many we downgrade all headings.
# This doesn't apply to documents exported from Livebook,
# but may be the case for an arbitrary markdown file,
# so we do our best to preserve the intent.
defp rewrite_multiple_primary_headings(ast) do
primary_headings = Enum.count(ast, &(tag(&1) == "h1"))
if primary_headings > 1 do
ast = Enum.map(ast, &downgrade_heading/1)
message =
"Downgrading all headings, because #{primary_headings} instances of heading 1 were found"
{ast, [message]}
else
{ast, []}
end
end
defp downgrade_heading({"h1", attrs, content, meta}), do: {"h2", attrs, content, meta}
defp downgrade_heading({"h2", attrs, content, meta}), do: {"h3", attrs, content, meta}
defp downgrade_heading({"h3", attrs, content, meta}), do: {"h4", attrs, content, meta}
defp downgrade_heading({"h4", attrs, content, meta}), do: {"h5", attrs, content, meta}
defp downgrade_heading({"h5", attrs, content, meta}), do: {"h6", attrs, content, meta}
defp downgrade_heading({"h6", attrs, content, meta}), do: {"strong", attrs, content, meta}
defp downgrade_heading(ast_node), do: ast_node
# This moves h1 together with any preceding comments to the top.
defp move_primary_heading_top(ast) do
case Enum.split_while(ast, &(tag(&1) != "h1")) do
{_ast, []} ->
{ast, []}
{leading, [heading | rest]} ->
{leading, comments} = split_while_right(leading, &(tag(&1) == :comment))
if leading == [] do
{ast, []}
else
ast = comments ++ [heading] ++ leading ++ rest
message = "Moving heading 1 to the top of the notebook"
{ast, [message]}
end
end
end
defp tag(ast_node)
defp tag({tag, _, _, _}), do: tag
defp tag(_), do: nil
defp split_while_right(list, fun) do
{right_rev, left_rev} = list |> Enum.reverse() |> Enum.split_while(fun)
{Enum.reverse(left_rev), Enum.reverse(right_rev)}
end
# Trims one-line comments to allow nice pattern matching
# on Livebook-specific annotations with no regard to surrounding whitespace.
defp trim_comments(ast) do
Enum.map(ast, fn
{:comment, attrs, [line], %{comment: true}} ->
{:comment, attrs, [String.trim(line)], %{comment: true}}
ast_node ->
ast_node
end)
end
# Builds a list of classified elements from the AST.
defp group_elements(ast, elems \\ [])
defp group_elements([], elems), do: elems
defp group_elements([{"h1", _, content, %{}} | ast], elems) do
group_elements(ast, [{:notebook_name, content} | elems])
end
defp group_elements([{"h2", _, content, %{}} | ast], elems) do
group_elements(ast, [{:section_name, content} | elems])
end
# The <!-- livebook:{"force_markdown":true} --> annotation forces the next node
# to be interpreted as Markdown cell content.
defp group_elements(
[
{:comment, _, [~s/livebook:{"force_markdown":true}/], %{comment: true}},
ast_node | ast
],
[{:cell, :markdown, md_ast} | rest]
) do
group_elements(ast, [{:cell, :markdown, [ast_node | md_ast]} | rest])
end
defp group_elements(
[
{:comment, _, [~s/livebook:{"force_markdown":true}/], %{comment: true}},
ast_node | ast
],
elems
) do
group_elements(ast, [{:cell, :markdown, [ast_node]} | elems])
end
defp group_elements(
[{:comment, _, [~s/livebook:{"break_markdown":true}/], %{comment: true}} | ast],
elems
) do
group_elements(ast, [{:cell, :markdown, []} | elems])
end
defp group_elements(
[{:comment, _, ["livebook:" <> json], %{comment: true}} | ast],
elems
) do
group_elements(ast, [livebook_json_to_element(json) | elems])
end
defp group_elements(
[{"pre", _, [{"code", [{"class", "elixir"}], [source], %{}}], %{}} | ast],
elems
) do
{outputs, ast} = take_outputs(ast, [])
group_elements(ast, [{:cell, :elixir, source, outputs} | elems])
end
defp group_elements([ast_node | ast], [{:cell, :markdown, md_ast} | rest]) do
group_elements(ast, [{:cell, :markdown, [ast_node | md_ast]} | rest])
end
defp group_elements([ast_node | ast], elems) do
group_elements(ast, [{:cell, :markdown, [ast_node]} | elems])
end
defp livebook_json_to_element(json) do
data = Jason.decode!(json)
case data do
%{"livebook_object" => "cell_input"} ->
{:cell, :input, data}
_ ->
{:metadata, data}
end
end
defp take_outputs(
[{"pre", _, [{"code", [{"class", "output"}], [output], %{}}], %{}} | ast],
outputs
) do
take_outputs(ast, [output | outputs])
end
defp take_outputs(ast, outputs), do: {outputs, ast}
# Builds a notebook from the list of elements obtained in the previous step.
# Note that the list of elements is reversed:
# first we group elements by traversing Earmark AST top-down
# and then aggregate elements into data strictures going bottom-up.
defp build_notebook(elems, cells \\ [], sections \\ [])
defp build_notebook([{:cell, :elixir, source, outputs} | elems], cells, sections) do
{metadata, elems} = grab_metadata(elems)
attrs = cell_metadata_to_attrs(:elixir, metadata)
outputs = Enum.map(outputs, &{:text, &1})
cell = %{Notebook.Cell.new(:elixir) | source: source, outputs: outputs} |> Map.merge(attrs)
build_notebook(elems, [cell | cells], sections)
end
defp build_notebook([{:cell, :markdown, md_ast} | elems], cells, sections) do
{metadata, elems} = grab_metadata(elems)
attrs = cell_metadata_to_attrs(:markdown, metadata)
source = md_ast |> Enum.reverse() |> MarkdownHelpers.markdown_from_ast()
cell = %{Notebook.Cell.new(:markdown) | source: source} |> Map.merge(attrs)
build_notebook(elems, [cell | cells], sections)
end
defp build_notebook([{:cell, :input, data} | elems], cells, sections) do
attrs = parse_input_attrs(data)
cell = Notebook.Cell.new(:input) |> Map.merge(attrs)
build_notebook(elems, [cell | cells], sections)
end
defp build_notebook([{:section_name, content} | elems], cells, sections) do
name = text_from_markdown(content)
{metadata, elems} = grab_metadata(elems)
attrs = section_metadata_to_attrs(metadata)
section = %{Notebook.Section.new() | name: name, cells: cells} |> Map.merge(attrs)
build_notebook(elems, [], [section | sections])
end
# If there are section-less cells, put them in a default one.
defp build_notebook([{:notebook_name, _content} | _] = elems, cells, sections)
when cells != [] do
section = %{Notebook.Section.new() | cells: cells}
build_notebook(elems, [], [section | sections])
end
# If there are section-less cells, put them in a default one.
defp build_notebook([] = elems, cells, sections) when cells != [] do
section = %{Notebook.Section.new() | cells: cells}
build_notebook(elems, [], [section | sections])
end
defp build_notebook([{:notebook_name, content} | elems], [], sections) do
name = text_from_markdown(content)
{metadata, []} = grab_metadata(elems)
attrs = notebook_metadata_to_attrs(metadata)
%{Notebook.new() | name: name, sections: sections} |> Map.merge(attrs)
end
# If there's no explicit notebook heading, use the defaults.
defp build_notebook([], [], sections) do
%{Notebook.new() | sections: sections}
end
defp text_from_markdown(markdown) do
markdown
|> MarkdownHelpers.markdown_to_ast()
|> elem(1)
|> MarkdownHelpers.text_from_ast()
end
# Takes optional leading metadata JSON object and returns {metadata, rest}.
defp grab_metadata([{:metadata, metadata} | elems]) do
{metadata, elems}
end
defp grab_metadata(elems), do: {%{}, elems}
defp parse_input_attrs(data) do
type = data["type"] |> String.to_existing_atom()
%{
type: type,
name: data["name"],
value: data["value"],
# Fields with implicit value
reactive: Map.get(data, "reactive", false),
props: data |> Map.get("props", %{}) |> parse_input_props(type)
}
end
defp parse_input_props(data, type) do
default_props = Notebook.Cell.Input.default_props(type)
Map.new(default_props, fn {key, default_value} ->
value = Map.get(data, to_string(key), default_value)
{key, value}
end)
end
defp notebook_metadata_to_attrs(metadata) do
Enum.reduce(metadata, %{}, fn
{"persist_outputs", persist_outputs}, attrs ->
Map.put(attrs, :persist_outputs, persist_outputs)
{"autosave_interval_s", autosave_interval_s}, attrs ->
Map.put(attrs, :autosave_interval_s, autosave_interval_s)
_entry, attrs ->
attrs
end)
end
defp section_metadata_to_attrs(metadata) do
Enum.reduce(metadata, %{}, fn
{"branch_parent_index", parent_idx}, attrs ->
# At this point we cannot extract other section id,
# so we temporarily keep the index
Map.put(attrs, :parent_id, {:idx, parent_idx})
_entry, attrs ->
attrs
end)
end
defp cell_metadata_to_attrs(:elixir, metadata) do
Enum.reduce(metadata, %{}, fn
{"disable_formatting", disable_formatting}, attrs ->
Map.put(attrs, :disable_formatting, disable_formatting)
_entry, attrs ->
attrs
end)
end
defp cell_metadata_to_attrs(_type, _metadata) do
%{}
end
defp postprocess_notebook(notebook) do
sections =
Enum.map(notebook.sections, fn section ->
# Set parent_id based on the persisted branch_parent_index if present
case section.parent_id do
nil ->
section
{:idx, parent_idx} ->
parent = Enum.at(notebook.sections, parent_idx)
%{section | parent_id: parent.id}
end
end)
%{notebook | sections: sections}
end
end
| 32.390805 | 97 | 0.64363 |
9e56a78061157c4b25a8b527bd074986d8255665 | 5,144 | ex | Elixir | lib/mix/tasks/knigge/verify.ex | samuel-uniris/knigge | fd7c6e735a1840211a02733c628167a1831d9c92 | [
"MIT"
] | 83 | 2019-07-26T14:51:19.000Z | 2022-03-27T08:05:15.000Z | lib/mix/tasks/knigge/verify.ex | samuel-uniris/knigge | fd7c6e735a1840211a02733c628167a1831d9c92 | [
"MIT"
] | 21 | 2019-08-06T08:35:44.000Z | 2021-10-17T19:55:46.000Z | lib/mix/tasks/knigge/verify.ex | samuel-uniris/knigge | fd7c6e735a1840211a02733c628167a1831d9c92 | [
"MIT"
] | 10 | 2019-07-31T09:56:26.000Z | 2022-01-03T12:03:33.000Z | defmodule Mix.Tasks.Knigge.Verify do
use Mix.Task
import Knigge.CLI.Output
alias Knigge.Verification
alias Knigge.Verification.Context
require Context
@shortdoc "Verify the validity of your facades and their implementations."
@moduledoc """
#{@shortdoc}
At the moment `knigge.verify` "only" ensures that the implementation modules
of your facades exist. Running the task on a code base with two facades might
look like this:
$ mix knigge.verify
Verify 2 Knigge facades in 'my_app'.
1/2 Facades passed:
MyApp.MyGreatFacade -> MyApp.MyGreatImpl
1/2 Facades failed:
MyApp.AnotherFacade -> MyApp.AnothrImpl (implementation does not exist)
Completed in 0.009 seconds.
Validation failed for 1/2 facades.
The attentive reader might have noticed that `MyApp.AnothrImpl` contains a
spelling error: `Anothr` instead of `Another`.
Catching errors like this is the main responsibility of `knigge.verify`. When
an issue is detected the task will exit with an error code, which allows you
to use it in your CI pipeline - for example before you build your production
release.
## Options
--app (optional):
Name of the app for which the facades need to be verified.
Defaults to the current working environment app.
"""
@exit_codes %{unknown_app: 1, missing_modules: 2, unknown_options: 3}
@exit_reasons Map.keys(@exit_codes)
@unknown_error_code 64
@impl Mix.Task
def run(raw_args) do
Mix.Task.run("compile")
with {:ok, opts} <- parse(raw_args),
:ok <- do_run(opts) do
:ok
else
{:error, reason} ->
maybe_print_error(reason)
exit({:shutdown, to_exit_code(reason)})
end
end
defp parse(raw_args) do
case OptionParser.parse(raw_args, strict: [app: :string]) do
{opts, _argv, []} -> {:ok, opts}
{_parsed, _argv, errors} -> unknown_switches(errors)
end
end
defp unknown_switches(errors) do
options =
errors
|> Enum.map(&elem(&1, 0))
|> Enum.join(", ")
{:error, {:unknown_options, "Unknown switch(es) received: " <> options}}
end
defp do_run(opts) do
opts
|> Keyword.get_lazy(:app, &calling_app/0)
|> run_for()
end
defp calling_app, do: Mix.Project.get().project()[:app]
defp run_for(app) when is_binary(app) do
app
|> String.to_atom()
|> run_for()
end
defp run_for(app) do
case Context.for_app(app) do
{:ok, context} ->
context
|> begin_verification()
|> Verification.run()
|> finish_verification()
|> to_result_tuple()
{:error, {:unknown_app, app}} ->
error("Unable to load modules for #{app || "current app"}, are you sure the app exists?")
{:error, :unknown_app}
other ->
other
end
end
defp begin_verification(%Context{app: app, modules: []} = context) do
warn("No modules in `#{app}` found which `use Knigge`.")
context
end
defp begin_verification(%Context{app: app, modules: modules} = context) do
info("Verify #{length(modules)} Knigge facades in '#{app}'.")
context
end
defp finish_verification(context) do
context
|> print_result()
|> completed_in()
end
defp print_result(context) do
print_existing(context)
print_missing(context)
context
end
defp print_existing(%Context{existing: []}), do: :ok
defp print_existing(%Context{existing: facades, modules: modules}) do
success("\n#{length(facades)}/#{length(modules)} Facades passed:")
facades
|> Enum.map_join("\n", fn {module, implementation} ->
" #{inspect(module)} -> #{inspect(implementation)}"
end)
|> success()
end
defp print_missing(%Context{missing: []}), do: :ok
defp print_missing(%Context{missing: facades, modules: modules}) do
error("\n#{length(facades)}/#{length(modules)} Facades failed:")
facades
|> Enum.map_join("\n", fn {module, implementation} ->
" #{inspect(module)} -> #{inspect(implementation)} (implementation does not exist)"
end)
|> error()
end
defp completed_in(context) do
context = Context.finished(context)
duration =
context
|> Context.duration()
|> Kernel./(1_000)
|> Float.round(3)
info("\nCompleted in #{duration} seconds.\n")
context
end
defp to_result_tuple(%Context{} = context) do
if Context.error?(context) do
{:error, context.error}
else
:ok
end
end
defp maybe_print_error(%Context{error: :missing_modules} = context) do
error("Validation failed for #{length(context.missing)}/#{length(context.modules)} facades.")
end
defp maybe_print_error(%Context{}), do: :ok
defp maybe_print_error({:unknown_switches, message}), do: error(message)
defp maybe_print_error(unknown_reason) do
error("An unknown error occurred: #{inspect(unknown_reason)}")
end
defp to_exit_code(%Context{error: reason}), do: to_exit_code(reason)
defp to_exit_code(reason) when reason in @exit_reasons, do: @exit_codes[reason]
defp to_exit_code(_unknown_reason), do: @unknown_error_code
end
| 25.465347 | 97 | 0.658631 |
9e56bd07d7ed7d40b58677a185d2bdbc1132d33c | 3,493 | ex | Elixir | clients/video_intelligence/lib/google_api/video_intelligence/v1/model/google_cloud_videointelligence_v1_label_detection_config.ex | linjunpop/elixir-google-api | 444cb2b2fb02726894535461a474beddd8b86db4 | [
"Apache-2.0"
] | null | null | null | clients/video_intelligence/lib/google_api/video_intelligence/v1/model/google_cloud_videointelligence_v1_label_detection_config.ex | linjunpop/elixir-google-api | 444cb2b2fb02726894535461a474beddd8b86db4 | [
"Apache-2.0"
] | null | null | null | clients/video_intelligence/lib/google_api/video_intelligence/v1/model/google_cloud_videointelligence_v1_label_detection_config.ex | linjunpop/elixir-google-api | 444cb2b2fb02726894535461a474beddd8b86db4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This class is auto generated by the swagger code generator program.
# https://github.com/swagger-api/swagger-codegen.git
# Do not edit the class manually.
defmodule GoogleApi.VideoIntelligence.V1.Model.GoogleCloudVideointelligenceV1LabelDetectionConfig do
  @moduledoc """
  Config for LABEL_DETECTION.

  ## Attributes

  - frameConfidenceThreshold (float()): The confidence threshold we perform filtering on the labels from frame-level detection. If not set, it is set to 0.4 by default. The valid range for this threshold is [0.1, 0.9]. Any value set outside of this range will be clipped. Note: for best results please follow the default threshold. We will update the default threshold everytime when we release a new model. Defaults to: `null`.
  - labelDetectionMode (String.t): What labels should be detected with LABEL_DETECTION, in addition to video-level labels or segment-level labels. If unspecified, defaults to `SHOT_MODE`. Defaults to: `null`.
    - Enum - one of [LABEL_DETECTION_MODE_UNSPECIFIED, SHOT_MODE, FRAME_MODE, SHOT_AND_FRAME_MODE]
  - model (String.t): Model to use for label detection. Supported values: \"builtin/stable\" (the default if unset) and \"builtin/latest\". Defaults to: `null`.
  - stationaryCamera (boolean()): Whether the video has been shot from a stationary (i.e. non-moving) camera. When set to true, might improve detection accuracy for moving objects. Should be used with `SHOT_AND_FRAME_MODE` enabled. Defaults to: `null`.
  - videoConfidenceThreshold (float()): The confidence threshold we perform filtering on the labels from video-level and shot-level detections. If not set, it is set to 0.3 by default. The valid range for this threshold is [0.1, 0.9]. Any value set outside of this range will be clipped. Note: for best results please follow the default threshold. We will update the default threshold everytime when we release a new model. Defaults to: `null`.
  """

  use GoogleApi.Gax.ModelBase

  @type t :: %__MODULE__{
          :frameConfidenceThreshold => any(),
          :labelDetectionMode => any(),
          :model => any(),
          :stationaryCamera => any(),
          :videoConfidenceThreshold => any()
        }

  # Generated field declarations; names mirror the JSON attribute names of the
  # API resource (camelCase, as sent over the wire).
  field(:frameConfidenceThreshold)
  field(:labelDetectionMode)
  field(:model)
  field(:stationaryCamera)
  field(:videoConfidenceThreshold)
end
defimpl Poison.Decoder,
  for: GoogleApi.VideoIntelligence.V1.Model.GoogleCloudVideointelligenceV1LabelDetectionConfig do
  # Delegate JSON decoding to the generated model's decode/2 helper.
  defdelegate decode(value, options),
    to: GoogleApi.VideoIntelligence.V1.Model.GoogleCloudVideointelligenceV1LabelDetectionConfig
end
defimpl Poison.Encoder,
  for: GoogleApi.VideoIntelligence.V1.Model.GoogleCloudVideointelligenceV1LabelDetectionConfig do
  # All generated models share the generic Gax encoder.
  defdelegate encode(value, options), to: GoogleApi.Gax.ModelBase
end
| 52.924242 | 444 | 0.757515 |
9e56dc3fe452eb965be13558ee4ec48164a143c6 | 4,442 | ex | Elixir | clients/private_ca/lib/google_api/private_ca/v1/model/certificate_template.ex | renovate-bot/elixir-google-api | 1da34cd39b670c99f067011e05ab90af93fef1f6 | [
"Apache-2.0"
] | 1 | 2021-12-20T03:40:53.000Z | 2021-12-20T03:40:53.000Z | clients/private_ca/lib/google_api/private_ca/v1/model/certificate_template.ex | swansoffiee/elixir-google-api | 9ea6d39f273fb430634788c258b3189d3613dde0 | [
"Apache-2.0"
] | 1 | 2020-08-18T00:11:23.000Z | 2020-08-18T00:44:16.000Z | clients/private_ca/lib/google_api/private_ca/v1/model/certificate_template.ex | dazuma/elixir-google-api | 6a9897168008efe07a6081d2326735fe332e522c | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.PrivateCA.V1.Model.CertificateTemplate do
  @moduledoc """
  A CertificateTemplate refers to a managed template for certificate issuance.

  ## Attributes

  *   `createTime` (*type:* `DateTime.t`, *default:* `nil`) - Output only. The time at which this CertificateTemplate was created.
  *   `description` (*type:* `String.t`, *default:* `nil`) - Optional. A human-readable description of scenarios this template is intended for.
  *   `identityConstraints` (*type:* `GoogleApi.PrivateCA.V1.Model.CertificateIdentityConstraints.t`, *default:* `nil`) - Optional. Describes constraints on identities that may be appear in Certificates issued using this template. If this is omitted, then this template will not add restrictions on a certificate's identity.
  *   `labels` (*type:* `map()`, *default:* `nil`) - Optional. Labels with user-defined metadata.
  *   `name` (*type:* `String.t`, *default:* `nil`) - Output only. The resource name for this CertificateTemplate in the format `projects/*/locations/*/certificateTemplates/*`.
  *   `passthroughExtensions` (*type:* `GoogleApi.PrivateCA.V1.Model.CertificateExtensionConstraints.t`, *default:* `nil`) - Optional. Describes the set of X.509 extensions that may appear in a Certificate issued using this CertificateTemplate. If a certificate request sets extensions that don't appear in the passthrough_extensions, those extensions will be dropped. If the issuing CaPool's IssuancePolicy defines baseline_values that don't appear here, the certificate issuance request will fail. If this is omitted, then this template will not add restrictions on a certificate's X.509 extensions. These constraints do not apply to X.509 extensions set in this CertificateTemplate's predefined_values.
  *   `predefinedValues` (*type:* `GoogleApi.PrivateCA.V1.Model.X509Parameters.t`, *default:* `nil`) - Optional. A set of X.509 values that will be applied to all issued certificates that use this template. If the certificate request includes conflicting values for the same properties, they will be overwritten by the values defined here. If the issuing CaPool's IssuancePolicy defines conflicting baseline_values for the same properties, the certificate issuance request will fail.
  *   `updateTime` (*type:* `DateTime.t`, *default:* `nil`) - Output only. The time at which this CertificateTemplate was updated.
  """

  use GoogleApi.Gax.ModelBase

  @type t :: %__MODULE__{
          :createTime => DateTime.t() | nil,
          :description => String.t() | nil,
          :identityConstraints =>
            GoogleApi.PrivateCA.V1.Model.CertificateIdentityConstraints.t() | nil,
          :labels => map() | nil,
          :name => String.t() | nil,
          :passthroughExtensions =>
            GoogleApi.PrivateCA.V1.Model.CertificateExtensionConstraints.t() | nil,
          :predefinedValues => GoogleApi.PrivateCA.V1.Model.X509Parameters.t() | nil,
          :updateTime => DateTime.t() | nil
        }

  # Generated field declarations; names mirror the JSON attribute names of the
  # API resource, with nested resources decoded into their model structs.
  field(:createTime, as: DateTime)
  field(:description)
  field(:identityConstraints, as: GoogleApi.PrivateCA.V1.Model.CertificateIdentityConstraints)
  field(:labels, type: :map)
  field(:name)
  field(:passthroughExtensions, as: GoogleApi.PrivateCA.V1.Model.CertificateExtensionConstraints)
  field(:predefinedValues, as: GoogleApi.PrivateCA.V1.Model.X509Parameters)
  field(:updateTime, as: DateTime)
end
defimpl Poison.Decoder, for: GoogleApi.PrivateCA.V1.Model.CertificateTemplate do
  # Delegate JSON decoding to the generated model's decode/2 helper.
  defdelegate decode(value, options), to: GoogleApi.PrivateCA.V1.Model.CertificateTemplate
end
defimpl Poison.Encoder, for: GoogleApi.PrivateCA.V1.Model.CertificateTemplate do
  # All generated models share the generic Gax encoder.
  defdelegate encode(value, options), to: GoogleApi.Gax.ModelBase
end
| 63.457143 | 705 | 0.74606 |
9e56f0c4eda72afef5c04d89d1d7c6be98b36f72 | 1,252 | ex | Elixir | lib/banking_graph_web/endpoint.ex | oryono/banking | 0a49ebae5ebf93a6db0c24476a1c86c60bb72733 | [
"MIT"
] | null | null | null | lib/banking_graph_web/endpoint.ex | oryono/banking | 0a49ebae5ebf93a6db0c24476a1c86c60bb72733 | [
"MIT"
] | null | null | null | lib/banking_graph_web/endpoint.ex | oryono/banking | 0a49ebae5ebf93a6db0c24476a1c86c60bb72733 | [
"MIT"
] | null | null | null | defmodule BankingGraphWeb.Endpoint do
use Phoenix.Endpoint, otp_app: :banking_graph
socket "/socket", BankingGraphWeb.UserSocket,
websocket: true,
longpoll: false
plug CORSPlug
# Serve at "/" the static files from "priv/static" directory.
#
# You should set gzip to true if you are running phx.digest
# when deploying your static files in production.
plug Plug.Static,
at: "/",
from: :banking_graph,
gzip: false,
only: ~w(css fonts images js favicon.ico robots.txt)
# Code reloading can be explicitly enabled under the
# :code_reloader configuration of your endpoint.
if code_reloading? do
plug Phoenix.CodeReloader
end
plug Plug.RequestId
plug Plug.Telemetry, event_prefix: [:phoenix, :endpoint]
plug Plug.Parsers,
parsers: [:urlencoded, :multipart, :json],
pass: ["*/*"],
json_decoder: Phoenix.json_library()
plug Plug.MethodOverride
plug Plug.Head
# The session will be stored in the cookie and signed,
# this means its contents can be read but not tampered with.
# Set :encryption_salt if you would also like to encrypt it.
plug Plug.Session,
store: :cookie,
key: "_banking_graph_key",
signing_salt: "9/50Dj2O"
plug BankingGraphWeb.Router
end
| 26.638298 | 63 | 0.711661 |
9e570b40adb3cad3e84981b5daa3bcfbbb52e29a | 1,380 | ex | Elixir | lib/jalka2021/accounts/user_notifier.ex | kriips/jalka2021 | f4d968e20cae116fd4056bff2f937cd036421977 | [
"MIT"
] | null | null | null | lib/jalka2021/accounts/user_notifier.ex | kriips/jalka2021 | f4d968e20cae116fd4056bff2f937cd036421977 | [
"MIT"
] | null | null | null | lib/jalka2021/accounts/user_notifier.ex | kriips/jalka2021 | f4d968e20cae116fd4056bff2f937cd036421977 | [
"MIT"
] | null | null | null | defmodule Jalka2021.Accounts.UserNotifier do
# For simplicity, this module simply logs messages to the terminal.
# You should replace it by a proper email or notification tool, such as:
#
# * Swoosh - https://hexdocs.pm/swoosh
# * Bamboo - https://hexdocs.pm/bamboo
#
defp deliver(to, body) do
require Logger
Logger.debug(body)
{:ok, %{to: to, body: body}}
end
@doc """
Deliver instructions to confirm account.
"""
def deliver_confirmation_instructions(user, url) do
deliver(user.email, """
==============================
Tere #{user.email},
oma konto kinnitamiseks vajuta allolevale lingile:
#{url}
==============================
""")
end
@doc """
Deliver instructions to reset a user password.
"""
def deliver_reset_password_instructions(user, url) do
deliver(user.email, """
==============================
Tere #{user.email},
parooli lähtestamiseks mine allolevale lingile:
#{url}
==============================
""")
end
@doc """
Deliver instructions to update a user email.
"""
def deliver_update_email_instructions(user, url) do
deliver(user.email, """
==============================
Tere #{user.email},
oma meiliaadressi vahetamiseks mine allolevale lingile:
#{url}
==============================
""")
end
end
| 20.294118 | 74 | 0.555072 |
9e572dc667286a89ce06118ac6fd1876b05d1b78 | 5,511 | ex | Elixir | clients/remote_build_execution/lib/google_api/remote_build_execution/v2/model/build_bazel_remote_execution_v2_action.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | clients/remote_build_execution/lib/google_api/remote_build_execution/v2/model/build_bazel_remote_execution_v2_action.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | clients/remote_build_execution/lib/google_api/remote_build_execution/v2/model/build_bazel_remote_execution_v2_action.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.RemoteBuildExecution.V2.Model.BuildBazelRemoteExecutionV2Action do
@moduledoc """
An `Action` captures all the information about an execution which is required
to reproduce it.
`Action`s are the core component of the [Execution] service. A single
`Action` represents a repeatable action that can be performed by the
execution service. `Action`s can be succinctly identified by the digest of
their wire format encoding and, once an `Action` has been executed, will be
cached in the action cache. Future requests can then use the cached result
rather than needing to run afresh.
When a server completes execution of an
Action, it MAY choose to
cache the result in
the ActionCache unless
`do_not_cache` is `true`. Clients SHOULD expect the server to do so. By
default, future calls to
Execute the same
`Action` will also serve their results from the cache. Clients must take care
to understand the caching behaviour. Ideally, all `Action`s will be
reproducible so that serving a result from cache is always desirable and
correct.
## Attributes
* `commandDigest` (*type:* `GoogleApi.RemoteBuildExecution.V2.Model.BuildBazelRemoteExecutionV2Digest.t`, *default:* `nil`) - The digest of the Command
to run, which MUST be present in the
ContentAddressableStorage.
* `doNotCache` (*type:* `boolean()`, *default:* `nil`) - If true, then the `Action`'s result cannot be cached, and in-flight
requests for the same `Action` may not be merged.
* `inputRootDigest` (*type:* `GoogleApi.RemoteBuildExecution.V2.Model.BuildBazelRemoteExecutionV2Digest.t`, *default:* `nil`) - The digest of the root
Directory for the input
files. The files in the directory tree are available in the correct
location on the build machine before the command is executed. The root
directory, as well as every subdirectory and content blob referred to, MUST
be in the
ContentAddressableStorage.
* `outputNodeProperties` (*type:* `list(String.t)`, *default:* `nil`) - List of required supported NodeProperty
keys. In order to ensure that equivalent `Action`s always hash to the same
value, the supported node properties MUST be lexicographically sorted by name.
Sorting of strings is done by code point, equivalently, by the UTF-8 bytes.
The interpretation of these properties is server-dependent. If a property is
not recognized by the server, the server will return an `INVALID_ARGUMENT`
error.
* `timeout` (*type:* `String.t`, *default:* `nil`) - A timeout after which the execution should be killed. If the timeout is
absent, then the client is specifying that the execution should continue
as long as the server will let it. The server SHOULD impose a timeout if
the client does not specify one, however, if the client does specify a
timeout that is longer than the server's maximum timeout, the server MUST
reject the request.
The timeout is a part of the
Action message, and
therefore two `Actions` with different timeouts are different, even if they
are otherwise identical. This is because, if they were not, running an
`Action` with a lower timeout than is required might result in a cache hit
from an execution run with a longer timeout, hiding the fact that the
timeout is too short. By encoding it directly in the `Action`, a lower
timeout will result in a cache miss and the execution timeout will fail
immediately, rather than whenever the cache entry gets evicted.
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:commandDigest =>
GoogleApi.RemoteBuildExecution.V2.Model.BuildBazelRemoteExecutionV2Digest.t(),
:doNotCache => boolean(),
:inputRootDigest =>
GoogleApi.RemoteBuildExecution.V2.Model.BuildBazelRemoteExecutionV2Digest.t(),
:outputNodeProperties => list(String.t()),
:timeout => String.t()
}
field(:commandDigest,
as: GoogleApi.RemoteBuildExecution.V2.Model.BuildBazelRemoteExecutionV2Digest
)
field(:doNotCache)
field(:inputRootDigest,
as: GoogleApi.RemoteBuildExecution.V2.Model.BuildBazelRemoteExecutionV2Digest
)
field(:outputNodeProperties, type: :list)
field(:timeout)
end
defimpl Poison.Decoder,
for: GoogleApi.RemoteBuildExecution.V2.Model.BuildBazelRemoteExecutionV2Action do
def decode(value, options) do
GoogleApi.RemoteBuildExecution.V2.Model.BuildBazelRemoteExecutionV2Action.decode(
value,
options
)
end
end
defimpl Poison.Encoder,
for: GoogleApi.RemoteBuildExecution.V2.Model.BuildBazelRemoteExecutionV2Action do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
| 44.443548 | 155 | 0.737071 |
9e5765dd7f1400b5c8c41d0f511c6e3a39d95bbb | 7,706 | ex | Elixir | lib/validators/email.ex | tomciopp/ecto_commons | 75ca493739a54b2f73b753c3d2623dc61781d91d | [
"MIT"
] | null | null | null | lib/validators/email.ex | tomciopp/ecto_commons | 75ca493739a54b2f73b753c3d2623dc61781d91d | [
"MIT"
] | null | null | null | lib/validators/email.ex | tomciopp/ecto_commons | 75ca493739a54b2f73b753c3d2623dc61781d91d | [
"MIT"
] | null | null | null | defmodule EctoCommons.EmailValidator do
@moduledoc ~S"""
Validates emails.
## Options
There are various `:checks` depending on the strictness of the validation you require. Indeed, perfect email validation
does not exist (see StackOverflow questions about it):
- `:html_input`: Checks if the email follows the regular expression used by browsers for
their `type="email"` input fields. This is the default as it corresponds to most use-cases. It is quite strict
without being too narrow. It does not support unicode emails though. If you need better internationalization,
please use the `:pow` check as it is more flexible with international emails. Defaults to enabled.
- `:burner`: Checks if the email given is a burner email provider. When enabled, will reject temporary
email providers. Defaults to disabled.
- `:pow`: Checks the email using the [`pow`](https://hex.pm/packages/pow) logic. Defaults to disabled.
The rules are the following:
- Split into local-part and domain at last `@` occurrence
- Local-part should;
- be at most 64 octets
- separate quoted and unquoted content with a single dot
- only have letters, digits, and the following characters outside quoted
content:
```text
!#$%&'*+-/=?^_`{|}~.
```
- not have any consecutive dots outside quoted content
- Domain should;
- be at most 255 octets
- only have letters, digits, hyphen, and dots
- do not start or end with hyphen or dot
- can be an IPv4 or IPv6 address
Unicode characters are permitted in both local-part and domain.
## Example:
iex> types = %{email: :string}
iex> params = %{email: "[email protected]"}
iex> Ecto.Changeset.cast({%{}, types}, params, Map.keys(types))
...> |> validate_email(:email)
#Ecto.Changeset<action: nil, changes: %{email: "[email protected]"}, errors: [], data: %{}, valid?: true>
iex> types = %{email: :string}
iex> params = %{email: "@invalid_email"}
iex> Ecto.Changeset.cast({%{}, types}, params, Map.keys(types))
...> |> validate_email(:email)
#Ecto.Changeset<action: nil, changes: %{email: "@invalid_email"}, errors: [email: {"is not a valid email", [validation: :email]}], data: %{}, valid?: false>
iex> types = %{email: :string}
iex> params = %{email: "[email protected]"}
iex> Ecto.Changeset.cast({%{}, types}, params, Map.keys(types))
...> |> validate_email(:email, checks: [:html_input, :burner])
#Ecto.Changeset<action: nil, changes: %{email: "[email protected]"}, errors: [email: {"uses a forbidden provider", [validation: :email]}], data: %{}, valid?: false>
iex> types = %{email: :string}
iex> params = %{email: "[email protected]"}
iex> Ecto.Changeset.cast({%{}, types}, params, Map.keys(types))
...> |> validate_email(:email, checks: [:html_input, :pow])
#Ecto.Changeset<action: nil, changes: %{email: "[email protected]"}, errors: [], data: %{}, valid?: true>
"""
import Ecto.Changeset
# We use the regular expression of the html `email` field specification.
# See https://html.spec.whatwg.org/multipage/input.html#e-mail-state-(type=email)
# and https://stackoverflow.com/a/15659649/1656568
# credo:disable-for-next-line Credo.Check.Readability.MaxLineLength
@email_regex ~r/^[a-zA-Z0-9.!#$%&'*+\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$/
# credo:disable-for-next-line Credo.Check.Readability.MaxLineLength
@ipv6_regex ~r/(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))/
@ipv4_regex ~r/((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])/
def validate_email(%Ecto.Changeset{} = changeset, field, opts \\ []) do
validate_change(changeset, field, {:email, opts}, fn _, value ->
checks = Keyword.get(opts, :checks, [:pow])
# credo:disable-for-lines:6 Credo.Check.Refactor.Nesting
Enum.reduce(checks, [], fn check, errors ->
case do_validate_email(value, check) do
:ok -> errors
{:error, msg} -> [{field, {message(opts, msg), [validation: :email]}} | errors]
end
end)
|> List.flatten()
end)
end
@spec do_validate_email(String.t(), atom()) :: :ok | {:error, String.t()}
defp do_validate_email(email, :burner) do
case Burnex.is_burner?(email) do
true ->
{:error, "uses a forbidden provider"}
false ->
:ok
end
end
defp do_validate_email(email, :html_input) do
if String.match?(email, @email_regex),
do: :ok,
else: {:error, "is not a valid email"}
end
defp do_validate_email(email, :pow) do
case pow_validate_email(email) do
:ok -> :ok
{:error, _msg} -> {:error, "is not a valid email"}
end
end
# The code below is copied and adapted from the [pow](https://hex.pm/packages/pow) package
# with a few fixes on the domain part.
defp pow_validate_email(email) do
[domain | rest] =
email
|> String.split("@")
|> Enum.reverse()
local_part =
rest
|> Enum.reverse()
|> Enum.join("@")
cond do
String.length(local_part) > 64 -> {:error, "local-part too long"}
String.length(domain) > 255 -> {:error, "domain too long"}
local_part == "" -> {:error, "invalid format"}
true -> pow_validate_email(local_part, domain)
end
end
defp pow_validate_email(local_part, domain) do
sanitized_local_part = remove_quotes_from_local_part(local_part)
cond do
local_part_only_quoted?(local_part) ->
validate_domain(domain)
local_part_consecutive_dots?(sanitized_local_part) ->
{:error, "consecutive dots in local-part"}
local_part_valid_characters?(sanitized_local_part) ->
validate_domain(domain)
true ->
{:error, "invalid characters in local-part"}
end
end
defp remove_quotes_from_local_part(local_part),
do: Regex.replace(~r/(^\".*\"$)|(^\".*\"\.)|(\.\".*\"$)?/, local_part, "")
defp local_part_only_quoted?(local_part), do: local_part =~ ~r/^"[^\"]+"$/
defp local_part_consecutive_dots?(local_part), do: local_part =~ ~r/\.\./
defp local_part_valid_characters?(sanitized_local_part),
do: sanitized_local_part =~ ~r<^[\p{L}0-9!#$%&'*+-/=?^_`{|}~\.]+$>u
defp validate_domain(domain) do
cond do
String.first(domain) == "-" -> {:error, "domain begins with hyphen"}
String.first(domain) == "." -> {:error, "domain begins with a dot"}
String.last(domain) == "-" -> {:error, "domain ends with hyphen"}
String.last(domain) == "." -> {:error, "domain ends with a dot"}
domain =~ ~r/^[\p{L}0-9-\.]+$/u -> :ok
domain =~ @ipv6_regex -> :ok
domain =~ @ipv4_regex -> :ok
true -> {:error, "invalid domain"}
end
end
defp message(opts, key \\ :message, default) do
Keyword.get(opts, key, default)
end
end
| 43.050279 | 677 | 0.610174 |
9e576d401fe5d53dab8cf48a8cc955ecc7df7824 | 2,060 | exs | Elixir | talks-articles/languages-n-runtimes/elixir/book--programming-elixir-ge-1.6/chapter-24/chapter-24-exercises.exs | abhishekkr/tutorials_as_code | f355dc62a5025b710ac6d4a6ac2f9610265fad54 | [
"MIT"
] | 37 | 2015-02-01T23:16:39.000Z | 2021-12-22T16:50:48.000Z | talks-articles/languages-n-runtimes/elixir/book--programming-elixir-ge-1.6/chapter-24/chapter-24-exercises.exs | abhishekkr/tutorials_as_code | f355dc62a5025b710ac6d4a6ac2f9610265fad54 | [
"MIT"
] | 1 | 2017-03-02T04:55:48.000Z | 2018-01-14T10:51:11.000Z | talks-articles/languages-n-runtimes/elixir/book--programming-elixir-ge-1.6/chapter-24/chapter-24-exercises.exs | abhishekkr/tutorials_as_code | f355dc62a5025b710ac6d4a6ac2f9610265fad54 | [
"MIT"
] | 15 | 2015-03-02T08:09:01.000Z | 2021-06-10T03:25:41.000Z | defprotocol Caesar do
def encrypt(string, shift)
def rot13(string)
end
defimpl Caesar, for: List do
@lowercase 97..122
@uppercase 65..90
def encrypt(list, shift), do: list |> Enum.map(&do_encrypt(&1, shift))
def rot13(list), do: encrypt(list, 13)
defp do_encrypt(char, shift) when char in @lowercase and char+shift < 123 do
char + shift
end
defp do_encrypt(char, shift) when char in @lowercase do
96 + (char + shift - 122)
end
defp do_encrypt(char, shift) when char in @uppercase and char+shift < 91 do
char + shift
end
defp do_encrypt(char, shift) when char in @uppercase do
64 + (char + shift - 90)
end
defp do_encrypt(char, _shift), do: char
end
defimpl Caesar, for: BitString do
def encrypt(string, shift) do
String.to_char_list(string)
|> Caesar.List.encrypt(shift)
|> List.to_string
end
def rot13(string), do:
encrypt(string, 13)
end
defmodule Exercise3 do
import(Enum, only: [reduce: 3])
def each(list, foo) do
reduce(list, :ok, fn x, acc ->
foo.(x)
acc
end)
end
def filter(list, foo) do
reduce(list, [], fn x, acc ->
if foo.(x) do
[x | acc]
else
acc
end
end) |> do_reverse([])
end
def map(list, foo) do
reduce(list, [], fn x, acc ->
[foo.(x) | acc]
end) |> do_reverse([])
end
defp do_reverse([], revlst), do: revlst
defp do_reverse([h|tail], revlst), do: do_reverse(tail, [h| revlst])
end
ExUnit.start
defmodule CaesarTest do
use ExUnit.Case
test "list implementation" do
assert Caesar.rot13('Make It Rain') == 'Znxr Vg Enva'
end
test "bitstring implementation" do
assert Caesar.rot13("Make It Rain") == "Znxr Vg Enva"
end
end
defmodule Exercise3Test do
use ExUnit.Case
test "#each" do
assert Exercise3.each([1,10], &IO.inspect/1) == :ok
end
test "#filter" do
assert Exercise3.filter([1,10], &(&1<10)) == [1]
end
test "#map" do
assert Exercise3.map([1,10], &(&1+1)) == [2,11]
end
end
| 20.39604 | 78 | 0.613592 |
9e57733d552f34195d0f7138a904266935bb6eb3 | 606 | exs | Elixir | apps/transform_wkt/mix.exs | kennyatpillar/hindsight | e90e2150a14218e5d6fdf5874f57eb055fd2dd07 | [
"Apache-2.0"
] | null | null | null | apps/transform_wkt/mix.exs | kennyatpillar/hindsight | e90e2150a14218e5d6fdf5874f57eb055fd2dd07 | [
"Apache-2.0"
] | null | null | null | apps/transform_wkt/mix.exs | kennyatpillar/hindsight | e90e2150a14218e5d6fdf5874f57eb055fd2dd07 | [
"Apache-2.0"
] | null | null | null | defmodule Transform.WKT.MixProject do
use Mix.Project
def project do
[
app: :transform_wkt,
version: "0.1.0",
build_path: "../../_build",
config_path: "../../config/config.exs",
deps_path: "../../deps",
lockfile: "../../mix.lock",
elixir: "~> 1.9",
start_permanent: Mix.env() == :prod,
deps: deps()
]
end
def application do
[
extra_applications: [:logger]
]
end
defp deps do
[
{:transform_step, in_umbrella: true},
{:definition_dictionary, in_umbrella: true},
{:geo, "~> 3.3"}
]
end
end
| 18.9375 | 50 | 0.539604 |
9e578074cf9341a5fd882edbc46569fe90bef87f | 451 | ex | Elixir | web/router.ex | jwdotjs/battlestation | 9552cf2af11f5d5420ce90947ca58d9cca2f7c4a | [
"MIT"
] | null | null | null | web/router.ex | jwdotjs/battlestation | 9552cf2af11f5d5420ce90947ca58d9cca2f7c4a | [
"MIT"
] | null | null | null | web/router.ex | jwdotjs/battlestation | 9552cf2af11f5d5420ce90947ca58d9cca2f7c4a | [
"MIT"
] | null | null | null | defmodule Battlestation.Router do
use Battlestation.Web, :router
pipeline :browser do
plug :accepts, ["html"]
plug :fetch_session
plug :fetch_flash
plug :protect_from_forgery
plug :put_secure_browser_headers
end
scope "/", Battlestation do
pipe_through :browser # Use the default browser stack
get "/", PageController, :index
post "/", PageController, :create
put "/", PageController, :update
end
end
| 21.47619 | 57 | 0.698448 |
9e5786f018bf929acb9f6ae16705e4624b3339cf | 682 | ex | Elixir | lib/churn/renderer/console.ex | patrykwozinski/churn | 5a5ad6442da8ae0dc46064d4f80e5404c397d90d | [
"MIT"
] | 78 | 2021-05-18T09:55:12.000Z | 2022-03-02T22:11:53.000Z | lib/churn/renderer/console.ex | patrykwozinski/churn | 5a5ad6442da8ae0dc46064d4f80e5404c397d90d | [
"MIT"
] | 14 | 2021-05-12T23:00:43.000Z | 2022-03-02T08:16:01.000Z | lib/churn/renderer/console.ex | patrykwozinski/churn | 5a5ad6442da8ae0dc46064d4f80e5404c397d90d | [
"MIT"
] | 2 | 2021-05-20T14:54:08.000Z | 2021-06-08T18:01:40.000Z | defmodule Churn.Renderer.Console do
@moduledoc false
alias Churn.Processor.Result
@title "Churn :: Refactoring candidates"
@spec render([Result.t()]) :: :ok
def render(results) do
header = ["File", "Times Changed", "Complexity", "Score"]
results
|> Enum.map(fn result ->
[result.file.path, result.times_changed, result.complexity, result.score]
end)
|> do_render(header, @title)
end
defp do_render([] = _results, _header, _title) do
IO.puts "(No results. Please try again with different options.)"
end
defp do_render(results, header, title) do
results
|> TableRex.quick_render!(header, title)
|> IO.puts()
end
end
| 23.517241 | 79 | 0.665689 |
9e578cee03e4adb93245e877df0272ad3804f420 | 1,779 | ex | Elixir | clients/retail/lib/google_api/retail/v2/model/google_cloud_retail_v2_product_inline_source.ex | renovate-bot/elixir-google-api | 1da34cd39b670c99f067011e05ab90af93fef1f6 | [
"Apache-2.0"
] | 1 | 2021-12-20T03:40:53.000Z | 2021-12-20T03:40:53.000Z | clients/retail/lib/google_api/retail/v2/model/google_cloud_retail_v2_product_inline_source.ex | swansoffiee/elixir-google-api | 9ea6d39f273fb430634788c258b3189d3613dde0 | [
"Apache-2.0"
] | 1 | 2020-08-18T00:11:23.000Z | 2020-08-18T00:44:16.000Z | clients/retail/lib/google_api/retail/v2/model/google_cloud_retail_v2_product_inline_source.ex | dazuma/elixir-google-api | 6a9897168008efe07a6081d2326735fe332e522c | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Retail.V2.Model.GoogleCloudRetailV2ProductInlineSource do
@moduledoc """
The inline source for the input config for ImportProducts method.
## Attributes
* `products` (*type:* `list(GoogleApi.Retail.V2.Model.GoogleCloudRetailV2Product.t)`, *default:* `nil`) - Required. A list of products to update/create. Each product must have a valid Product.id. Recommended max of 100 items.
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:products => list(GoogleApi.Retail.V2.Model.GoogleCloudRetailV2Product.t()) | nil
}
field(:products, as: GoogleApi.Retail.V2.Model.GoogleCloudRetailV2Product, type: :list)
end
defimpl Poison.Decoder, for: GoogleApi.Retail.V2.Model.GoogleCloudRetailV2ProductInlineSource do
def decode(value, options) do
GoogleApi.Retail.V2.Model.GoogleCloudRetailV2ProductInlineSource.decode(value, options)
end
end
defimpl Poison.Encoder, for: GoogleApi.Retail.V2.Model.GoogleCloudRetailV2ProductInlineSource do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
| 37.851064 | 229 | 0.76335 |
9e57ec03a6206410f4b92a098f721a50127e9c84 | 192 | exs | Elixir | priv/repo/migrations/20180411191500_add_whitelist.exs | teaearlgraycold/HippoGuesser | f47fb0636c841ce9b579c07e423c980b17cb9965 | [
"MIT"
] | null | null | null | priv/repo/migrations/20180411191500_add_whitelist.exs | teaearlgraycold/HippoGuesser | f47fb0636c841ce9b579c07e423c980b17cb9965 | [
"MIT"
] | null | null | null | priv/repo/migrations/20180411191500_add_whitelist.exs | teaearlgraycold/HippoGuesser | f47fb0636c841ce9b579c07e423c980b17cb9965 | [
"MIT"
] | null | null | null | defmodule Mtpo.Repo.Migrations.AddWhitelist do
use Ecto.Migration
def change do
alter table(:users) do
add :whitelisted, :boolean, default: false, null: false
end
end
end
| 19.2 | 61 | 0.708333 |
9e581412e80df8c69bdd840ef0ed3a8eb0aba669 | 6,266 | exs | Elixir | test/paddle/subscription/subscription_test.exs | imricardoramos/paddlex | 5880a0bca6aa7801f767f586ac0821014e52299c | [
"MIT"
] | 7 | 2021-09-27T10:10:30.000Z | 2022-03-31T09:44:35.000Z | test/paddle/subscription/subscription_test.exs | imricardoramos/paddlex | 5880a0bca6aa7801f767f586ac0821014e52299c | [
"MIT"
] | 2 | 2021-09-29T16:57:41.000Z | 2022-01-25T12:43:26.000Z | test/paddle/subscription/subscription_test.exs | imricardoramos/paddlex | 5880a0bca6aa7801f767f586ac0821014e52299c | [
"MIT"
] | 2 | 2021-09-27T10:12:40.000Z | 2022-01-24T21:28:20.000Z | defmodule Paddle.SubscriptionTest do
use ExUnit.Case
setup do
bypass = Bypass.open(port: 12_345)
{:ok, bypass: bypass}
end
test "lists users", %{bypass: bypass} do
Bypass.expect(bypass, fn conn ->
Plug.Conn.resp(conn, 200, ~s(
{
"success": true,
"response": [
{
"subscription_id": 502198,
"plan_id": 496199,
"user_id": 285846,
"user_email": "[email protected]",
"marketing_consent": true,
"update_url": "https://checkout.paddle.com/subscription/update?user=12345&subscription=87654321&hash=eyJpdiI6Ik1RTE1nbHpXQmtJUG5...",
"cancel_url": "https://checkout.paddle.com/subscription/cancel?user=12345&subscription=87654321&hash=eyJpdiI6IlU0Nk5cL1JZeHQyTXd...",
"state": "active",
"signup_date": "2015-10-06 09:44:23",
"last_payment": {
"amount": 5,
"currency": "USD",
"date": "2015-10-06"
},
"payment_information": {
"payment_method": "card",
"card_type": "visa",
"last_four_digits": "1111",
"expiry_date": "02/2020"
},
"next_payment": {
"amount": 10,
"currency": "USD",
"date": "2015-11-06"
}
}
]
}
))
end)
assert {:ok,
[
%Paddle.Subscription{
subscription_id: 502_198,
plan_id: 496_199,
user_id: 285_846,
user_email: "[email protected]",
marketing_consent: true,
update_url:
"https://checkout.paddle.com/subscription/update?user=12345&subscription=87654321&hash=eyJpdiI6Ik1RTE1nbHpXQmtJUG5...",
cancel_url:
"https://checkout.paddle.com/subscription/cancel?user=12345&subscription=87654321&hash=eyJpdiI6IlU0Nk5cL1JZeHQyTXd...",
state: "active",
signup_date: ~U"2015-10-06 09:44:23Z",
last_payment: %{
"amount" => 5,
"currency" => "USD",
"date" => ~D"2015-10-06"
},
payment_information: %{
"payment_method" => "card",
"card_type" => "visa",
"last_four_digits" => "1111",
"expiry_date" => "02/2020"
},
next_payment: %{
"amount" => 10,
"currency" => "USD",
"date" => ~D"2015-11-06"
}
}
]} == Paddle.Subscription.list()
end
test "update user", %{bypass: bypass} do
Bypass.expect(bypass, fn conn ->
Plug.Conn.resp(conn, 200, ~s(
{
"success": true,
"response": {
"subscription_id": 12345,
"user_id": 425123,
"plan_id": 525123,
"next_payment": {
"amount": 144.06,
"currency": "GBP",
"date": "2018-02-15"
}
}
}
))
end)
params = %{
bill_immediately: true,
plan_id: 525_123,
prorate: true,
keep_modifiers: true,
passthrough: true,
pause: true
}
assert {:ok,
%{
subscription_id: 12_345,
user_id: 425_123,
plan_id: 525_123,
next_payment: %{
"amount" => 144.06,
"currency" => "GBP",
"date" => ~D"2018-02-15"
}
}} == Paddle.Subscription.update(12_345, params)
end
test "cancel_user", %{bypass: bypass} do
Bypass.expect(bypass, fn conn ->
Plug.Conn.resp(conn, 200, ~s(
{
"success": true
}
))
end)
assert {:ok, nil} == Paddle.Subscription.cancel(12_345)
end
test "do not attempt to convert dates for nil values", %{bypass: bypass} do
Bypass.expect(bypass, fn conn ->
Plug.Conn.resp(conn, 200, ~s(
{
"success": true,
"response": [
{
"subscription_id": 502198,
"plan_id": 496199,
"user_id": 285846,
"user_email": "[email protected]",
"marketing_consent": true,
"update_url": "https://checkout.paddle.com/subscription/update?user=12345&subscription=87654321&hash=eyJpdiI6Ik1RTE1nbHpXQmtJUG5...",
"cancel_url": "https://checkout.paddle.com/subscription/cancel?user=12345&subscription=87654321&hash=eyJpdiI6IlU0Nk5cL1JZeHQyTXd...",
"state": "active",
"signup_date": "2015-10-06 09:44:23",
"last_payment": null,
"payment_information": {
"payment_method": "card",
"card_type": "visa",
"last_four_digits": "1111",
"expiry_date": "02/2020"
},
"next_payment": null
}
]
}
))
end)
assert {:ok,
[
%Paddle.Subscription{
subscription_id: 502_198,
plan_id: 496_199,
user_id: 285_846,
user_email: "[email protected]",
marketing_consent: true,
update_url:
"https://checkout.paddle.com/subscription/update?user=12345&subscription=87654321&hash=eyJpdiI6Ik1RTE1nbHpXQmtJUG5...",
cancel_url:
"https://checkout.paddle.com/subscription/cancel?user=12345&subscription=87654321&hash=eyJpdiI6IlU0Nk5cL1JZeHQyTXd...",
state: "active",
signup_date: ~U"2015-10-06 09:44:23Z",
last_payment: nil,
payment_information: %{
"payment_method" => "card",
"card_type" => "visa",
"last_four_digits" => "1111",
"expiry_date" => "02/2020"
},
next_payment: nil
}
]} == Paddle.Subscription.list()
end
end
| 32.978947 | 147 | 0.471433 |
9e581d4f038238671dc62ecd3488634e45a015a6 | 31 | ex | Elixir | lib/mqtt_udp_sink_ex.ex | yeongsheng-tan/mqtt_udp_sink_ex | 6ddc9914ac2bc54a6da46932525de4da9feb1356 | [
"BSD-3-Clause"
] | null | null | null | lib/mqtt_udp_sink_ex.ex | yeongsheng-tan/mqtt_udp_sink_ex | 6ddc9914ac2bc54a6da46932525de4da9feb1356 | [
"BSD-3-Clause"
] | null | null | null | lib/mqtt_udp_sink_ex.ex | yeongsheng-tan/mqtt_udp_sink_ex | 6ddc9914ac2bc54a6da46932525de4da9feb1356 | [
"BSD-3-Clause"
] | null | null | null | defmodule MqttUdpSinkEx do
end
| 10.333333 | 26 | 0.870968 |
9e5853f31e73be8c5a61e6100d646118e70d43b6 | 428 | exs | Elixir | test/models/post_test.exs | joonatank/darfey | 71ba0a41795bf97c35e940a4659dfbdf4805c46f | [
"MIT"
] | null | null | null | test/models/post_test.exs | joonatank/darfey | 71ba0a41795bf97c35e940a4659dfbdf4805c46f | [
"MIT"
] | null | null | null | test/models/post_test.exs | joonatank/darfey | 71ba0a41795bf97c35e940a4659dfbdf4805c46f | [
"MIT"
] | null | null | null | defmodule Darfey.PostTest do
use Darfey.DataCase
alias DarfeyWeb.Post
@valid_attrs %{body: "some body", title: "some title"}
@invalid_attrs %{}
test "changeset with valid attributes" do
changeset = Post.changeset(%Post{}, @valid_attrs)
assert changeset.valid?
end
test "changeset with invalid attributes" do
changeset = Post.changeset(%Post{}, @invalid_attrs)
refute changeset.valid?
end
end
| 22.526316 | 56 | 0.712617 |
9e587d87bf02b0ca08e641786ab20890effecb47 | 321 | exs | Elixir | PatternMatching/patternmatching.exs | MiyamotoAkira/IntoAdventurePresentation | a41d480bcb922f2950917d7486f159eccd3f5a12 | [
"MIT"
] | null | null | null | PatternMatching/patternmatching.exs | MiyamotoAkira/IntoAdventurePresentation | a41d480bcb922f2950917d7486f159eccd3f5a12 | [
"MIT"
] | null | null | null | PatternMatching/patternmatching.exs | MiyamotoAkira/IntoAdventurePresentation | a41d480bcb922f2950917d7486f159eccd3f5a12 | [
"MIT"
] | null | null | null | defmodule Person do
defstruct name: "Araceli", age: 70
def salute(%Person{name: name, age: age}) when name == "Araceli" do
IO.puts "Happy #{age}th Birthday"
end
def salute(%Person{name: name}) when name != "" do
IO.puts "I salute you, #{name}"
end
def salute(%Person{}) do
IO.puts "Hey Stranger"
end
end
| 20.0625 | 68 | 0.657321 |
9e5897aeb6733e95c795fa1fdd3faf67a2f4d053 | 1,392 | ex | Elixir | demo/elixir.ex | virejdasani/HydroxyTheme | 8c22c679c754226c8d5f9d18fcd35f36804f00bf | [
"MIT"
] | 7 | 2021-04-10T05:29:40.000Z | 2021-07-27T02:58:51.000Z | demo/elixir.ex | virejdasani/Hydroxy | 8af57425136ed8035d769c2667ee3f889465ea6a | [
"MIT"
] | 2 | 2021-08-16T08:41:17.000Z | 2021-09-02T18:05:20.000Z | demo/elixir.ex | greven/umbra-vscode-theme | 20a3a545e555785dba74e7d8aa377bf327963f57 | [
"MIT"
] | 3 | 2022-01-08T04:16:32.000Z | 2022-01-24T00:31:36.000Z | # Taken from Ecto Github Repo:
# https://github.com/elixir-ecto/ecto/blob/master/lib/mix/tasks/ecto.create.ex
defmodule Mix.Tasks.Ecto.Create do
use Mix.Task
import Mix.Ecto
@shortdoc "Creates the repository storage"
@switches [
quiet: :boolean,
repo: [:string, :keep],
no_compile: :boolean,
no_deps_check: :boolean
]
@aliases [
r: :repo,
q: :quiet
]
@doc false
def run(args) do
repos = parse_repo(args)
{opts, _} = OptionParser.parse!(args, strict: @switches, aliases: @aliases)
Enum.each(repos, fn repo ->
ensure_repo(repo, args)
ensure_implements(
repo.__adapter__,
Ecto.Adapter.Storage,
"create storage for #{inspect(repo)}"
)
case repo.__adapter__.storage_up(repo.config) do
:ok ->
unless opts[:quiet] do
Mix.shell().info("The database for #{inspect(repo)} has been created")
end
{:error, :already_up} ->
unless opts[:quiet] do
Mix.shell().info("The database for #{inspect(repo)} has already been created")
end
{:error, term} when is_binary(term) ->
Mix.raise("The database for #{inspect(repo)} couldn't be created: #{term}")
{:error, term} ->
Mix.raise("The database for #{inspect(repo)} couldn't be created: #{inspect(term)}")
end
end)
end
end
| 24.857143 | 94 | 0.599856 |
9e589c230e59bfc3f467d84270b8857a11b23c6d | 4,885 | exs | Elixir | test/phxcrd_web/controllers/authority_controller_test.exs | spapas/phxcrd | 84877896f56400b0cc8624fe96c4fe4f5fd8053c | [
"MIT"
] | 18 | 2019-06-21T09:55:46.000Z | 2022-02-16T18:44:17.000Z | test/phxcrd_web/controllers/authority_controller_test.exs | spapas/phxcrd | 84877896f56400b0cc8624fe96c4fe4f5fd8053c | [
"MIT"
] | 16 | 2019-05-21T20:19:44.000Z | 2020-05-12T08:30:42.000Z | test/phxcrd_web/controllers/authority_controller_test.exs | spapas/phxcrd | 84877896f56400b0cc8624fe96c4fe4f5fd8053c | [
"MIT"
] | 2 | 2019-09-05T00:35:27.000Z | 2020-10-17T16:36:35.000Z | defmodule PhxcrdWeb.AuthorityControllerTest do
use PhxcrdWeb.ConnCase
alias Phxcrd.Auth

# Shared attribute maps for the authority CRUD tests below.
# The `authority_kind_id: 1` is a placeholder; the fixture and the create
# test overwrite it with a real id before use.
@create_attrs %{name: "some name", authority_kind_id: 1}
@update_attrs %{name: "some updated name"}
@invalid_attrs %{name: nil}
# Builds persisted records for the tests, dispatching on the fixture name.
def fixture(:authority) do
  {:ok, kind} = Auth.create_authority_kind(%{name: "some name"})

  attrs = %{@create_attrs | authority_kind_id: kind.id}
  {:ok, authority} = Auth.create_authority(attrs)

  authority
end

def fixture(:authority_kind) do
  {:ok, kind} = Auth.create_authority_kind(%{name: "some name2"})
  kind
end

def fixture(:user) do
  attrs = %{
    name: "some name",
    first_name: "some name",
    last_name: "some name",
    username: "username",
    password: "pwd",
    email: "email"
  }

  {:ok, user} = Auth.create_db_user(attrs)
  user
end
# Simulates an authenticated admin session on the test connection.
defp fake_sign_in(conn, user_id \\ 1) do
  session = [
    permissions: ["admin"],
    user_signed_in?: true,
    user_id: user_id,
    username: "test"
  ]

  Enum.reduce(session, Plug.Test.init_test_session(conn, %{}), fn {key, value}, acc ->
    Plug.Conn.put_session(acc, key, value)
  end)
end
describe "index" do
  # Signed-in admins can list authorities.
  test "lists all authorities", %{conn: conn} do
    conn = get(conn |> fake_sign_in, "/admin/authorities")
    # conn = get(conn |> fake_sign_in, AdminRoutes.authority_path(conn, :index))
    assert html_response(conn, 200) =~ "Authority list"
  end

  # Anonymous requests are redirected away.
  test "does not allow anonymous access", %{conn: conn} do
    conn = get(conn, AdminRoutes.authority_path(conn, :index))
    assert html_response(conn, 302) =~ "redirected"
  end

  # A signed-in session without the "admin" permission is also redirected.
  test "does not allow access without permissions", %{conn: conn} do
    conn =
      conn
      |> Plug.Test.init_test_session(%{})
      |> Plug.Conn.put_session(:permissions, [])
      |> Plug.Conn.put_session(:user_signed_in?, true)
      |> Plug.Conn.put_session(:user_id, 1)
      |> Plug.Conn.put_session(:username, "test")

    conn = get(conn, AdminRoutes.authority_path(conn, :index))
    assert html_response(conn, 302) =~ "redirected"
  end
end
describe "new authority" do
  # The creation form renders for signed-in admins.
  test "renders form", %{conn: conn} do
    conn = get(conn |> fake_sign_in, AdminRoutes.authority_path(conn, :new))
    assert html_response(conn, 200) =~ "New authority"
  end
end
describe "create authority" do
  setup [:create_authority_kind, :create_user]

  # Valid attrs (with a real authority_kind_id) redirect to the show page.
  test "redirects to show when data is valid", %{
    conn: conn,
    authority_kind: authority_kind,
    user: user
  } do
    conn =
      post(conn |> fake_sign_in(user.id), AdminRoutes.authority_path(conn, :create),
        authority: %{@create_attrs | authority_kind_id: authority_kind.id}
      )

    assert %{id: id} = redirected_params(conn)
    assert redirected_to(conn) == AdminRoutes.authority_path(conn, :show, id)

    conn = get(conn, AdminRoutes.authority_path(conn, :show, id))
    assert html_response(conn, 200) =~ "Authority"
  end

  # Invalid attrs re-render the "New authority" form.
  test "renders errors when data is invalid", %{conn: conn} do
    conn =
      post(conn |> fake_sign_in, AdminRoutes.authority_path(conn, :create),
        authority: @invalid_attrs
      )

    assert html_response(conn, 200) =~ "New authority"
  end
end
describe "edit authority" do
  setup [:create_authority]

  # The edit form renders for an existing authority.
  test "renders form for editing chosen authority", %{
    conn: conn,
    authority: authority
  } do
    conn =
      get(
        conn |> fake_sign_in,
        AdminRoutes.authority_path(conn, :edit, authority)
      )

    assert html_response(conn, 200) =~ "Edit authority"
  end
end
describe "update authority" do
  setup [:create_authority, :create_user]

  # A valid update redirects to show and the new name is rendered.
  test "redirects when data is valid", %{
    conn: conn,
    authority: authority,
    user: %{id: user_id}
  } do
    conn =
      put(conn |> fake_sign_in(user_id), AdminRoutes.authority_path(conn, :update, authority),
        authority: @update_attrs
      )

    assert redirected_to(conn) == AdminRoutes.authority_path(conn, :show, authority)

    conn = get(conn, AdminRoutes.authority_path(conn, :show, authority))
    assert html_response(conn, 200) =~ "some updated name"
  end

  # An invalid update re-renders the edit form.
  test "renders errors when data is invalid", %{conn: conn, authority: authority} do
    conn =
      put(conn |> fake_sign_in, AdminRoutes.authority_path(conn, :update, authority),
        authority: @invalid_attrs
      )

    assert html_response(conn, 200) =~ "Edit authority"
  end
end
# Setup helper: provides an :authority in the test context.
defp create_authority(_context), do: {:ok, authority: fixture(:authority)}
# Setup helper: provides an :authority_kind in the test context.
defp create_authority_kind(_context), do: {:ok, authority_kind: fixture(:authority_kind)}
# Setup helper: provides a :user in the test context.
defp create_user(_context), do: {:ok, user: fixture(:user)}
end
| 28.735294 | 96 | 0.640942 |
9e589f87d4aa426c65eee9e1962586fad83285c0 | 236 | exs | Elixir | priv/repo/migrations/20210630144545_add_status_to_sms_messages.exs | bikebrigade/dispatch | eb622fe4f6dab7c917d678d3d7a322a01f97da44 | [
"Apache-2.0"
] | 28 | 2021-10-11T01:53:53.000Z | 2022-03-24T17:45:55.000Z | priv/repo/migrations/20210630144545_add_status_to_sms_messages.exs | bikebrigade/dispatch | eb622fe4f6dab7c917d678d3d7a322a01f97da44 | [
"Apache-2.0"
] | 20 | 2021-10-21T08:12:31.000Z | 2022-03-31T13:35:53.000Z | priv/repo/migrations/20210630144545_add_status_to_sms_messages.exs | bikebrigade/dispatch | eb622fe4f6dab7c917d678d3d7a322a01f97da44 | [
"Apache-2.0"
] | null | null | null | defmodule BikeBrigade.Repo.Migrations.AddStatusToSmsMessages do
use Ecto.Migration
# Adds a `twilio_status` column to sms_messages and indexes `twilio_sid`.
def change do
  alter table(:sms_messages) do
    add :twilio_status, :string
  end

  create index(:sms_messages, [:twilio_sid])
end
end
| 19.666667 | 63 | 0.733051 |
9e58b6ae28ac952309e1e1cf620916940f1dfe55 | 3,207 | exs | Elixir | test/supervisor_test.exs | LaudateCorpus1/gen_registry | 74246ae4e4d9b3c330bacc529c9dd32851378446 | [
"MIT"
] | 39 | 2020-04-11T07:21:23.000Z | 2022-02-03T13:17:59.000Z | test/supervisor_test.exs | LaudateCorpus1/gen_registry | 74246ae4e4d9b3c330bacc529c9dd32851378446 | [
"MIT"
] | 2 | 2019-06-21T17:47:11.000Z | 2019-06-21T19:10:35.000Z | test/supervisor_test.exs | LaudateCorpus1/gen_registry | 74246ae4e4d9b3c330bacc529c9dd32851378446 | [
"MIT"
] | 7 | 2021-01-19T00:00:10.000Z | 2022-03-17T09:51:10.000Z | defmodule Supervisor.Test do
use ExUnit.Case
# Covers the legacy (pre Elixir 1.5) child-spec helper GenRegistry.Spec.child_spec/1,2.
describe "pre-1.5 specs" do
  test "valid spec" do
    children = [
      GenRegistry.Spec.child_spec(ExampleWorker)
    ]

    {:ok, supervisor_pid} = Supervisor.start_link(children, strategy: :one_for_one)

    # Exactly one child, started as a supervisor.
    assert Supervisor.count_children(supervisor_pid) == %{
      active: 1,
      specs: 1,
      supervisors: 1,
      workers: 0
    }

    assert [{ExampleWorker, _, :supervisor, _}] = Supervisor.which_children(supervisor_pid)

    Supervisor.stop(supervisor_pid)
  end

  test "can customize the name and run multiple registries for the same module" do
    children = [
      GenRegistry.Spec.child_spec(ExampleWorker, name: ExampleWorker.A),
      GenRegistry.Spec.child_spec(ExampleWorker, name: ExampleWorker.B),
    ]

    {:ok, supervisor_pid} = Supervisor.start_link(children, strategy: :one_for_one)

    # Both named registries start side by side under one supervisor.
    assert Supervisor.count_children(supervisor_pid) == %{
      active: 2,
      specs: 2,
      supervisors: 2,
      workers: 0
    }

    children = Supervisor.which_children(supervisor_pid)
    assert Enum.find(children, &match?({ExampleWorker.A, _, :supervisor, _}, &1))
    assert Enum.find(children, &match?({ExampleWorker.B, _, :supervisor, _}, &1))

    Supervisor.stop(supervisor_pid)
  end
end
# Covers the modern `{GenRegistry, opts}` child-spec tuple form.
describe "modern specs" do
  # Missing the mandatory :worker_module option raises KeyError.
  test "invalid spec, no arguments" do
    assert_raise KeyError, "key :worker_module not found in: []", fn ->
      children = [
        GenRegistry
      ]

      Supervisor.start_link(children, strategy: :one_for_one)
    end
  end

  test "invalid spec, no :worker_module argument" do
    assert_raise KeyError, "key :worker_module not found in: [test_key: :test_value]", fn ->
      children = [
        {GenRegistry, test_key: :test_value}
      ]

      Supervisor.start_link(children, strategy: :one_for_one)
    end
  end

  test "valid spec" do
    children = [
      {GenRegistry, worker_module: ExampleWorker}
    ]

    {:ok, supervisor_pid} = Supervisor.start_link(children, strategy: :one_for_one)

    # Exactly one child, started as a supervisor.
    assert Supervisor.count_children(supervisor_pid) == %{
      active: 1,
      specs: 1,
      supervisors: 1,
      workers: 0
    }

    assert [{ExampleWorker, _, :supervisor, _}] = Supervisor.which_children(supervisor_pid)

    Supervisor.stop(supervisor_pid)
  end

  test "can customize the name and run multiple registries for the same module" do
    children = [
      {GenRegistry, worker_module: ExampleWorker, name: ExampleWorker.A},
      {GenRegistry, worker_module: ExampleWorker, name: ExampleWorker.B},
    ]

    {:ok, supervisor_pid} = Supervisor.start_link(children, strategy: :one_for_one)

    assert Supervisor.count_children(supervisor_pid) == %{
      active: 2,
      specs: 2,
      supervisors: 2,
      workers: 0
    }

    children = Supervisor.which_children(supervisor_pid)
    assert Enum.find(children, &match?({ExampleWorker.A, _, :supervisor, _}, &1))
    assert Enum.find(children, &match?({ExampleWorker.B, _, :supervisor, _}, &1))

    Supervisor.stop(supervisor_pid)
  end
end
end
| 28.633929 | 94 | 0.639539 |
9e58c1e6364e2d6e8986d7e808afec26a988b61d | 4,418 | ex | Elixir | installer/lib/phx_new/web.ex | shritesh/phoenix | 4bf53ecaae5a9057ea57c248964490dfdee312af | [
"MIT"
] | null | null | null | installer/lib/phx_new/web.ex | shritesh/phoenix | 4bf53ecaae5a9057ea57c248964490dfdee312af | [
"MIT"
] | 1 | 2021-11-17T12:10:06.000Z | 2021-11-24T12:53:45.000Z | installer/lib/phx_new/web.ex | shritesh/phoenix | 4bf53ecaae5a9057ea57c248964490dfdee312af | [
"MIT"
] | null | null | null | defmodule Phx.New.Web do
@moduledoc false
use Phx.New.Generator
alias Phx.New.{Project}
@pre "phx_umbrella/apps/app_name_web"
# Base scaffold: configs live at the umbrella project root (:project),
# everything else goes into the web app (:web).
template :new, [
  {:config, "#{@pre}/config/config.exs", :project, "config/config.exs"},
  {:config, "#{@pre}/config/dev.exs", :project, "config/dev.exs"},
  {:config, "#{@pre}/config/prod.exs", :project, "config/prod.exs"},
  {:config, "#{@pre}/config/prod.secret.exs", :project, "config/prod.secret.exs"},
  {:config, "#{@pre}/config/test.exs", :project, "config/test.exs"},
  {:eex, "#{@pre}/lib/app_name.ex", :web, "lib/:web_app.ex"},
  {:eex, "#{@pre}/lib/app_name/application.ex", :web, "lib/:web_app/application.ex"},
  {:eex, "phx_web/channels/user_socket.ex", :web, "lib/:web_app/channels/user_socket.ex"},
  {:keep, "phx_web/controllers", :web, "lib/:web_app/controllers"},
  {:eex, "phx_web/endpoint.ex", :web, "lib/:web_app/endpoint.ex"},
  {:eex, "phx_web/router.ex", :web, "lib/:web_app/router.ex"},
  {:eex, "phx_web/views/error_helpers.ex", :web, "lib/:web_app/views/error_helpers.ex"},
  {:eex, "phx_web/views/error_view.ex", :web, "lib/:web_app/views/error_view.ex"},
  {:eex, "#{@pre}/mix.exs", :web, "mix.exs"},
  {:eex, "#{@pre}/README.md", :web, "README.md"},
  {:eex, "#{@pre}/gitignore", :web, ".gitignore"},
  {:keep, "phx_test/channels", :web, "test/:web_app/channels"},
  {:keep, "phx_test/controllers", :web, "test/:web_app/controllers"},
  {:eex, "#{@pre}/test/test_helper.exs", :web, "test/test_helper.exs"},
  {:eex, "phx_test/support/channel_case.ex", :web, "test/support/channel_case.ex"},
  {:eex, "phx_test/support/conn_case.ex", :web, "test/support/conn_case.ex"},
  {:eex, "phx_test/views/error_view_test.exs", :web, "test/:web_app/views/error_view_test.exs"},
  {:eex, "#{@pre}/formatter.exs", :web, ".formatter.exs"},
]

# Gettext scaffolding (always copied by generate/1).
template :gettext, [
  {:eex, "phx_gettext/gettext.ex", :web, "lib/:web_app/gettext.ex"},
  {:eex, "phx_gettext/en/LC_MESSAGES/errors.po", :web, "priv/gettext/en/LC_MESSAGES/errors.po"},
  {:eex, "phx_gettext/errors.pot", :web, "priv/gettext/errors.pot"}
]

# HTML views/templates, only copied when the project wants HTML.
template :html, [
  {:eex, "phx_web/controllers/page_controller.ex", :web, "lib/:web_app/controllers/page_controller.ex"},
  {:eex, "phx_web/templates/layout/app.html.eex", :web, "lib/:web_app/templates/layout/app.html.eex"},
  {:eex, "phx_web/templates/page/index.html.eex", :web, "lib/:web_app/templates/page/index.html.eex"},
  {:eex, "phx_web/views/layout_view.ex", :web, "lib/:web_app/views/layout_view.ex"},
  {:eex, "phx_web/views/page_view.ex", :web, "lib/:web_app/views/page_view.ex"},
  {:eex, "phx_test/controllers/page_controller_test.exs", :web, "test/:web_app/controllers/page_controller_test.exs"},
  {:eex, "phx_test/views/layout_view_test.exs", :web, "test/:web_app/views/layout_view_test.exs"},
  {:eex, "phx_test/views/page_view_test.exs", :web, "test/:web_app/views/page_view_test.exs"},
]
# Fills in the umbrella-specific fields on the project struct: the web app
# lives two directories below the umbrella root.
def prepare_project(%Project{app: app} = project) when not is_nil(app) do
  web_path = Path.expand(project.base_path)
  project_path = web_path |> Path.dirname() |> Path.dirname()

  %Project{
    project
    | in_umbrella?: true,
      project_path: project_path,
      web_path: web_path,
      web_app: app,
      generators: [context_app: false],
      web_namespace: project.app_mod
  }
end
# Copies all templates for the web app and returns the project unchanged.
def generate(%Project{} = project) do
  inject_umbrella_config_defaults(project)

  copy_from(project, __MODULE__, :new)
  copy_from(project, __MODULE__, :gettext)

  if Project.html?(project) do
    gen_html(project)
  end

  # Asset pipeline selection: webpack wins; otherwise static assets when
  # HTML was requested, else a bare skeleton.
  assets = {Project.webpack?(project), Project.html?(project)}

  case assets do
    {true, _} -> Phx.New.Single.gen_webpack(project)
    {false, true} -> Phx.New.Single.gen_static(project)
    {false, false} -> Phx.New.Single.gen_bare(project)
  end

  project
end
# Copies the HTML template set for the project.
defp gen_html(%Project{} = project), do: copy_from(project, __MODULE__, :html)
end
| 52.595238 | 122 | 0.586238 |
9e58cafb97e4ecd8915104948372c699351bd7b8 | 1,894 | ex | Elixir | processors/IA32/bochs/build/debian/manpage.1.ex | pavel-krivanek/opensmalltalk-vm | 694dfe3ed015e16f5b8e9cf17d37e4bdd32bea16 | [
"MIT"
] | 445 | 2016-06-30T08:19:11.000Z | 2022-03-28T06:09:49.000Z | processors/IA32/bochs/build/debian/manpage.1.ex | pavel-krivanek/opensmalltalk-vm | 694dfe3ed015e16f5b8e9cf17d37e4bdd32bea16 | [
"MIT"
] | 439 | 2016-06-29T20:14:36.000Z | 2022-03-17T19:59:58.000Z | processors/IA32/bochs/build/debian/manpage.1.ex | pavel-krivanek/opensmalltalk-vm | 694dfe3ed015e16f5b8e9cf17d37e4bdd32bea16 | [
"MIT"
] | 137 | 2016-07-02T17:32:07.000Z | 2022-03-20T11:17:25.000Z | .\" Hey, EMACS: -*- nroff -*-
.\" First parameter, NAME, should be all caps
.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection
.\" other parameters are allowed: see man(7), man(1)
.TH BOCHS SECTION "June 5, 2001"
.\" Please adjust this date whenever revising the manpage.
.\"
.\" Some roff macros, for reference:
.\" .nh disable hyphenation
.\" .hy enable hyphenation
.\" .ad l left justify
.\" .ad b justify to both left and right margins
.\" .nf disable filling
.\" .fi enable filling
.\" .br insert line break
.\" .sp <n> insert n+1 empty lines
.\" for manpage-specific macros, see man(7)
.SH NAME
bochs \- program to do something
.SH SYNOPSIS
.B bochs
.RI [ options ] " files" ...
.br
.B bar
.RI [ options ] " files" ...
.SH DESCRIPTION
This manual page documents briefly the
.B bochs
and
.B bar
commands.
This manual page was written for the Debian GNU/Linux distribution
because the original program does not have a manual page.
Instead, it has documentation in the GNU Info format; see below.
.PP
.\" TeX users may be more comfortable with the \fB<whatever>\fP and
.\" \fI<whatever>\fP escape sequences to invoke bold face and italics,
.\" respectively.
\fBbochs\fP is a program that...
.SH OPTIONS
These programs follow the usual GNU command line syntax, with long
options starting with two dashes (`-').
A summary of options is included below.
For a complete description, see the Info files.
.TP
.B \-h, \-\-help
Show summary of options.
.TP
.B \-v, \-\-version
Show version of program.
.SH SEE ALSO
.BR bar (1),
.BR baz (1).
.br
The programs are documented fully by
.IR "The Rise and Fall of a Fooish Bar" ,
available via the Info system.
.SH AUTHOR
This manual page was written by Rob Lemley <[email protected]>,
for the Debian GNU/Linux system (but may be used by others).
| 31.04918 | 71 | 0.68321 |
9e58cd24a391b4f59b41f948bc1f3229d1e2b01a | 2,399 | ex | Elixir | clients/vision/lib/google_api/vision/v1/model/google_cloud_vision_v1p2beta1_text_annotation_text_property.ex | matehat/elixir-google-api | c1b2523c2c4cdc9e6ca4653ac078c94796b393c3 | [
"Apache-2.0"
] | 1 | 2018-12-03T23:43:10.000Z | 2018-12-03T23:43:10.000Z | clients/vision/lib/google_api/vision/v1/model/google_cloud_vision_v1p2beta1_text_annotation_text_property.ex | matehat/elixir-google-api | c1b2523c2c4cdc9e6ca4653ac078c94796b393c3 | [
"Apache-2.0"
] | null | null | null | clients/vision/lib/google_api/vision/v1/model/google_cloud_vision_v1p2beta1_text_annotation_text_property.ex | matehat/elixir-google-api | c1b2523c2c4cdc9e6ca4653ac078c94796b393c3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This class is auto generated by the elixir code generator program.
# Do not edit the class manually.
# Generated model (see file header: auto generated, do not edit manually).
# Documentation-only edits here; the generator owns the structure.
defmodule GoogleApi.Vision.V1.Model.GoogleCloudVisionV1p2beta1TextAnnotationTextProperty do
  @moduledoc """
  Additional information detected on the structural component.

  ## Attributes

  *   `detectedBreak` (*type:* `GoogleApi.Vision.V1.Model.GoogleCloudVisionV1p2beta1TextAnnotationDetectedBreak.t`, *default:* `nil`) - Detected start or end of a text segment.
  *   `detectedLanguages` (*type:* `list(GoogleApi.Vision.V1.Model.GoogleCloudVisionV1p2beta1TextAnnotationDetectedLanguage.t)`, *default:* `nil`) - A list of detected languages together with confidence.
  """

  use GoogleApi.Gax.ModelBase

  @type t :: %__MODULE__{
          :detectedBreak =>
            GoogleApi.Vision.V1.Model.GoogleCloudVisionV1p2beta1TextAnnotationDetectedBreak.t(),
          :detectedLanguages =>
            list(
              GoogleApi.Vision.V1.Model.GoogleCloudVisionV1p2beta1TextAnnotationDetectedLanguage.t()
            )
        }

  field(
    :detectedBreak,
    as: GoogleApi.Vision.V1.Model.GoogleCloudVisionV1p2beta1TextAnnotationDetectedBreak
  )

  field(
    :detectedLanguages,
    as: GoogleApi.Vision.V1.Model.GoogleCloudVisionV1p2beta1TextAnnotationDetectedLanguage,
    type: :list
  )
end
# Poison decoding: delegate to the generated helper on the model module.
defimpl Poison.Decoder,
  for: GoogleApi.Vision.V1.Model.GoogleCloudVisionV1p2beta1TextAnnotationTextProperty do
  alias GoogleApi.Vision.V1.Model.GoogleCloudVisionV1p2beta1TextAnnotationTextProperty, as: Model

  def decode(value, options), do: Model.decode(value, options)
end
# Poison encoding is shared: all generated models serialize via Gax.ModelBase.
defimpl Poison.Encoder,
  for: GoogleApi.Vision.V1.Model.GoogleCloudVisionV1p2beta1TextAnnotationTextProperty do
  def encode(value, options), do: GoogleApi.Gax.ModelBase.encode(value, options)
end
| 35.80597 | 203 | 0.755732 |
9e590e1f3a3f8ee5b99cbf06bbfdb6fd5df6fd82 | 1,380 | ex | Elixir | clients/blogger/lib/google_api/blogger/v3/model/user_blogs.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | 1 | 2021-12-20T03:40:53.000Z | 2021-12-20T03:40:53.000Z | clients/blogger/lib/google_api/blogger/v3/model/user_blogs.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | 1 | 2020-08-18T00:11:23.000Z | 2020-08-18T00:44:16.000Z | clients/blogger/lib/google_api/blogger/v3/model/user_blogs.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
# Generated model (see file header: auto generated, do not edit manually).
defmodule GoogleApi.Blogger.V3.Model.UserBlogs do
  @moduledoc """
  The container of blogs for this user.

  ## Attributes

  *   `selfLink` (*type:* `String.t`, *default:* `nil`) - The URL of the Blogs for this user.
  """

  use GoogleApi.Gax.ModelBase

  @type t :: %__MODULE__{
          :selfLink => String.t() | nil
        }

  field(:selfLink)
end
# Poison decoding: delegate to the generated helper on the model module.
defimpl Poison.Decoder, for: GoogleApi.Blogger.V3.Model.UserBlogs do
  alias GoogleApi.Blogger.V3.Model.UserBlogs, as: Model

  def decode(value, options), do: Model.decode(value, options)
end
# Poison encoding is shared: all generated models serialize via Gax.ModelBase.
defimpl Poison.Encoder, for: GoogleApi.Blogger.V3.Model.UserBlogs do
  def encode(value, options), do: GoogleApi.Gax.ModelBase.encode(value, options)
end
| 29.361702 | 93 | 0.726812 |
9e591d536d26484e00e0234c44cc58403017c001 | 2,397 | ex | Elixir | clients/display_video/lib/google_api/display_video/v1/model/date.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | 1 | 2021-12-20T03:40:53.000Z | 2021-12-20T03:40:53.000Z | clients/display_video/lib/google_api/display_video/v1/model/date.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | 1 | 2020-08-18T00:11:23.000Z | 2020-08-18T00:44:16.000Z | clients/display_video/lib/google_api/display_video/v1/model/date.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
# Generated model (see file header: auto generated, do not edit manually).
defmodule GoogleApi.DisplayVideo.V1.Model.Date do
  @moduledoc """
  Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values * A month and day value, with a zero year, such as an anniversary * A year on its own, with zero month and day values * A year and month value, with a zero day, such as a credit card expiration date Related types are google.type.TimeOfDay and `google.protobuf.Timestamp`.

  ## Attributes

  *   `day` (*type:* `integer()`, *default:* `nil`) - Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.
  *   `month` (*type:* `integer()`, *default:* `nil`) - Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.
  *   `year` (*type:* `integer()`, *default:* `nil`) - Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.
  """

  use GoogleApi.Gax.ModelBase

  @type t :: %__MODULE__{
          :day => integer() | nil,
          :month => integer() | nil,
          :year => integer() | nil
        }

  field(:day)
  field(:month)
  field(:year)
end
# Poison decoding: delegate to the generated helper on the model module.
defimpl Poison.Decoder, for: GoogleApi.DisplayVideo.V1.Model.Date do
  alias GoogleApi.DisplayVideo.V1.Model.Date, as: Model

  def decode(value, options), do: Model.decode(value, options)
end
# Poison encoding is shared: all generated models serialize via Gax.ModelBase.
defimpl Poison.Encoder, for: GoogleApi.DisplayVideo.V1.Model.Date do
  def encode(value, options), do: GoogleApi.Gax.ModelBase.encode(value, options)
end
| 45.226415 | 567 | 0.713809 |
9e593a11b78fe3b3d107bc393def00418fe83d0a | 2,783 | ex | Elixir | lib/algoliax/resources/object.ex | kianmeng/algoliax | 7226c9e7a142f24522a52d390f0a828b85b3abf9 | [
"BSD-2-Clause"
] | 25 | 2020-12-18T10:05:35.000Z | 2022-02-01T13:55:11.000Z | lib/algoliax/resources/object.ex | kianmeng/algoliax | 7226c9e7a142f24522a52d390f0a828b85b3abf9 | [
"BSD-2-Clause"
] | 4 | 2021-03-04T08:45:49.000Z | 2022-03-29T12:07:35.000Z | lib/algoliax/resources/object.ex | kianmeng/algoliax | 7226c9e7a142f24522a52d390f0a828b85b3abf9 | [
"BSD-2-Clause"
] | 4 | 2021-02-05T15:21:44.000Z | 2022-03-02T16:12:15.000Z | defmodule Algoliax.Resources.Object do
@moduledoc false
import Algoliax.Utils, only: [index_name: 2, object_id_attribute: 1]
import Algoliax.Client, only: [request: 1]
alias Algoliax.TemporaryIndexer
# Fetches the Algolia object that corresponds to `model` from the index
# configured for `module`.
def get_object(module, settings, model) do
  url_params = [
    index_name: index_name(module, settings),
    object_id: get_object_id(module, settings, model)
  ]

  request(%{action: :get_object, url_params: url_params})
end
# Batch-saves `models` as one Algolia batch request. Models without an
# applicable action (not indexable and no :force_delete) are skipped.
def save_objects(module, settings, models, opts) do
  batch_requests =
    Enum.flat_map(models, fn model ->
      case get_action(module, model, opts) do
        nil -> []
        action -> [build_batch_object(module, settings, model, action)]
      end
    end)

  call_indexer(:save_objects, module, settings, models, opts)

  request(%{
    action: :save_objects,
    url_params: [index_name: index_name(module, settings)],
    body: %{requests: batch_requests}
  })
end
# Saves a single object, or returns `{:not_indexable, model}` when the
# indexed module reports the model should not be indexed.
def save_object(module, settings, model) do
  if apply(module, :to_be_indexed?, [model]) do
    do_save_object(module, settings, model)
  else
    {:not_indexable, model}
  end
end

# Builds the payload, mirrors the call to the temporary indexer, then
# issues the save request.
defp do_save_object(module, settings, model) do
  object = build_object(module, settings, model)
  call_indexer(:save_object, module, settings, model)

  request(%{
    action: :save_object,
    url_params: [index_name: index_name(module, settings), object_id: object.objectID],
    body: object
  })
end
# Removes the object that corresponds to `model` from the index.
def delete_object(module, settings, model) do
  call_indexer(:delete_object, module, settings, model)

  url_params = [
    index_name: index_name(module, settings),
    object_id: get_object_id(module, settings, model)
  ]

  request(%{action: :delete_object, url_params: url_params})
end
# A delete entry only needs the objectID; any other action ships the
# fully built object payload.
defp build_batch_object(module, settings, model, "deleteObject") do
  %{action: "deleteObject", body: %{objectID: get_object_id(module, settings, model)}}
end

defp build_batch_object(module, settings, model, action) do
  %{action: action, body: build_object(module, settings, model)}
end
# Builds the serializable payload for `model` and stamps its :objectID.
defp build_object(module, settings, model) do
  object = apply(module, :build_object, [model])
  Map.put(object, :objectID, get_object_id(module, settings, model))
end
# Resolves the Algolia objectID: the indexed module may compute its own id,
# or return :default to fall back to the configured id attribute.
defp get_object_id(module, settings, model) do
  custom_id = apply(module, :get_object_id, [model])

  if custom_id == :default do
    Map.fetch!(model, object_id_attribute(settings))
  else
    to_string(custom_id)
  end
end
# Decides the batch action for `model`: update when indexable, delete when
# :force_delete is set, otherwise nil (skip).
defp get_action(module, model, opts) do
  cond do
    apply(module, :to_be_indexed?, [model]) -> "updateObject"
    Keyword.get(opts, :force_delete) -> "deleteObject"
    true -> nil
  end
end
# Mirrors every indexing operation onto the temporary indexer.
defp call_indexer(action, module, settings, models, opts \\ []),
  do: TemporaryIndexer.run(action, module, settings, models, opts)
end
| 25.53211 | 91 | 0.648581 |
9e596fb1196d9b415c91df63566f87fbcb651d73 | 271 | ex | Elixir | lib/web/models/settings.ex | yknx4/opencov | dc961a41e29b41b0657bc2a64bb67350a65477b8 | [
"MIT"
] | 8 | 2021-08-22T10:37:57.000Z | 2022-01-10T11:27:06.000Z | lib/web/models/settings.ex | yknx4/librecov | dc961a41e29b41b0657bc2a64bb67350a65477b8 | [
"MIT"
] | 109 | 2021-08-20T04:08:04.000Z | 2022-01-03T07:39:18.000Z | lib/web/models/settings.ex | Librecov/librecov | dc961a41e29b41b0657bc2a64bb67350a65477b8 | [
"MIT"
] | null | null | null | defmodule Librecov.Settings do
use Librecov.Web, :model
# Application settings row.
schema "settings" do
  field(:signup_enabled, :boolean, default: false)
  # NOTE(review): the delimiter/format of this domain list is not visible
  # here — confirm against the code that parses it.
  field(:restricted_signup_domains, :string, default: "")
  field(:default_project_visibility, :string)

  timestamps()
end
end
| 22.583333 | 59 | 0.723247 |
9e5976b9725da507d1aa14fefcc80cb5f206cf02 | 2,595 | exs | Elixir | test/mars/track/track_test.exs | jchristopherinc/mars | a109958cb549ede8d983c3af8183d52528a5eaea | [
"MIT"
] | 2 | 2020-08-28T19:17:33.000Z | 2020-09-13T18:49:20.000Z | test/mars/track/track_test.exs | jchristopherinc/mars | a109958cb549ede8d983c3af8183d52528a5eaea | [
"MIT"
] | 5 | 2018-10-28T14:39:26.000Z | 2019-01-31T17:23:36.000Z | test/mars/track/track_test.exs | jchristopherinc/mars | a109958cb549ede8d983c3af8183d52528a5eaea | [
"MIT"
] | null | null | null | defmodule Mars.TrackTest do
use Mars.DataCase
alias Mars.Track
# Exercises the Mars.Track context CRUD API for events.
describe "event" do
  alias Mars.Track.Event

  # Baseline attribute maps used by the tests below.
  @valid_attrs %{
    app_id: "some app_id",
    created_at: "2010-04-17 14:00:00.000000Z",
    event: "some event",
    message_id: "some message_id"
  }
  @update_attrs %{
    app_id: "some updated app_id",
    created_at: "2011-05-18 15:01:01.000000Z",
    event: "some updated event",
    message_id: "some updated message_id"
  }
  @invalid_attrs %{app_id: nil, created_at: nil, event: nil, message_id: nil}

  # Inserts an event, merging `attrs` over @valid_attrs.
  def event_fixture(attrs \\ %{}) do
    {:ok, event} =
      attrs
      |> Enum.into(@valid_attrs)
      |> Track.create_event()

    event
  end

  test "list_event/0 returns all event" do
    event = event_fixture()
    assert Track.list_event() == [event]
  end

  test "get_event!/1 returns the event with given id" do
    event = event_fixture()
    assert Track.get_event!(event.id) == event
  end

  test "create_event/1 with valid data creates a event" do
    assert {:ok, %Event{} = event} = Track.create_event(@valid_attrs)
    assert event.app_id == "some app_id"
    assert event.created_at == DateTime.from_naive!(~N[2010-04-17 14:00:00.000000Z], "Etc/UTC")
    assert event.event == "some event"
    assert event.message_id == "some message_id"
  end

  test "create_event/1 with invalid data returns error changeset" do
    assert {:error, %Ecto.Changeset{}} = Track.create_event(@invalid_attrs)
  end

  test "update_event/2 with valid data updates the event" do
    event = event_fixture()
    assert {:ok, %Event{} = event} = Track.update_event(event, @update_attrs)
    assert event.app_id == "some updated app_id"
    assert event.created_at == DateTime.from_naive!(~N[2011-05-18 15:01:01.000000Z], "Etc/UTC")
    assert event.event == "some updated event"
    assert event.message_id == "some updated message_id"
  end

  # A failed update must leave the stored record untouched.
  test "update_event/2 with invalid data returns error changeset" do
    event = event_fixture()
    assert {:error, %Ecto.Changeset{}} = Track.update_event(event, @invalid_attrs)
    assert event == Track.get_event!(event.id)
  end

  test "delete_event/1 deletes the event" do
    event = event_fixture()
    assert {:ok, %Event{}} = Track.delete_event(event)
    assert_raise Ecto.NoResultsError, fn -> Track.get_event!(event.id) end
  end

  test "change_event/1 returns a event changeset" do
    event = event_fixture()
    assert %Ecto.Changeset{} = Track.change_event(event)
  end
end
end
| 31.646341 | 97 | 0.650096 |
9e59a65e9551accc8eaae65e25da16350e8bf1a6 | 388 | ex | Elixir | apps/etv_data/lib/repo/schema.ex | sheharyarn/etv | 65b96e436d5fe6fa11bfb293fa676f0bbf2d5121 | [
"MIT"
] | 2 | 2020-01-13T09:34:04.000Z | 2021-05-19T07:51:07.000Z | apps/etv_data/lib/repo/schema.ex | sheharyarn/etv | 65b96e436d5fe6fa11bfb293fa676f0bbf2d5121 | [
"MIT"
] | null | null | null | apps/etv_data/lib/repo/schema.ex | sheharyarn/etv | 65b96e436d5fe6fa11bfb293fa676f0bbf2d5121 | [
"MIT"
] | 2 | 2018-09-01T20:58:11.000Z | 2021-05-19T07:51:11.000Z | defmodule ETV.Data.Repo.Schema do
@moduledoc """
Custom Macro for initializing Ecto Schemas with
sane defaults
"""
# Injects the common schema boilerplate (Ecto.Schema, Ecto.Rut shortcuts,
# changeset helpers and query/repo aliases) into any module that does
# `use ETV.Data.Repo.Schema`.
defmacro __using__(_opts) do
  quote do
    use Ecto.Schema
    use Ecto.Rut, repo: ETV.Data.Repo

    import Ecto.Changeset

    require Ecto.Query
    alias Ecto.Query

    alias ETV.Data.Repo
    alias ETV.Data.Repo.Enums
  end
end
end
| 16.166667 | 49 | 0.654639 |
9e59af49e0ee3660846f774f017ad3915cce8ff6 | 5,107 | ex | Elixir | lib/jalka2021/accounts/user.ex | kriips/jalka2021 | f4d968e20cae116fd4056bff2f937cd036421977 | [
"MIT"
] | null | null | null | lib/jalka2021/accounts/user.ex | kriips/jalka2021 | f4d968e20cae116fd4056bff2f937cd036421977 | [
"MIT"
] | null | null | null | lib/jalka2021/accounts/user.ex | kriips/jalka2021 | f4d968e20cae116fd4056bff2f937cd036421977 | [
"MIT"
] | null | null | null | defmodule Jalka2021.Accounts.User do
use Ecto.Schema
import Ecto.Changeset
alias Jalka2021.Accounts
@derive {Inspect, except: [:password]}
schema "users" do
  field :email, :string
  field :name, :string
  # Virtual: accepted on changesets, hashed into :hashed_password, never persisted.
  field :password, :string, virtual: true
  field :hashed_password, :string
  field :confirmed_at, :naive_datetime

  timestamps()
end
@doc """
A user changeset for registration.

It is important to validate the length of both email and password.
Otherwise databases may truncate the email without warnings, which
could lead to unpredictable or insecure behaviour. Long passwords may
also be very expensive to hash for certain algorithms.

## Options

  * `:hash_password` - Hashes the password so it can be stored securely
    in the database and ensures the password field is cleared to prevent
    leaks in the logs. If password hashing is not needed and clearing the
    password field is not desired (like when using this changeset for
    validations on a LiveView form), this option can be set to `false`.
    Defaults to `true`.
"""
def registration_changeset(user, attrs, opts \\ []) do
  changeset = cast(user, attrs, [:name, :password])

  changeset
  |> validate_name()
  |> validate_password(opts)
end
@doc false
def changeset(user, attrs) do
user
|> cast(attrs, [:name, :password, :group_score, :playoff_score])
|> unique_constraint(:name)
|> validate_required([:name, :password])
end
  # Name must be present, unique (checked optimistically against the DB and
  # via the DB constraint) and present on the registration whitelist.
  defp validate_name(changeset) do
    changeset
    |> validate_required([:name])
    |> unsafe_validate_unique(:name, Jalka2021.Repo)
    |> unique_constraint(:name)
    |> check_whitelist
  end
  # Email must be present, look like an address (one "@" and no whitespace),
  # fit in 160 chars and be unique. Currently used only by email_changeset/2.
  defp validate_email(changeset) do
    changeset
    |> validate_required([:email])
    |> validate_format(:email, ~r/^[^\s]+@[^\s]+$/, message: "must have the @ sign and no spaces")
    |> validate_length(:email, max: 160)
    |> unsafe_validate_unique(:email, Jalka2021.Repo)
    |> unique_constraint(:email)
  end
  # Password must be present and 5..80 characters long; the complexity rules
  # are deliberately disabled (kept below for reference).
  defp validate_password(changeset, opts) do
    changeset
    |> validate_required([:password])
    |> validate_length(:password, min: 5, max: 80)
    # |> validate_format(:password, ~r/[a-z]/, message: "at least one lower case character")
    # |> validate_format(:password, ~r/[A-Z]/, message: "at least one upper case character")
    # |> validate_format(:password, ~r/[!?@#$%^&*_0-9]/, message: "at least one digit or punctuation character")
    |> maybe_hash_password(opts)
  end
defp maybe_hash_password(changeset, opts) do
hash_password? = Keyword.get(opts, :hash_password, true)
password = get_change(changeset, :password)
if hash_password? && password && changeset.valid? do
changeset
|> put_change(:hashed_password, Bcrypt.hash_pwd_salt(password))
|> delete_change(:password)
else
changeset
end
end
  # Rejects names that are not on the allowed-users whitelist.
  # The error message "ei kuulu nimekirja" is Estonian for "not on the list".
  # NOTE(review): when :name is absent, get_field/2 returns nil and the lookup
  # is called with nil — confirm Accounts.get_allowed_users_exactly_by_name/1
  # tolerates that.
  defp check_whitelist(changeset) do
    case Accounts.get_allowed_users_exactly_by_name(get_field(changeset, :name)) do
      [] -> add_error(changeset, :name, "ei kuulu nimekirja")
      _ -> changeset
    end
  end
  @doc """
  A user changeset for changing the email.
  It requires the email to change otherwise an error is added.
  """
  def email_changeset(user, attrs) do
    user
    |> cast(attrs, [:email])
    |> validate_email()
    |> case do
      # An :email change was cast — accept the changeset as-is.
      %{changes: %{email: _}} = changeset -> changeset
      %{} = changeset -> add_error(changeset, :email, "did not change")
    end
  end
  @doc """
  A user changeset for changing the password.
  ## Options
    * `:hash_password` - Hashes the password so it can be stored securely
      in the database and ensures the password field is cleared to prevent
      leaks in the logs. If password hashing is not needed and clearing the
      password field is not desired (like when using this changeset for
      validations on a LiveView form), this option can be set to `false`.
      Defaults to `true`.
  """
  def password_changeset(user, attrs, opts \\ []) do
    user
    |> cast(attrs, [:password])
    # Expects a matching "password_confirmation" entry in attrs.
    |> validate_confirmation(:password, message: "does not match password")
    |> validate_password(opts)
  end
@doc """
Confirms the account by setting `confirmed_at`.
"""
def confirm_changeset(user) do
now = NaiveDateTime.utc_now() |> NaiveDateTime.truncate(:second)
change(user, confirmed_at: now)
end
  @doc """
  Verifies the password.
  If there is no user or the user doesn't have a password, we call
  `Bcrypt.no_user_verify/0` to avoid timing attacks.
  """
  # Happy path: a user with a stored hash and a non-empty candidate password.
  def valid_password?(%Jalka2021.Accounts.User{hashed_password: hashed_password}, password)
      when is_binary(hashed_password) and byte_size(password) > 0 do
    Bcrypt.verify_pass(password, hashed_password)
  end
  # Fallback: burn comparable CPU time so missing users are not detectable.
  def valid_password?(_, _) do
    Bcrypt.no_user_verify()
    false
  end
@doc """
Validates the current password otherwise adds an error to the changeset.
"""
def validate_current_password(changeset, password) do
if valid_password?(changeset.data, password) do
changeset
else
add_error(changeset, :current_password, "is not valid")
end
end
end
| 30.951515 | 112 | 0.685138 |
9e59c88157bffb6d88dd80f299688a6881585f38 | 1,775 | ex | Elixir | clients/storage/lib/google_api/storage/v1/model/bucket_access_controls.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | clients/storage/lib/google_api/storage/v1/model/bucket_access_controls.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | 1 | 2020-12-18T09:25:12.000Z | 2020-12-18T09:25:12.000Z | clients/storage/lib/google_api/storage/v1/model/bucket_access_controls.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | 1 | 2020-10-04T10:12:44.000Z | 2020-10-04T10:12:44.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Storage.V1.Model.BucketAccessControls do
  @moduledoc """
  An access-control list.
  ## Attributes
  *   `items` (*type:* `list(GoogleApi.Storage.V1.Model.BucketAccessControl.t)`, *default:* `nil`) - The list of items.
  *   `kind` (*type:* `String.t`, *default:* `storage#bucketAccessControls`) - The kind of item this is. For lists of bucket access control entries, this is always storage#bucketAccessControls.
  """
  use GoogleApi.Gax.ModelBase
  @type t :: %__MODULE__{
          :items => list(GoogleApi.Storage.V1.Model.BucketAccessControl.t()),
          :kind => String.t()
        }
  # Gax field macros: generate struct fields and JSON (de)coding metadata.
  field(:items, as: GoogleApi.Storage.V1.Model.BucketAccessControl, type: :list)
  field(:kind)
end
defimpl Poison.Decoder, for: GoogleApi.Storage.V1.Model.BucketAccessControls do
  # Decoding is delegated to the generated decode/2 on the model module.
  def decode(value, options),
    do: GoogleApi.Storage.V1.Model.BucketAccessControls.decode(value, options)
end
defimpl Poison.Encoder, for: GoogleApi.Storage.V1.Model.BucketAccessControls do
  # Encoding uses the shared Gax base-model encoder.
  def encode(value, options), do: GoogleApi.Gax.ModelBase.encode(value, options)
end
| 35.5 | 193 | 0.735775 |
9e5a2f228f293f59b744cd379476eb196d830094 | 2,002 | ex | Elixir | clients/compute/lib/google_api/compute/v1/model/region_instance_group_managers_set_target_pools_request.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | clients/compute/lib/google_api/compute/v1/model/region_instance_group_managers_set_target_pools_request.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | 1 | 2020-12-18T09:25:12.000Z | 2020-12-18T09:25:12.000Z | clients/compute/lib/google_api/compute/v1/model/region_instance_group_managers_set_target_pools_request.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | 1 | 2020-10-04T10:12:44.000Z | 2020-10-04T10:12:44.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Compute.V1.Model.RegionInstanceGroupManagersSetTargetPoolsRequest do
  @moduledoc """
  ## Attributes
  *   `fingerprint` (*type:* `String.t`, *default:* `nil`) - Fingerprint of the target pools information, which is a hash of the contents. This field is used for optimistic locking when you update the target pool entries. This field is optional.
  *   `targetPools` (*type:* `list(String.t)`, *default:* `nil`) - The URL of all TargetPool resources to which instances in the instanceGroup field are added. The target pools automatically apply to all of the instances in the managed instance group.
  """
  use GoogleApi.Gax.ModelBase
  @type t :: %__MODULE__{
          :fingerprint => String.t(),
          :targetPools => list(String.t())
        }
  # Gax field macros: generate struct fields and JSON (de)coding metadata.
  field(:fingerprint)
  field(:targetPools, type: :list)
end
defimpl Poison.Decoder,
  for: GoogleApi.Compute.V1.Model.RegionInstanceGroupManagersSetTargetPoolsRequest do
  alias GoogleApi.Compute.V1.Model.RegionInstanceGroupManagersSetTargetPoolsRequest, as: Model

  # Decoding is delegated to the generated decode/2 on the model module.
  def decode(value, options), do: Model.decode(value, options)
end
defimpl Poison.Encoder,
  for: GoogleApi.Compute.V1.Model.RegionInstanceGroupManagersSetTargetPoolsRequest do
  # Encoding uses the shared Gax base-model encoder.
  def encode(value, options), do: GoogleApi.Gax.ModelBase.encode(value, options)
end
| 36.4 | 251 | 0.745754 |
9e5a3561b13993cfdd3f38b2b1c66155f2df50fc | 3,873 | ex | Elixir | clients/sql_admin/lib/google_api/sql_admin/v1beta4/api/tiers.ex | MMore/elixir-google-api | 0574ec1439d9bbfe22d63965be1681b0f45a94c9 | [
"Apache-2.0"
] | null | null | null | clients/sql_admin/lib/google_api/sql_admin/v1beta4/api/tiers.ex | MMore/elixir-google-api | 0574ec1439d9bbfe22d63965be1681b0f45a94c9 | [
"Apache-2.0"
] | null | null | null | clients/sql_admin/lib/google_api/sql_admin/v1beta4/api/tiers.ex | MMore/elixir-google-api | 0574ec1439d9bbfe22d63965be1681b0f45a94c9 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.SQLAdmin.V1beta4.Api.Tiers do
  @moduledoc """
  API calls for all endpoints tagged `Tiers`.
  """
  alias GoogleApi.SQLAdmin.V1beta4.Connection
  alias GoogleApi.Gax.{Request, Response}
  # Reported to the API as the client-library version; "" when unset.
  @library_version Mix.Project.config() |> Keyword.get(:version, "")
  @doc """
  Lists all available machine types (tiers) for Cloud SQL, for example, `db-custom-1-3840`. For related information, see [Pricing](/sql/pricing).
  ## Parameters
  *   `connection` (*type:* `GoogleApi.SQLAdmin.V1beta4.Connection.t`) - Connection to server
  *   `project` (*type:* `String.t`) - Project ID of the project for which to list tiers.
  *   `optional_params` (*type:* `keyword()`) - Optional parameters
      *   `:"$.xgafv"` (*type:* `String.t`) - V1 error format.
      *   `:access_token` (*type:* `String.t`) - OAuth access token.
      *   `:alt` (*type:* `String.t`) - Data format for response.
      *   `:callback` (*type:* `String.t`) - JSONP
      *   `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
      *   `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
      *   `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
      *   `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
      *   `:quotaUser` (*type:* `String.t`) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
      *   `:uploadType` (*type:* `String.t`) - Legacy upload protocol for media (e.g. "media", "multipart").
      *   `:upload_protocol` (*type:* `String.t`) - Upload protocol for media (e.g. "raw", "multipart").
  *   `opts` (*type:* `keyword()`) - Call options
  ## Returns
  *   `{:ok, %GoogleApi.SQLAdmin.V1beta4.Model.TiersListResponse{}}` on success
  *   `{:error, info}` on failure
  """
  @spec sql_tiers_list(Tesla.Env.client(), String.t(), keyword(), keyword()) ::
          {:ok, GoogleApi.SQLAdmin.V1beta4.Model.TiersListResponse.t()}
          | {:ok, Tesla.Env.t()}
          | {:ok, list()}
          | {:error, any()}
  def sql_tiers_list(connection, project, optional_params \\ [], opts \\ []) do
    # Whitelist of supported optional parameters; all are sent as query params.
    optional_params_config = %{
      :"$.xgafv" => :query,
      :access_token => :query,
      :alt => :query,
      :callback => :query,
      :fields => :query,
      :key => :query,
      :oauth_token => :query,
      :prettyPrint => :query,
      :quotaUser => :query,
      :uploadType => :query,
      :upload_protocol => :query
    }
    # Build the GET request; the project id is URI-encoded into the path.
    request =
      Request.new()
      |> Request.method(:get)
      |> Request.url("/sql/v1beta4/projects/{project}/tiers", %{
        "project" => URI.encode(project, &URI.char_unreserved?/1)
      })
      |> Request.add_optional_params(optional_params_config, optional_params)
      |> Request.library_version(@library_version)
    connection
    |> Connection.execute(request)
    |> Response.decode(opts ++ [struct: %GoogleApi.SQLAdmin.V1beta4.Model.TiersListResponse{}])
  end
end
| 44.011364 | 196 | 0.650658 |
9e5a4905d1f9846aff869118d7db2d32db724171 | 205 | ex | Elixir | server/apps/boardr_api/lib/boardr_api/api_root/api_root_controller.ex | AlphaHydrae/boardr | 98eed02801f88c065a24bf13051c5cf96270a5f7 | [
"MIT"
] | 1 | 2021-04-08T17:26:27.000Z | 2021-04-08T17:26:27.000Z | server/apps/boardr_api/lib/boardr_api/api_root/api_root_controller.ex | AlphaHydrae/boardr | 98eed02801f88c065a24bf13051c5cf96270a5f7 | [
"MIT"
] | 1 | 2022-02-13T05:50:46.000Z | 2022-02-13T05:50:46.000Z | server/apps/boardr_api/lib/boardr_api/api_root/api_root_controller.ex | AlphaHydrae/boardr | 98eed02801f88c065a24bf13051c5cf96270a5f7 | [
"MIT"
] | null | null | null | defmodule BoardrApi.ApiRootController do
  use BoardrApi, :controller
  # Renders the API root resource with a HAL+JSON content type.
  def index(%Conn{} = conn, _params) do
    conn
    |> put_resp_content_type("application/hal+json")
    |> render(:index)
  end
end
| 20.5 | 52 | 0.702439 |
9e5add832c842d83683d3bb74df9f3cace0283cc | 1,125 | exs | Elixir | config/config.exs | edenlabllc/translit-ukrainian-latin | be447149785bfbd8e0e70541a1d404e4cc18de4c | [
"MIT"
] | 2 | 2021-02-06T12:01:35.000Z | 2021-05-06T11:41:58.000Z | config/config.exs | edenlabllc/translit | be447149785bfbd8e0e70541a1d404e4cc18de4c | [
"MIT"
] | null | null | null | config/config.exs | edenlabllc/translit | be447149785bfbd8e0e70541a1d404e4cc18de4c | [
"MIT"
] | null | null | null | # This file is responsible for configuring your application
# and its dependencies with the aid of the Mix.Config module.
# NOTE(review): `use Mix.Config` is deprecated since Elixir 1.9 in favor of
# `import Config`; confirm the project's minimum Elixir version before migrating.
use Mix.Config
# This configuration is loaded before any dependency and is restricted
# to this project. If another project depends on this project, this
# file won't be loaded nor affect the parent project. For this reason,
# if you want to provide default values for your application for
# 3rd-party users, it should be done in your "mix.exs" file.
# You can configure your application as:
#
# config :translit, key: :value
#
# and access this configuration in your application as:
#
# Application.get_env(:translit, :key)
#
# You can also configure a 3rd-party app:
#
# config :logger, level: :info
#
# It is also possible to import configuration files, relative to this
# directory. For example, you can emulate configuration per environment
# by uncommenting the line below and defining dev.exs, test.exs and such.
# Configuration from the imported file will override the ones defined
# here (which is why it is important to import them last).
#
# import_config "#{Mix.env}.exs"
| 36.290323 | 73 | 0.752 |
9e5ae9149f2007541ef5c629288fa065241c3cb2 | 19,061 | ex | Elixir | lib/mongo/topology_description.ex | IgorPolyakov/mongodb | 8ffce5761590d608ab64b1eccfa9e21426cd38c5 | [
"Apache-2.0"
] | 286 | 2017-06-06T04:21:31.000Z | 2021-09-11T16:37:59.000Z | lib/mongo/topology_description.ex | IgorPolyakov/mongodb | 8ffce5761590d608ab64b1eccfa9e21426cd38c5 | [
"Apache-2.0"
] | 202 | 2017-05-28T13:22:01.000Z | 2020-05-15T20:15:51.000Z | lib/mongo/topology_description.ex | IgorPolyakov/mongodb | 8ffce5761590d608ab64b1eccfa9e21426cd38c5 | [
"Apache-2.0"
] | 120 | 2016-12-16T17:05:12.000Z | 2020-05-15T16:20:17.000Z | defmodule Mongo.TopologyDescription do
@moduledoc false
# This acts as a single topology consisting of many connections, built on top
# of the existing connection API's. It implements the Server Discovery and
# Monitoring specification, along with the `Mongo.ServerMonitor` module.
@wire_protocol_range 0..5
alias Mongo.ServerDescription
alias Mongo.ReadPreference
# see https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#topologydescription
@type type ::
:unknown | :single | :replica_set_no_primary | :replica_set_with_primary | :sharded
@type t :: %{
type: type,
set_name: String.t() | nil,
max_set_version: non_neg_integer | nil,
max_election_id: BSON.ObjectId.t(),
servers: %{String.t() => Mongo.ServerDescription.t()},
compatible: boolean,
compatibility_error: String.t() | nil,
local_threshold_ms: non_neg_integer
}
def defaults(map \\ %{}) do
default_servers = %{"localhost:27017" => ServerDescription.defaults(%{})}
Map.merge(
%{
type: :unknown,
set_name: nil,
max_set_version: nil,
max_election_id: nil,
servers: default_servers,
compatible: true,
compatibility_error: nil,
local_threshold_ms: 15
},
map
)
end
@doc """
This method allows you to check if current topology contains the readable server.
"""
def has_readable_server?(topology, opts \\ []) do
{:ok, servers, _, _} = select_servers(topology, :read, opts)
Enum.any?(servers)
end
def has_writable_server?(topology) do
topology.type in [:single, :sharded, :replica_set_with_primary]
end
def update(topology, server_description, num_seeds) do
check_server_supported(topology, server_description, num_seeds)
end
  # steps 3-4
  # Selects candidate servers for an operation (`type` is :read or :write),
  # honoring the read preference in `opts`. Returns
  # {:ok, addresses, slave_ok?, mongos?} or {:error, :invalid_wire_version}
  # when the topology has been marked incompatible.
  def select_servers(topology, type, opts \\ []) do
    read_preference =
      Keyword.get(opts, :read_preference)
      |> ReadPreference.defaults()
    if topology[:compatible] == false do
      {:error, :invalid_wire_version}
    else
      {servers, slave_ok, mongos?} =
        case topology.type do
          :unknown ->
            {[], false, false}
          :single ->
            # Single topology: the one known server is always eligible.
            server = topology.servers |> Map.values() |> Enum.at(0, %{type: :unknown})
            {topology.servers, type != :write and server.type != :mongos, server.type == :mongos}
          :sharded ->
            # Sharded: only mongos servers are eligible.
            mongos_servers =
              topology.servers
              |> Enum.filter(fn {_, server} -> server.type == :mongos end)
            {mongos_servers, false, true}
          _ ->
            # Replica-set topologies.
            case type do
              :read ->
                {select_replica_set_server(topology, read_preference.mode, read_preference), true,
                 false}
              :write ->
                # Writes require a primary.
                if topology.type == :replica_set_with_primary do
                  {select_replica_set_server(topology, :primary, ReadPreference.defaults()),
                   false, false}
                else
                  {[], false, false}
                end
            end
        end
      # Keep only the addresses (keys of the {address, description} pairs).
      servers =
        for {server, _} <- servers do
          server
        end
      {:ok, servers, slave_ok, mongos?}
    end
  end
  ## Private Functions
  # :primary — only the current primary is eligible.
  defp select_replica_set_server(topology, :primary, _read_preference) do
    Enum.filter(topology.servers, fn {_, server} ->
      server.type == :rs_primary
    end)
  end
  # :primary_preferred — the primary if available, otherwise the secondaries.
  defp select_replica_set_server(topology, :primary_preferred, read_preference) do
    preferred = select_replica_set_server(topology, :primary, read_preference)
    if Enum.empty?(preferred) do
      select_replica_set_server(topology, :secondary, read_preference)
    else
      preferred
    end
  end
  # :secondary_preferred — secondaries if available, otherwise the primary.
  defp select_replica_set_server(topology, :secondary_preferred, read_preference) do
    preferred = select_replica_set_server(topology, :secondary, read_preference)
    if Enum.empty?(preferred) do
      select_replica_set_server(topology, :primary, read_preference)
    else
      preferred
    end
  end
  # :secondary / :nearest — candidates (secondaries only, or every member for
  # :nearest) are narrowed by staleness, tag sets and the latency window.
  defp select_replica_set_server(topology, mode, read_preference)
       when mode in [:secondary, :nearest] do
    topology.servers
    |> Enum.filter(fn {_, server} ->
      server.type == :rs_secondary || mode == :nearest
    end)
    |> Enum.into(%{})
    |> filter_out_stale(topology, read_preference.max_staleness_ms)
    |> select_tag_sets(read_preference.tag_sets)
    |> filter_latency_window(topology.local_threshold_ms)
  end
defp filter_out_stale(servers, topology, max_staleness_ms) do
if max_staleness_ms == 0 || max_staleness_ms == nil do
servers
else
extra =
case topology.type do
:replica_set_no_primary ->
{_, server} =
Enum.reduce(servers, {0, nil}, fn {_, server}, {max, max_server} ->
if server.last_write_date > max do
{server.last_write_date, server}
else
{max, max_server}
end
end)
server
:replica_set_with_primary ->
servers
|> Enum.filter(fn {_, server} ->
server.type == :rs_primary
end)
|> Enum.at(0)
end
servers
|> Enum.filter(fn {_, server} ->
case server.type do
:rs_secondary ->
case topology.type do
:replica_set_no_primary ->
staleness =
extra.last_write_date + (server.last_update_time - extra.last_update_time) -
server.last_write_date + topology.heartbeat_frequency_ms
staleness <= max_staleness_ms
:replica_set_with_primary ->
staleness =
extra.last_write_date - server.last_write_date + topology.heartbeat_frequency_ms
staleness <= max_staleness_ms
end
_ ->
true
end
end)
|> Enum.into(%{})
end
end
defp select_tag_sets(servers, tag_sets) do
if Enum.empty?(tag_sets) do
servers
else
tag_sets
|> Enum.reduce_while(servers, fn tag_set, servers ->
new_servers =
Enum.filter(servers, fn {_, server} ->
tag_set_ms = MapSet.new(tag_set)
server_tag_set_ms = MapSet.new(server.tag_set)
MapSet.subset?(tag_set_ms, server_tag_set_ms)
end)
if Enum.empty?(new_servers) do
{:cont, servers}
else
{:halt, new_servers}
end
end)
|> Enum.into(%{})
end
end
defp filter_latency_window(servers, local_threshold_ms) do
if Enum.empty?(servers) do
servers
else
min_server =
servers
|> Enum.min_by(fn {_, server} ->
server.round_trip_time
end)
|> elem(1)
latency_window = min_server.round_trip_time + local_threshold_ms
Enum.filter(servers, fn {_, server} ->
server.round_trip_time <= latency_window
end)
end
end
  # Verifies the server's wire-protocol range overlaps the driver's
  # (@wire_protocol_range). On overlap the SDAM update continues; otherwise
  # the whole topology is marked incompatible with a descriptive error.
  defp check_server_supported(topology, server_description, num_seeds) do
    server_supported_range =
      server_description.min_wire_version..server_description.max_wire_version
    server_supported? =
      Enum.any?(server_supported_range, fn version ->
        version in @wire_protocol_range
      end)
    if server_supported? do
      check_for_single_topology(topology, server_description, num_seeds)
    else
      topology =
        topology
        |> Map.put(:compatible, false)
        |> Map.put(
          :compatibility_error,
          "Server at #{server_description.address} uses wire protocol " <>
            "versions #{server_description.min_wire_version} through " <>
            "#{server_description.max_wire_version}, but client only " <>
            "supports #{Enum.min(@wire_protocol_range)} through " <>
            "#{Enum.max(@wire_protocol_range)}."
        )
      {[], topology}
    end
  end
  # see https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#topologytype-single
  # A :single topology keeps exactly one server and replaces its description
  # unconditionally, emitting an {old, new} action pair; any other topology
  # type goes through the full state machine.
  defp check_for_single_topology(topology, server_description, num_seeds) do
    case topology.type do
      :single ->
        previous_description = topology.servers |> Map.values() |> hd
        {[{previous_description, server_description}],
         put_in(topology.servers[server_description.address], server_description)}
      _ ->
        check_server_in_topology(topology, server_description, num_seeds)
    end
  end
  # see https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#other-topologytypes
  # Ignores descriptions for servers we no longer monitor; otherwise stores
  # the new description and dispatches on the current topology type.
  # Returns {[{old_description, new_description} | actions], topology}.
  defp check_server_in_topology(topology, server_description, num_seeds) do
    if server_description.address not in Map.keys(topology.servers) do
      {[], topology}
    else
      address = server_description.address
      old_description = topology.servers[address]
      {actions, topology} =
        topology
        |> put_in([:servers, address], server_description)
        |> update_topology(topology.type, server_description, num_seeds)
      {[{old_description, server_description} | actions], topology}
    end
  end
  # see https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#topologytype-explanations
  # :unknown — the first concrete description decides what kind of deployment
  # this is (standalone, mongos, or a replica-set member).
  defp update_topology(topology, :unknown, server_description, num_seeds) do
    case server_description.type do
      :unknown ->
        {[], %{topology | type: :unknown}}
      :rs_ghost ->
        {[], %{topology | type: :unknown}}
      :standalone ->
        update_unknown_with_standalone(topology, server_description, num_seeds)
      :mongos ->
        {[], %{topology | type: :sharded}}
      :rs_primary ->
        topology
        |> Map.put(:set_name, server_description.set_name)
        |> update_rs_from_primary(server_description)
      type when type in [:rs_secondary, :rs_arbiter, :rs_other] ->
        topology
        |> Map.put(:set_name, server_description.set_name)
        |> update_rs_without_primary(server_description)
      _ ->
        # don't touch broken states...
        {[], topology}
    end
  end
  # :sharded — only mongos (and still-unknown) servers belong; anything else
  # is removed from monitoring.
  defp update_topology(topology, :sharded, server_description, _) do
    case server_description.type do
      type when type in [:unknown, :mongos] ->
        {[], topology}
      type
      when type in [:rs_ghost, :standalone, :rs_primary, :rs_secondary, :rs_arbiter, :rs_other] ->
        {_, new_topology} = pop_in(topology.servers[server_description.address])
        {[], new_topology}
      _ ->
        {[], topology}
    end
  end
  # :replica_set_no_primary — waiting to discover a primary; standalones and
  # mongos servers do not belong in a replica-set topology.
  defp update_topology(topology, :replica_set_no_primary, server_description, _) do
    case server_description.type do
      type when type in [:unknown, :rs_ghost] ->
        {[], topology}
      type when type in [:standalone, :mongos] ->
        {_, new_topology} = pop_in(topology.servers[server_description.address])
        {[], new_topology}
      :rs_primary ->
        update_rs_from_primary(topology, server_description)
      type when type in [:rs_secondary, :rs_arbiter, :rs_ghost] ->
        update_rs_without_primary(topology, server_description)
      _ ->
        {[], topology}
    end
  end
  # :replica_set_with_primary — after each update, re-check whether a primary
  # is still known and downgrade the topology type if not.
  defp update_topology(topology, :replica_set_with_primary, server_description, _) do
    case server_description.type do
      :unknown ->
        topology |> check_if_has_primary
      :rs_ghost ->
        topology |> check_if_has_primary
      type when type in [:standalone, :mongos] ->
        {_, new_topology} = pop_in(topology.servers[server_description.address])
        check_if_has_primary(new_topology)
      :rs_primary ->
        update_rs_from_primary(topology, server_description)
      type when type in [:rs_secondary, :rs_arbiter, :rs_ghost] ->
        update_rs_with_primary_from_member(topology, server_description)
      _ ->
        {[], topology}
    end
  end
# see https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#actions
defp not_in_servers?(topology, server_description) do
server_description.address not in Map.keys(topology.servers)
end
def invalid_set_name?(topology, server_description) do
topology.set_name != server_description.set_name and
topology.set_name != nil
end
defp update_unknown_with_standalone(topology, server_description, num_seeds) do
if not_in_servers?(topology, server_description) do
{[], topology}
else
if num_seeds == 1 do
{[], Map.put(topology, :type, :single)}
else
{_, new_topology} = pop_in(topology.servers[server_description.address])
{[], new_topology}
end
end
end
  # A secondary/arbiter/other member reported while no primary is known:
  # drop it on a set-name mismatch; otherwise adopt the set name, start
  # monitoring any hosts it reports, and drop it if it answers for an address
  # different from the one it believes it has (`me`).
  defp update_rs_without_primary(topology, server_description) do
    if not_in_servers?(topology, server_description) do
      {[], topology}
    else
      if invalid_set_name?(topology, server_description) do
        {_, new_topology} = pop_in(topology.servers[server_description.address])
        {[], new_topology}
      else
        {actions, topology} =
          topology
          |> Map.put(:set_name, server_description.set_name)
          |> add_new_servers(server_description)
        if server_description.address != server_description.me do
          {_, topology} = pop_in(topology.servers[server_description.address])
          {actions, topology}
        else
          {actions, topology}
        end
      end
    end
  end
  # Tuple-threading variant used mid-pipeline; preserves accumulated actions.
  defp add_new_servers({actions, topology}, server_description) do
    {[], new_topology} = add_new_servers(topology, server_description)
    {actions, new_topology}
  end
  # Ensures every host the server reports (members, passives, arbiters) is
  # being monitored; hosts not yet known get a fresh default description.
  defp add_new_servers(topology, server_description) do
    all_hosts =
      server_description.hosts ++ server_description.passives ++ server_description.arbiters
    topology =
      Enum.reduce(all_hosts, topology, fn host, topology ->
        if host not in Map.keys(topology.servers) do
          # this is kinda like an "upsert"
          put_in(topology.servers[host], ServerDescription.defaults(%{address: host}))
        else
          topology
        end
      end)
    {[], topology}
  end
  # A non-primary member reported while a primary is known: drop it on a set
  # name mismatch or a `me` mismatch, then re-check whether a primary is
  # still present and downgrade the topology type if not.
  defp update_rs_with_primary_from_member(topology, server_description) do
    if not_in_servers?(topology, server_description) do
      {[], topology}
    else
      topology =
        if invalid_set_name?(topology, server_description) do
          {_, new_topology} = pop_in(topology.servers[server_description.address])
          new_topology
        else
          topology
        end
      if server_description.address != server_description.me do
        {_, new_topology} = pop_in(topology.servers[server_description.address])
        check_if_has_primary(new_topology)
      else
        # Keep the current type while any primary remains; otherwise demote.
        if Enum.any?(topology.servers, fn
             {_, server_description} ->
               server_description.type == :rs_primary
           end) do
          {[], topology}
        else
          {[], %{topology | type: :replica_set_no_primary}}
        end
      end
    end
  end
  # A primary reported: ignore unknown addresses, drop the server on a
  # set-name mismatch (then re-check for a primary), otherwise adopt the set
  # name and reconcile set version / election id before accepting it.
  defp update_rs_from_primary(topology, server_description) do
    if not_in_servers?(topology, server_description) do
      {[], topology}
    else
      if invalid_set_name?(topology, server_description) do
        {_, new_topology} = pop_in(topology.servers[server_description.address])
        check_if_has_primary(new_topology)
      else
        topology
        |> Map.put(:set_name, server_description.set_name)
        |> handle_election_id(server_description)
      end
    end
  end
  # Guards against accepting a stale primary using (setVersion, electionId):
  # if the topology has already seen a newer set version, or the same set
  # version with a greater election id, the reporting server is reset to an
  # unknown description instead of being accepted as primary.
  defp handle_election_id(topology, server_description) do
    # yes, this is really in the spec
    if server_description[:set_version] != nil and
         server_description[:election_id] != nil do
      has_set_version_and_election_id? =
        topology[:max_set_version] != nil and
          topology[:max_election_id] != nil
      newer_set_version? = topology.max_set_version > server_description.set_version
      same_set_version? = topology.max_set_version == server_description.set_version
      greater_election_id? = topology.max_election_id > server_description.election_id
      if has_set_version_and_election_id? and
           (newer_set_version? or (same_set_version? and greater_election_id?)) do
        # Stale primary claim: reset this server and re-check for a primary.
        new_server_description =
          ServerDescription.defaults(%{address: server_description.address})
        topology
        |> put_in([:servers, new_server_description.address], new_server_description)
        |> check_if_has_primary
      else
        # Newest election id wins; record it and continue the primary update.
        topology
        |> Map.put(:max_election_id, server_description.election_id)
        |> continue(server_description)
      end
    else
      topology
      |> continue(server_description)
    end
  end
  # Shared tail of the primary-update path: track the set version, demote any
  # other stale primary, add newly-discovered hosts, drop hosts the primary
  # no longer reports, then recompute the topology type.
  defp continue(topology, server_description) do
    topology
    |> handle_set_version(server_description)
    |> invalidate_stale_primary(server_description)
    |> add_new_servers(server_description)
    |> remove_dead_nodes(server_description)
    |> check_if_has_primary
  end
defp handle_set_version(topology, server_description) do
if server_description.set_version != nil and
(topology.max_set_version == nil or
server_description.set_version > topology.max_set_version) do
Map.put(topology, :max_set_version, server_description.set_version)
else
topology
end
end
  # Demotes any primary other than the newly-reported one: its description is
  # reset to a default (unknown) one and a {:force_check, address} action is
  # emitted so its monitor re-checks it immediately.
  def invalidate_stale_primary(topology, server_description) do
    {actions, new_servers} =
      Enum.reduce(topology.servers, {[], %{}}, fn {address, %{type: type} = server},
                                                  {acts, servers} ->
        if address != server_description.address and type == :rs_primary do
          {[{:force_check, address} | acts],
           Map.put(servers, address, ServerDescription.defaults(%{address: address}))}
        else
          {acts, Map.put(servers, address, server)}
        end
      end)
    {actions, Map.put(topology, :servers, new_servers)}
  end
def remove_dead_nodes({actions, topology}, server_description) do
all_hosts =
server_description.hosts ++ server_description.passives ++ server_description.arbiters
topology =
update_in(
topology.servers,
&Enum.into(
Enum.filter(&1, fn {address, _} ->
address in all_hosts
end),
%{}
)
)
{actions, topology}
end
defp check_if_has_primary({actions, topology}) do
{[], new_topology} = check_if_has_primary(topology)
{actions, new_topology}
end
defp check_if_has_primary(topology) do
any_primary? =
Enum.any?(topology.servers, fn {_, server_description} ->
server_description.type == :rs_primary
end)
if any_primary? do
{[], %{topology | type: :replica_set_with_primary}}
else
{[], %{topology | type: :replica_set_no_primary}}
end
end
end
| 31.453795 | 162 | 0.643828 |
9e5afa56dbdf858f8bdb6b30d847ac5f123f09ce | 2,035 | ex | Elixir | lib/plumbapius/coverage/report/interaction_report.ex | Amuhar/plumbapius | a9066512f520f2ad97e677b04d70cc62695f2def | [
"Apache-2.0"
] | 10 | 2020-08-25T07:52:23.000Z | 2020-12-06T12:44:44.000Z | lib/plumbapius/coverage/report/interaction_report.ex | Amuhar/plumbapius | a9066512f520f2ad97e677b04d70cc62695f2def | [
"Apache-2.0"
] | 3 | 2020-10-13T11:49:32.000Z | 2021-05-28T08:34:41.000Z | lib/plumbapius/coverage/report/interaction_report.ex | Amuhar/plumbapius | a9066512f520f2ad97e677b04d70cc62695f2def | [
"Apache-2.0"
] | 2 | 2020-09-03T14:29:00.000Z | 2021-05-26T11:07:37.000Z | defmodule Plumbapius.Coverage.Report.InteractionReport do
  alias Plumbapius.Coverage.CoverageTracker.CoveredCase
  alias Plumbapius.Coverage.CoverageTracker
  alias Plumbapius.Coverage.Report.MultiChoiceSchema
  # Per-interaction coverage result: which multi-choice schema branches of the
  # request/response bodies were exercised and which were missed.
  defstruct interaction: nil,
            covered_multi_choices: [],
            missed_multi_choices: []
  @type t :: %__MODULE__{
          interaction: CoverageTracker.interaction(),
          covered_multi_choices: list(MultiChoiceSchema.multi_choice()),
          missed_multi_choices: list(MultiChoiceSchema.multi_choice())
        }
  # Builds a report from a covered case by checking the recorded request and
  # response bodies against every multi-choice branch of the schemas.
  @spec new(CoveredCase.t()) :: t
  def new(%CoveredCase{} = covered_case) do
    {req_schema, resp_schema} = covered_case.interaction
    {covered_req_choices, missed_req_choices} = check_choices(req_schema.bodies, covered_case.req_body)
    {covered_resp_choices, missed_resp_choices} = check_choices([resp_schema.body], covered_case.resp_body)
    new(
      covered_case.interaction,
      covered_req_choices ++ covered_resp_choices,
      missed_req_choices ++ missed_resp_choices
    )
  end
  # NOTE(review): the default arguments here also generate a new/1, which
  # collides with the new(%CoveredCase{}) clause above ("def new/1 conflicts
  # with defaults" in recent Elixir) — verify this compiles on the project's
  # Elixir version.
  @spec new(
          CoverageTracker.interaction(),
          list(MultiChoiceSchema.multi_choice()),
          list(MultiChoiceSchema.multi_choice())
        ) :: t
  def new(interaction, covered_multi_choices \\ [], missed_multi_choices \\ []) do
    %__MODULE__{
      interaction: interaction,
      covered_multi_choices: covered_multi_choices,
      missed_multi_choices: missed_multi_choices
    }
  end
  # Splits all multi-choice branches of the given schema bodies into
  # {covered, missed} based on whether `body_to_check` satisfies each branch.
  defp check_choices(schema_bodies, body_to_check) do
    all_choices = Enum.flat_map(schema_bodies, &MultiChoiceSchema.new(&1.schema))
    covered_choices =
      Enum.filter(all_choices, fn choice ->
        Enum.any?(schema_bodies, &choice_covered?(choice, &1, body_to_check))
      end)
    {covered_choices, all_choices -- covered_choices}
  end
  # A choice counts as covered when the body fragment at `path` validates
  # against the branch schema.
  defp choice_covered?({path, choice_schema}, schema, body_to_check) do
    body_fragment = get_in(body_to_check, path)
    ExJsonSchema.Validator.valid_fragment?(schema, choice_schema, body_fragment)
  end
end
| 34.491525 | 107 | 0.725799 |
9e5b118deed3aa6c6729631f246bb15ab2d7b162 | 1,386 | ex | Elixir | hello_naked/lib/hello_naked_web/endpoint.ex | playgd/learning-elixir | 3909b540ccd7f54b174de9c6c6f258e94865ba6b | [
"MIT"
] | 16 | 2021-11-08T12:15:27.000Z | 2022-02-11T01:00:20.000Z | hello_naked/lib/hello_naked_web/endpoint.ex | playgd/learning-elixir | 3909b540ccd7f54b174de9c6c6f258e94865ba6b | [
"MIT"
] | null | null | null | hello_naked/lib/hello_naked_web/endpoint.ex | playgd/learning-elixir | 3909b540ccd7f54b174de9c6c6f258e94865ba6b | [
"MIT"
] | 2 | 2021-11-19T11:32:58.000Z | 2022-01-16T00:36:32.000Z | defmodule HelloNakedWeb.Endpoint do
  # Compiles this module into a Phoenix endpoint for the :hello_naked OTP app;
  # the plugs below form the HTTP pipeline, executed top to bottom.
  use Phoenix.Endpoint, otp_app: :hello_naked

  # The session will be stored in the cookie and signed,
  # this means its contents can be read but not tampered with.
  # Set :encryption_salt if you would also like to encrypt it.
  @session_options [
    store: :cookie,
    key: "_hello_naked_key",
    signing_salt: "sh0l6Pwt"
  ]

  # LiveView websocket; forwards the session options so LiveViews can read
  # the same signed session as regular requests.
  socket "/live", Phoenix.LiveView.Socket, websocket: [connect_info: [session: @session_options]]

  # Serve at "/" the static files from "priv/static" directory.
  #
  # You should set gzip to true if you are running phx.digest
  # when deploying your static files in production.
  plug Plug.Static,
    at: "/",
    from: :hello_naked,
    gzip: false,
    only: ~w(assets fonts images favicon.ico robots.txt)

  # Code reloading can be explicitly enabled under the
  # :code_reloader configuration of your endpoint.
  if code_reloading? do
    plug Phoenix.CodeReloader
  end

  # LiveDashboard request logging, toggled via the given param/cookie keys.
  plug Phoenix.LiveDashboard.RequestLogger,
    param_key: "request_logger",
    cookie_key: "request_logger"

  # Standard request pipeline: request-id header, telemetry events,
  # body parsing, HTTP method override, HEAD handling, session, then routing.
  plug Plug.RequestId
  plug Plug.Telemetry, event_prefix: [:phoenix, :endpoint]

  plug Plug.Parsers,
    parsers: [:urlencoded, :multipart, :json],
    pass: ["*/*"],
    json_decoder: Phoenix.json_library()

  plug Plug.MethodOverride
  plug Plug.Head
  plug Plug.Session, @session_options
  plug HelloNakedWeb.Router
end
| 28.875 | 97 | 0.718615 |
9e5b367d262f3c2a59b701fbea829a8171b6396a | 1,876 | ex | Elixir | lib/scaffolding/internal/implementation/domain_object/json/entity.implementation.default.ex | noizu-labs/advanced_elixir_scaffolding | 9e7a1199a4b25fcc16fc5a795104b926d2f238df | [
"MIT"
] | null | null | null | lib/scaffolding/internal/implementation/domain_object/json/entity.implementation.default.ex | noizu-labs/advanced_elixir_scaffolding | 9e7a1199a4b25fcc16fc5a795104b926d2f238df | [
"MIT"
] | null | null | null | lib/scaffolding/internal/implementation/domain_object/json/entity.implementation.default.ex | noizu-labs/advanced_elixir_scaffolding | 9e7a1199a4b25fcc16fc5a795104b926d2f238df | [
"MIT"
] | null | null | null | #-------------------------------------------------------------------------------
# Author: Keith Brings <[email protected]>
# Copyright (C) 2021 Noizu Labs Inc. All rights reserved.
#-------------------------------------------------------------------------------
defmodule Noizu.AdvancedScaffolding.Internal.Json.Entity.Implementation.Default do
  @moduledoc """
  Default implementation of the JSON entity hooks: PII stripping and
  JSON-to-struct hydration.
  """

  # Numeric ranking of PII sensitivity levels. Higher numbers rank as less
  # sensitive; unknown/:default levels rank as least sensitive (6).
  @pii_levels %{
    level_0: 0,
    level_1: 1,
    level_2: 2,
    level_3: 3,
    level_4: 4,
    level_5: 5,
    level_6: 6,
    default: 6
  }

  @doc """
  Replace the values of fields whose declared `:pii` level ranks below
  `max_level` with `:"*RESTRICTED*"`.

  `max_level` is a level atom (e.g. `:level_3`); unrecognized values fall
  back to `:level_3`. A field is kept as-is when its `:pii` attribute ranks
  at or above the threshold.
  """
  def __strip_pii__(_m, entity, max_level) do
    max_level = @pii_levels[max_level] || @pii_levels[:level_3]

    sanitized =
      Enum.map(
        Map.from_struct(entity),
        fn {field, value} ->
          # NOTE: fields with no :pii attribute yield nil here, and
          # `nil >= max_level` is true under Erlang term ordering
          # (atoms sort after numbers), so such fields are kept as-is.
          cond do
            (@pii_levels[entity.__struct__.__noizu_info__(:field_attributes)[field][:pii]]) >= max_level -> {field, value}
            :else -> {field, :"*RESTRICTED*"}
          end
        end
      )

    struct(entity.__struct__, sanitized)
  end

  @doc """
  Hydrate a struct of module `m` from a decoded JSON map.

  The `"kind"` discriminator may be either the fully qualified module name
  or its last two segments; when it matches neither, `nil` is returned.
  Field values are run through their declared type handlers when present,
  otherwise copied verbatim.
  """
  def from_json(m, format, json, context, options) do
    field_types = m.__noizu_info__(:field_types)
    fields = Map.keys(struct(m.__struct__(), [])) -- [:__struct__, :__transient__, :initial]
    full_kind = Atom.to_string(m)

    # BUGFIX: String.split/2 returns a list, which String.slice/2 cannot
    # accept (it only operates on binaries and would raise a
    # FunctionClauseError). Use Enum.slice/2 to take the final two name
    # segments; Enum.slice clamps out-of-range negative starts, so short
    # module names are handled gracefully.
    partial_kind = full_kind |> String.split(".") |> Enum.slice(-2..-1) |> Enum.join(".")

    if json["kind"] == full_kind || json["kind"] == partial_kind do
      # todo: if the entity identifier is set we should load the existing
      # entity and only apply the delta here.
      Enum.map(
        fields,
        fn field ->
          # @todo check for a json `as` clause
          v = json[Atom.to_string(field)]

          cond do
            type = field_types[field] ->
              {field, type.handler.from_json(format, v, context, options)}

            :else ->
              {field, v}
          end
        end
      )
      |> m.__struct__()
    end
  end
end
| 30.754098 | 120 | 0.549041 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.