Dataset schema:

| column | type | range / notes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 2–991k |
| ext | string | 2 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4–208 |
| max_stars_repo_name | string | length 6–106 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | sequence | |
| max_stars_count | int64 | 1–33.5k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_path | string | length 4–208 |
| max_issues_repo_name | string | length 6–106 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | sequence | |
| max_issues_count | int64 | 1–16.3k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_path | string | length 4–208 |
| max_forks_repo_name | string | length 6–106 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | sequence | |
| max_forks_count | int64 | 1–6.91k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable (⌀) |
| content | string | length 2–991k |
| avg_line_length | float64 | 1–36k |
| max_line_length | int64 | 1–977k |
| alphanum_fraction | float64 | 0–1 |

Each record below is shown as one metadata line (hexsha | size | ext | lang | path | repo | head hexsha | licenses | star/issue/fork activity), the file content, and one stats line (avg_line_length | max_line_length | alphanum_fraction). The stars/issues/forks repo path, name, and head hexsha are collapsed where they repeat the same values.
9e701081c52f0536127d0ba6beae3d5092c668c2 | 582 | exs | Elixir | mix.exs | aforward-oss/dynamo_demo | 816d7fab1cd46a7800b6bf5ed2225f22af62b599 | ["MIT"] | stars: null | issues: null | forks: null

```elixir
defmodule DynamoDemo.Mixfile do
use Mix.Project
def project do
[ app: :dynamo_demo,
version: "0.0.1",
compilers: [:elixir, :dynamo, :app],
deps: deps ]
end
# Configuration for the OTP application
def application do
[]
end
defp deps do
[ { :mimetypes, git: "https://github.com/spawngrid/mimetypes.git" },
{ :cowboy, "0.6.1", git: "https://github.com/josevalim/cowboy.git" },
{ :dynamo, "0.1.0.dev", git: "https://github.com/josevalim/dynamo.git" },
{ :exjson, git: "https://github.com/guedes/exjson.git" } ]
end
end
```

avg_line_length: 25.304348 | max_line_length: 79 | alphanum_fraction: 0.611684
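The Mixfile above predates Elixir 1.0, so `deps: deps` is a bare local call that modern compilers reject. A minimal sketch of the same project file under current Mix conventions (the `extra_applications` entry and the trimmed dependency list are assumptions for illustration, not taken from the repo):

```elixir
defmodule DynamoDemo.MixProject do
  use Mix.Project

  def project do
    [
      app: :dynamo_demo,
      version: "0.0.1",
      # Modern Elixir requires parentheses on the local deps/0 call.
      deps: deps()
    ]
  end

  def application do
    [extra_applications: [:logger]]
  end

  defp deps do
    [
      {:mimetypes, git: "https://github.com/spawngrid/mimetypes.git"},
      {:exjson, git: "https://github.com/guedes/exjson.git"}
    ]
  end
end
```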
9e701f75a255bce5d172cdb076af259b1785e875 | 2257 | ex | Elixir | lib/erlef_web.ex | kianmeng/website | 1948f46eeeb1c1b598408bfb049bbe24189ace2e | ["Apache-2.0"] | stars: 1 (2019-10-31T19:59:41.000Z – 2019-10-31T19:59:41.000Z) | issues: null | forks: null

```elixir
defmodule ErlefWeb do
@moduledoc """
The entrypoint for defining your web interface, such
as controllers, views, channels and so on.
This can be used in your application as:
use ErlefWeb, :controller
use ErlefWeb, :view
The definitions below will be executed for every view,
controller, etc, so keep them short and clean, focused
on imports, uses and aliases.
Do NOT define functions inside the quoted expressions
below. Instead, define any helper function in modules
and import those modules here.
"""
def controller do
quote do
use Phoenix.Controller, namespace: ErlefWeb
import Plug.Conn
import ErlefWeb.Gettext
import Phoenix.LiveView.Controller
alias ErlefWeb.Router.Helpers, as: Routes
def audit(conn) do
%{member_id: conn.assigns.current_user.id}
end
end
end
def view do
quote do
use Phoenix.View,
root: "lib/erlef_web/templates",
namespace: ErlefWeb
# Import convenience functions from controllers
import Phoenix.Controller, only: [get_flash: 1, get_flash: 2, view_module: 1]
unquote(view_helpers())
end
end
def live_view do
quote do
use Phoenix.LiveView,
unquote(view_helpers())
end
end
def live_component do
quote do
use Phoenix.LiveComponent
unquote(view_helpers())
end
end
def router do
quote do
use Phoenix.Router
import Plug.Conn
import Phoenix.Controller
import Phoenix.LiveView.Router
end
end
defp view_helpers do
quote do
# Use all HTML functionality (forms, tags, etc)
use Phoenix.HTML
# Import LiveView helpers (live_render, live_component, live_patch, etc)
import Phoenix.LiveView.Helpers
# Import basic rendering functionality (render, render_layout, etc)
import Phoenix.View
import ErlefWeb.ErrorHelpers
import ErlefWeb.Gettext
import ErlefWeb.HTML
import ErlefWeb.ViewHelpers
alias ErlefWeb.Router.Helpers, as: Routes
end
end
@doc """
When used, dispatch to the appropriate controller/view/etc.
"""
defmacro __using__(which) when is_atom(which) do
apply(__MODULE__, which, [])
end
end
```

avg_line_length: 22.57 | max_line_length: 83 | alphanum_fraction: 0.680106
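`__using__/1` dispatches on the atom, so each module opts into exactly one of the quote blocks above. A hedged sketch of the pattern in use (the controller module and action are hypothetical):

```elixir
defmodule ErlefWeb.PingController do
  # Expands ErlefWeb.controller/0: Phoenix.Controller, Plug.Conn,
  # the Routes alias, and the audit/1 helper all become available here.
  use ErlefWeb, :controller

  def index(conn, _params), do: text(conn, "pong")
end
```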
9e702677d1d83e395988000641f7a0a4f04b0995 | 1593 | ex | Elixir | clients/big_query/lib/google_api/big_query/v2/model/job_cancel_response.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | ["Apache-2.0"] | stars: null | issues: 1 (2020-12-18T09:25:12.000Z – 2020-12-18T09:25:12.000Z) | forks: 1 (2020-10-04T10:12:44.000Z – 2020-10-04T10:12:44.000Z)

```elixir
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.BigQuery.V2.Model.JobCancelResponse do
@moduledoc """
## Attributes
* `job` (*type:* `GoogleApi.BigQuery.V2.Model.Job.t`, *default:* `nil`) - The final state of the job.
* `kind` (*type:* `String.t`, *default:* `bigquery#jobCancelResponse`) - The resource type of the response.
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:job => GoogleApi.BigQuery.V2.Model.Job.t(),
:kind => String.t()
}
field(:job, as: GoogleApi.BigQuery.V2.Model.Job)
field(:kind)
end
defimpl Poison.Decoder, for: GoogleApi.BigQuery.V2.Model.JobCancelResponse do
def decode(value, options) do
GoogleApi.BigQuery.V2.Model.JobCancelResponse.decode(value, options)
end
end
defimpl Poison.Encoder, for: GoogleApi.BigQuery.V2.Model.JobCancelResponse do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
```

avg_line_length: 31.86 | max_line_length: 111 | alphanum_fraction: 0.722536
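Since the model implements `Poison.Decoder`, a JSON payload decodes straight into the struct. A hedged sketch (the JSON literal is invented for illustration):

```elixir
json = ~s({"kind": "bigquery#jobCancelResponse", "job": null})

response =
  Poison.decode!(json, as: %GoogleApi.BigQuery.V2.Model.JobCancelResponse{})
# => %GoogleApi.BigQuery.V2.Model.JobCancelResponse{
#      kind: "bigquery#jobCancelResponse", job: nil}
```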
9e702e5e88360e3e8359f8f32b8a09bf8e8f2db2 | 547 | ex | Elixir | apps/firestorm_web/lib/firestorm_web/web/channels/users_channel.ex | CircleCI-Public/firestorm | 9ca2c46a2b2377370347ad94d6003eeb77be38d6 | ["MIT"] | stars: 10 (2017-06-28T08:06:52.000Z – 2022-03-19T17:49:21.000Z) | issues: null | forks: 2 (2017-10-21T12:01:02.000Z – 2021-01-29T10:26:22.000Z)

```elixir
defmodule FirestormWeb.Web.UsersChannel do
use FirestormWeb.Web, :channel
alias FirestormWeb.Web.Api.V1.{
FetchView
}
import Ecto.Query
intercept ["update"]
def join("users:" <> id, payload, socket) do
if authorized?(payload) do
{:ok, socket}
else
{:error, %{reason: "unauthorized"}}
end
end
# Add authorization logic here as required.
defp authorized?(_payload) do
true
end
def handle_out("update", msg, socket) do
  push socket, "update", FetchView.render("index.json", msg)
  # handle_out/3 must return a channel tuple; push/3 alone returns :ok,
  # which would crash the channel.
  {:noreply, socket}
end
end
```

avg_line_length: 20.259259 | max_line_length: 62 | alphanum_fraction: 0.66362
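Because `"update"` is intercepted, any broadcast on a `users:<id>` topic is re-rendered through `FetchView` in `handle_out/3` before delivery. A hedged sketch of triggering it (the endpoint module name and payload shape are assumptions):

```elixir
FirestormWeb.Web.Endpoint.broadcast("users:#{user.id}", "update", %{users: [user]})
```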
9e707345bb4fa09a9cae68eec7601687ce2daa3a | 258 | ex | Elixir | testData/org/elixir_lang/parser_definition/matched_qualified_multiple_aliases_parsing_test_case/MatchedUnqualifiedCallOperation.ex | keyno63/intellij-elixir | 4033e319992c53ddd42a683ee7123a97b5e34f02 | ["Apache-2.0"] | stars: 1668 (2015-01-03T05:54:27.000Z – 2022-03-25T08:01:20.000Z) | issues: 2018 (2015-01-01T22:43:39.000Z – 2022-03-31T20:13:08.000Z) | forks: 145 (2015-01-15T11:37:16.000Z – 2021-12-22T05:51:02.000Z)

```elixir
relative_identifier(
One.{}
)(
Two.{Three}
).{Four, Five}
relative_identifier key: One.{}
relative_identifier unqualified One.{Two},
key: Three.{Four, Five}
relative_identifier One.{},
key: Two.{Three}
```

avg_line_length: 23.454545 | max_line_length: 55 | alphanum_fraction: 0.573643
9e707e60d30997e0b5da6ab42699ed4e4326869e | 756 | ex | Elixir | plugins/ucc_ui_flex_tab/lib/ucc_ui_flex_tab_web/views/tab_bar_view.ex | josephkabraham/ucx_ucc | 0dbd9e3eb5940336b4870cff033482ceba5f6ee7 | ["MIT"] | stars: null | issues: null | forks: null

```elixir
defmodule UccUiFlexTabWeb.TabBarView do
@moduledoc """
View helpers for the TabBar templates.
"""
use UccUiFlexTabWeb, :view
alias UcxUcc.TabBar
@doc """
Test if a group is visible for a given tab.
## Examples
iex> tab = %{groups: [:one, :two]}
iex> UccUiFlexTabWeb.TabBarView.visible? tab, :one
true
iex> UccUiFlexTabWeb.TabBarView.visible? tab, :three
false
"""
# @spec visible?(UcxUcc.TabBar.Tab.t, atom | list) :: boolean
def visible?(tab, groups) when is_list(groups) do
Enum.reduce groups, false, fn group, acc ->
acc || visible?(tab, group)
end
end
def visible?(tab, group) do
group in Map.get(tab, :groups, [])
end
def buttons do
TabBar.get_buttons
end
end
```

avg_line_length: 21 | max_line_length: 63 | alphanum_fraction: 0.642857
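The list clause of `visible?/2` ORs the result across groups, so a single match suffices. A hedged continuation of the doctest above:

```elixir
iex> tab = %{groups: [:one, :two]}
iex> UccUiFlexTabWeb.TabBarView.visible? tab, [:three, :one]
true
```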
9e70969e9a59c9a6f3623632a67ce0750752e239 | 1411 | exs | Elixir | 11_fuctions_or_not.exs | Meyhem/hackerrank-fp-elixir | b1b3343d5f05152d37ba92f11955003602e4699b | ["MIT"] | stars: null | issues: null | forks: null

```elixir
# echo -e "2\n3\n1 2\n2 3\n3 4\n2\n1 2\n1 3\n" | elixir 11_fuctions_or_not.exs
defmodule Solution do
def read_float() do
case IO.gets "" do
line when is_binary(line) -> { :ok, String.to_float String.trim line }
:eof -> { :error }
{ :error, _} -> { :error }
end
end
def read_int() do
case IO.gets "" do
line when is_binary(line) -> { :ok, String.to_integer String.trim line }
:eof -> { :error }
{ :error, _} -> { :error }
end
end
def read_int_list() do
case IO.gets "" do
line when is_binary(line) -> {
:ok,
String.trim(line, "\n") |> String.split |> Enum.map(&String.to_integer/1)
}
:eof -> { :error }
{ :error, _} -> { :error }
end
end
def relation_list_reducer(e, acc) do
[k, v] = e
if Map.has_key?(acc, k) and acc[k] != v do
Map.put(acc, :hasCollision, true)
else
Map.put(acc, k, v)
end
end
def map_bool(true), do: "YES"
def map_bool(false), do: "NO"
def process_test_case(n) do
  1..n
  |> Enum.map(fn _ -> elem(read_int_list(), 1) end)
  |> Enum.reduce(%{:hasCollision => false}, &relation_list_reducer/2)
  |> Map.fetch!(:hasCollision)
  |> Kernel.not()
  |> map_bool()
  |> IO.puts()
end
def main() do
{ _, cases } = read_int()
for _ <- 1..cases do
{ _, n } = read_int()
process_test_case n
end
end
end
Solution.main
```

avg_line_length: 22.046875 | max_line_length: 81 | alphanum_fraction: 0.557052
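`relation_list_reducer/2` flags a collision the first time a key reappears with a different value, which is exactly the "not a function" condition. Illustrating that branch directly:

```elixir
iex> Enum.reduce([[1, 2], [1, 3]], %{hasCollision: false}, &Solution.relation_list_reducer/2)
%{1 => 2, :hasCollision => true}
```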
9e70af67c663aa7a967e02bf76b5db4c0b3fd459 | 69 | ex | Elixir | lib/zero_phoenix_web/views/page_view.ex | idkjs/zero-to-graphql-using-phoenix | 5e7f99af40030d57a603d9457e4906324fdfa4ea | ["MIT"] | stars: 9 (2019-03-26T22:16:08.000Z – 2021-09-27T12:42:34.000Z) | issues: 3 (2020-06-08T23:03:49.000Z – 2020-06-09T03:11:12.000Z) | forks: 7 (2019-01-15T17:00:07.000Z – 2021-07-18T23:30:29.000Z)

```elixir
defmodule ZeroPhoenixWeb.PageView do
use ZeroPhoenixWeb, :view
end
```

avg_line_length: 17.25 | max_line_length: 36 | alphanum_fraction: 0.826087
9e70b86c5aa2cde430e7d6c43559e260ff0add80 | 940 | exs | Elixir | test/githubist/argument_parser_test.exs | alpcanaydin/githubist-api (stars) / 5l1v3r1/githubist-api (issues, forks) | 6481f8177c5b8573da2d5df52ffaff41340b25d0 | ["MIT"] | stars: 33 (2018-10-13T16:40:36.000Z – 2021-05-23T14:13:34.000Z) | issues: 1 (2018-12-23T19:59:05.000Z – 2018-12-24T18:08:00.000Z) | forks: 3 (2018-10-13T22:18:38.000Z – 2020-03-29T23:41:23.000Z)

```elixir
defmodule Githubist.GraphQLArgumentParserTests do
@moduledoc false
use Githubist.DataCase
alias Githubist.GraphQLArgumentParser
describe "parse_limit/2" do
test "updates limit if it bigger then max" do
params = %{limit: 200}
assert %{limit: 100} = GraphQLArgumentParser.parse_limit(params, max_limit: 100)
end
test "doesn't do anything if limit is lower than max" do
params = %{limit: 20}
assert %{limit: 20} = GraphQLArgumentParser.parse_limit(params, max_limit: 100)
end
end
describe "parse_order_by/1" do
test "converts map to tuple" do
params = %{order_by: %{direction: :desc, field: :id}}
assert %{order_by: {:desc, :id}} = GraphQLArgumentParser.parse_order_by(params)
end
test "doesn't do anything if order_by does not exist" do
params = %{limit: 10}
assert %{limit: 10} = GraphQLArgumentParser.parse_order_by(params)
end
end
end
```

avg_line_length: 26.111111 | max_line_length: 86 | alphanum_fraction: 0.68617
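The tests pin down the contract of `parse_limit/2` and `parse_order_by/1` without showing the implementation. A hedged sketch that would satisfy them (shapes inferred from the assertions only; not the library's actual code):

```elixir
defmodule GraphQLArgumentParserSketch do
  # Cap :limit at the configured maximum.
  def parse_limit(%{limit: limit} = params, [max_limit: max]) when limit > max do
    %{params | limit: max}
  end

  def parse_limit(params, _opts), do: params

  # Convert %{direction: d, field: f} into the {d, f} tuple used for ordering.
  def parse_order_by(%{order_by: %{direction: direction, field: field}} = params) do
    %{params | order_by: {direction, field}}
  end

  def parse_order_by(params), do: params
end
```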
9e70c01835c84a9772021c25e76564ccc1a9449a | 4516 | ex | Elixir | lib/level/connections.ex | renesugar/level | eb63bc9d4d24187ad07d9892088b4e55ad6258e4 | ["Apache-2.0"] | stars: null | issues: null | forks: null

```elixir
defmodule Level.Connections do
@moduledoc """
Functions for loading connections between resources, designed to be used in
GraphQL query resolution.
"""
alias Level.Connections.GroupMemberships
alias Level.Connections.GroupPosts
alias Level.Connections.Groups
alias Level.Connections.SpaceUsers
alias Level.Connections.UserGroupMemberships
alias Level.Groups.Group
alias Level.Groups.GroupUser
alias Level.Pagination
alias Level.Spaces
alias Level.Spaces.Space
alias Level.Spaces.SpaceUser
alias Level.Users.User
@typedoc "A context map containing the current user"
@type authenticated_context :: %{context: %{current_user: User.t()}}
@typedoc "The return value for paginated connections"
@type paginated_result :: {:ok, Pagination.Result.t()} | {:error, String.t()}
@doc """
Fetches a space by id.
"""
@spec space(term(), map(), authenticated_context()) :: {:ok, Space.t()} | {:error, String.t()}
def space(parent, args, info)
def space(_root, %{id: id}, %{context: %{current_user: user}}) do
case Spaces.get_space(user, id) do
{:ok, %{space: space}} ->
{:ok, space}
error ->
error
end
end
@doc """
Fetches a space membership by space id.
"""
@spec space_user(User.t(), map(), authenticated_context()) ::
{:ok, SpaceUser.t()} | {:error, String.t()}
def space_user(parent, args, info)
def space_user(_parent, %{space_id: id}, %{context: %{current_user: user}}) do
case Spaces.get_space(user, id) do
{:ok, %{space_user: space_user}} ->
{:ok, space_user}
error ->
error
end
end
@doc """
Fetches spaces that a user belongs to.
"""
@spec space_users(User.t(), SpaceUsers.t(), authenticated_context()) :: paginated_result()
def space_users(user, args, info) do
SpaceUsers.get(user, struct(SpaceUsers, args), info)
end
@doc """
Fetches groups for given a space that are visible to the current user.
"""
@spec groups(Space.t(), Groups.t(), authenticated_context()) :: paginated_result()
def groups(space, args, info) do
Groups.get(space, struct(Groups, args), info)
end
@doc """
Fetches a group by id.
"""
@spec group(Space.t(), map(), authenticated_context()) ::
{:ok, Group.t()} | {:error, String.t()}
def group(space, %{id: id} = _args, %{context: %{current_user: user}}) do
with {:ok, %{space_user: space_user}} <- Spaces.get_space(user, space.id),
{:ok, group} <- Level.Groups.get_group(space_user, id) do
{:ok, group}
else
error ->
error
end
end
@doc """
Fetches group memberships.
"""
@spec group_memberships(User.t(), UserGroupMemberships.t(), authenticated_context()) ::
paginated_result()
@spec group_memberships(Group.t(), GroupMemberships.t(), authenticated_context()) ::
paginated_result()
def group_memberships(%User{} = user, args, info) do
UserGroupMemberships.get(user, struct(UserGroupMemberships, args), info)
end
def group_memberships(%Group{} = user, args, info) do
GroupMemberships.get(user, struct(GroupMemberships, args), info)
end
@doc """
Fetches featured group memberships.
"""
@spec featured_group_memberships(Group.t(), map(), authenticated_context) ::
{:ok, [GroupUser.t()]} | no_return()
def featured_group_memberships(group, _args, _info) do
Level.Groups.list_featured_memberships(group)
end
@doc """
Fetches the current user's membership.
"""
@spec group_membership(Group.t(), map(), authenticated_context()) ::
{:ok, GroupUser.t()} | {:error, String.t()}
def group_membership(%Group{} = group, _args, %{context: %{current_user: user}}) do
case Spaces.get_space(user, group.space_id) do
{:ok, %{space_user: space_user, space: space}} ->
case Level.Groups.get_group_membership(group, space_user) do
{:ok, group_user} ->
{:ok, group_user}
_ ->
virtual_group_user = %GroupUser{
state: "NOT_SUBSCRIBED",
space: space,
group: group,
space_user: space_user
}
{:ok, virtual_group_user}
end
error ->
error
end
end
@doc """
Fetches posts within a given group.
"""
@spec group_posts(Group.t(), GroupPosts.t(), authenticated_context()) :: paginated_result()
def group_posts(group, args, info) do
GroupPosts.get(group, struct(GroupPosts, args), info)
end
end
```

avg_line_length: 29.907285 | max_line_length: 96 | alphanum_fraction: 0.641718
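Every resolver here takes the `(parent, args, info)` triple, so each one can be captured directly in an Absinthe field. A hedged wiring sketch (the schema module, object, and field names are assumptions):

```elixir
defmodule LevelWeb.SchemaSketch do
  use Absinthe.Schema.Notation

  object :space_queries do
    field :space, :space do
      arg :id, non_null(:id)
      # Absinthe passes parent, args, and resolution info straight through.
      resolve &Level.Connections.space/3
    end
  end
end
```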
9e70cd114890fdfa100dabd15db2e53c9ca67393 | 1732 | ex | Elixir | lib/ash/filter/expression.ex | elbow-jason/ash | eb63bc9d4d24187ad07d9892088b4e55ad6258e4 | ["MIT"] | stars: 1 (2021-12-27T09:43:29.000Z – 2021-12-27T09:43:29.000Z) | issues: null | forks: null

```elixir
defmodule Ash.Filter.Expression do
@moduledoc "Represents a boolean expression"
defstruct [:op, :left, :right]
def new(_, nil, nil), do: nil
def new(:and, false, _), do: false
def new(:and, _, false), do: false
def new(:or, true, _), do: true
def new(:or, _, true), do: true
def new(_, nil, right), do: right
def new(_, left, nil), do: left
def new(op, %__MODULE__{op: left_op} = left, %__MODULE__{op: op} = right) when left_op != op do
%__MODULE__{op: op, left: right, right: left}
end
def new(op, left, right) do
%__MODULE__{op: op, left: left, right: right}
end
end
defimpl Inspect, for: Ash.Filter.Expression do
import Inspect.Algebra
def inspect(
%{left: left, right: right, op: op},
opts
) do
container_type = container_type(opts)
opts = put_container_type(opts, op)
if container_type && op != container_type do
concat(["(", to_doc(left, opts), " ", to_string(op), " ", to_doc(right, opts), ")"])
else
concat([to_doc(left, opts), " ", to_string(op), " ", to_doc(right, opts)])
end
end
# custom options not available before Elixir 1.9
defp container_type(%{custom_options: %{container_type: container_type}}), do: container_type
defp container_type(_), do: nil
defp put_container_type(opts, container_type) do
custom_options = apply(Map, :get, [opts, :custom_options])
apply(Map, :put, [
opts,
:custom_options,
Keyword.put(custom_options, :container_type, container_type)
])
# above version required to avoid dialyzer warnings on lack of custom_options in pre-1.9 elixir
# %{opts | custom_options: Keyword.put(opts.custom_options, :container_type, container_type)}
end
end
```

avg_line_length: 29.862069 | max_line_length: 99 | alphanum_fraction: 0.658776
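`new/3` constant-folds while building: `nil` operands drop out and boolean literals short-circuit, so no struct is allocated for trivial cases. For illustration, with `left` and `right` standing for arbitrary sub-expressions:

```elixir
alias Ash.Filter.Expression

Expression.new(:and, false, right)  #=> false (:and short-circuits on false)
Expression.new(:or, left, true)     #=> true  (:or short-circuits on true)
Expression.new(:and, nil, right)    #=> right (nil operands drop out)
Expression.new(:and, left, right)   #=> %Expression{op: :and, ...}
```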
9e70f1d34885db12dac36f1244b8c82f84b814ab | 224 | exs | Elixir | apps/alert_processor/priv/repo/migrations/20170509151223_add_last_push_notification.exs | mbta/alerts_concierge | d8e643445ef06f80ca273f2914c6959daea146f6 | ["MIT"] | stars: null | issues: 21 (2021-03-12T17:05:30.000Z – 2022-02-16T21:48:35.000Z) | forks: 1 (2021-12-09T15:09:53.000Z – 2021-12-09T15:09:53.000Z)

```elixir
defmodule MbtaServer.Repo.Migrations.AddLastPushNotification do
use Ecto.Migration
def change do
alter table(:notifications, primary_key: false) do
add :last_push_notification, :utc_datetime
end
end
end
```

avg_line_length: 22.4 | max_line_length: 63 | alphanum_fraction: 0.767857
9e70f43dfcfc615dfa2ce49cd0678e4936c87a40 | 61828 | ex | Elixir | clients/memcache/lib/google_api/memcache/v1beta2/api/projects.ex | EVLedger/elixir-google-api | 61edef19a5e2c7c63848f7030c6d8d651e4593d4 | ["Apache-2.0"] | stars: null | issues: null | forks: null

```elixir
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Memcache.V1beta2.Api.Projects do
@moduledoc """
API calls for all endpoints tagged `Projects`.
"""
alias GoogleApi.Memcache.V1beta2.Connection
alias GoogleApi.Gax.{Request, Response}
@library_version Mix.Project.config() |> Keyword.get(:version, "")
@doc """
Gets information about a location.
## Parameters
* `connection` (*type:* `GoogleApi.Memcache.V1beta2.Connection.t`) - Connection to server
* `projects_id` (*type:* `String.t`) - Part of `name`. Resource name for the location.
* `locations_id` (*type:* `String.t`) - Part of `name`. See documentation of `projectsId`.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:"$.xgafv"` (*type:* `String.t`) - V1 error format.
* `:access_token` (*type:* `String.t`) - OAuth access token.
* `:alt` (*type:* `String.t`) - Data format for response.
* `:callback` (*type:* `String.t`) - JSONP
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* `:uploadType` (*type:* `String.t`) - Legacy upload protocol for media (e.g. "media", "multipart").
* `:upload_protocol` (*type:* `String.t`) - Upload protocol for media (e.g. "raw", "multipart").
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Memcache.V1beta2.Model.Location{}}` on success
* `{:error, info}` on failure
"""
@spec memcache_projects_locations_get(
Tesla.Env.client(),
String.t(),
String.t(),
keyword(),
keyword()
) ::
{:ok, GoogleApi.Memcache.V1beta2.Model.Location.t()}
| {:ok, Tesla.Env.t()}
| {:error, Tesla.Env.t()}
def memcache_projects_locations_get(
connection,
projects_id,
locations_id,
optional_params \\ [],
opts \\ []
) do
optional_params_config = %{
:"$.xgafv" => :query,
:access_token => :query,
:alt => :query,
:callback => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:uploadType => :query,
:upload_protocol => :query
}
request =
Request.new()
|> Request.method(:get)
|> Request.url("/v1beta2/projects/{projectsId}/locations/{locationsId}", %{
"projectsId" => URI.encode(projects_id, &URI.char_unreserved?/1),
"locationsId" => URI.encode(locations_id, &URI.char_unreserved?/1)
})
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Memcache.V1beta2.Model.Location{}])
end
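# Hedged usage sketch (not from the generated file; the project/region values
# are invented and the token-based connection constructor is an assumption):
#
#   conn = GoogleApi.Memcache.V1beta2.Connection.new(token)
#   {:ok, %GoogleApi.Memcache.V1beta2.Model.Location{} = location} =
#     memcache_projects_locations_get(conn, "my-project", "us-central1")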
@doc """
Lists information about the supported locations for this service.
## Parameters
* `connection` (*type:* `GoogleApi.Memcache.V1beta2.Connection.t`) - Connection to server
* `projects_id` (*type:* `String.t`) - Part of `name`. The resource that owns the locations collection, if applicable.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:"$.xgafv"` (*type:* `String.t`) - V1 error format.
* `:access_token` (*type:* `String.t`) - OAuth access token.
* `:alt` (*type:* `String.t`) - Data format for response.
* `:callback` (*type:* `String.t`) - JSONP
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* `:uploadType` (*type:* `String.t`) - Legacy upload protocol for media (e.g. "media", "multipart").
* `:upload_protocol` (*type:* `String.t`) - Upload protocol for media (e.g. "raw", "multipart").
* `:filter` (*type:* `String.t`) - The standard list filter.
* `:pageSize` (*type:* `integer()`) - The standard list page size.
* `:pageToken` (*type:* `String.t`) - The standard list page token.
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Memcache.V1beta2.Model.ListLocationsResponse{}}` on success
* `{:error, info}` on failure
"""
@spec memcache_projects_locations_list(Tesla.Env.client(), String.t(), keyword(), keyword()) ::
{:ok, GoogleApi.Memcache.V1beta2.Model.ListLocationsResponse.t()}
| {:ok, Tesla.Env.t()}
| {:error, Tesla.Env.t()}
def memcache_projects_locations_list(connection, projects_id, optional_params \\ [], opts \\ []) do
optional_params_config = %{
:"$.xgafv" => :query,
:access_token => :query,
:alt => :query,
:callback => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:uploadType => :query,
:upload_protocol => :query,
:filter => :query,
:pageSize => :query,
:pageToken => :query
}
request =
Request.new()
|> Request.method(:get)
|> Request.url("/v1beta2/projects/{projectsId}/locations", %{
"projectsId" => URI.encode(projects_id, &URI.char_unreserved?/1)
})
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(
opts ++ [struct: %GoogleApi.Memcache.V1beta2.Model.ListLocationsResponse{}]
)
end
@doc """
ApplyParameters will update current set of Parameters to the set of
specified nodes of the Memcached Instance.
## Parameters
* `connection` (*type:* `GoogleApi.Memcache.V1beta2.Connection.t`) - Connection to server
* `projects_id` (*type:* `String.t`) - Part of `name`. Required. Resource name of the Memcached instance for which parameter group updates
should be applied.
* `locations_id` (*type:* `String.t`) - Part of `name`. See documentation of `projectsId`.
* `instances_id` (*type:* `String.t`) - Part of `name`. See documentation of `projectsId`.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:"$.xgafv"` (*type:* `String.t`) - V1 error format.
* `:access_token` (*type:* `String.t`) - OAuth access token.
* `:alt` (*type:* `String.t`) - Data format for response.
* `:callback` (*type:* `String.t`) - JSONP
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* `:uploadType` (*type:* `String.t`) - Legacy upload protocol for media (e.g. "media", "multipart").
* `:upload_protocol` (*type:* `String.t`) - Upload protocol for media (e.g. "raw", "multipart").
* `:body` (*type:* `GoogleApi.Memcache.V1beta2.Model.ApplyParametersRequest.t`) -
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Memcache.V1beta2.Model.Operation{}}` on success
* `{:error, info}` on failure
"""
@spec memcache_projects_locations_instances_apply_parameters(
Tesla.Env.client(),
String.t(),
String.t(),
String.t(),
keyword(),
keyword()
) ::
{:ok, GoogleApi.Memcache.V1beta2.Model.Operation.t()}
| {:ok, Tesla.Env.t()}
| {:error, Tesla.Env.t()}
def memcache_projects_locations_instances_apply_parameters(
connection,
projects_id,
locations_id,
instances_id,
optional_params \\ [],
opts \\ []
) do
optional_params_config = %{
:"$.xgafv" => :query,
:access_token => :query,
:alt => :query,
:callback => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:uploadType => :query,
:upload_protocol => :query,
:body => :body
}
request =
Request.new()
|> Request.method(:post)
|> Request.url(
"/v1beta2/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:applyParameters",
%{
"projectsId" => URI.encode(projects_id, &URI.char_unreserved?/1),
"locationsId" => URI.encode(locations_id, &URI.char_unreserved?/1),
"instancesId" => URI.encode(instances_id, &URI.char_unreserved?/1)
}
)
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Memcache.V1beta2.Model.Operation{}])
end
@doc """
Creates a new Instance in a given project and location.
## Parameters
* `connection` (*type:* `GoogleApi.Memcache.V1beta2.Connection.t`) - Connection to server
* `projects_id` (*type:* `String.t`) - Part of `parent`. Required. The resource name of the instance location using the form:
`projects/{project_id}/locations/{location_id}`
where `location_id` refers to a GCP region
* `locations_id` (*type:* `String.t`) - Part of `parent`. See documentation of `projectsId`.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:"$.xgafv"` (*type:* `String.t`) - V1 error format.
* `:access_token` (*type:* `String.t`) - OAuth access token.
* `:alt` (*type:* `String.t`) - Data format for response.
* `:callback` (*type:* `String.t`) - JSONP
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* `:uploadType` (*type:* `String.t`) - Legacy upload protocol for media (e.g. "media", "multipart").
* `:upload_protocol` (*type:* `String.t`) - Upload protocol for media (e.g. "raw", "multipart").
* `:instanceId` (*type:* `String.t`) - Required. The logical name of the Memcached instance in the user
project with the following restrictions:
* Must contain only lowercase letters, numbers, and hyphens.
* Must start with a letter.
* Must be between 1-40 characters.
* Must end with a number or a letter.
* Must be unique within the user project / location
* `:body` (*type:* `GoogleApi.Memcache.V1beta2.Model.Instance.t`) -
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Memcache.V1beta2.Model.Operation{}}` on success
* `{:error, info}` on failure
"""
@spec memcache_projects_locations_instances_create(
Tesla.Env.client(),
String.t(),
String.t(),
keyword(),
keyword()
) ::
{:ok, GoogleApi.Memcache.V1beta2.Model.Operation.t()}
| {:ok, Tesla.Env.t()}
| {:error, Tesla.Env.t()}
def memcache_projects_locations_instances_create(
connection,
projects_id,
locations_id,
optional_params \\ [],
opts \\ []
) do
optional_params_config = %{
:"$.xgafv" => :query,
:access_token => :query,
:alt => :query,
:callback => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:uploadType => :query,
:upload_protocol => :query,
:instanceId => :query,
:body => :body
}
request =
Request.new()
|> Request.method(:post)
|> Request.url("/v1beta2/projects/{projectsId}/locations/{locationsId}/instances", %{
"projectsId" => URI.encode(projects_id, &URI.char_unreserved?/1),
"locationsId" => URI.encode(locations_id, &URI.char_unreserved?/1)
})
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Memcache.V1beta2.Model.Operation{}])
end
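# Hedged usage sketch (identifiers are invented; required Instance fields are
# omitted for brevity):
#
#   {:ok, operation} =
#     memcache_projects_locations_instances_create(
#       conn, "my-project", "us-central1",
#       instanceId: "my-cache",
#       body: %GoogleApi.Memcache.V1beta2.Model.Instance{}
#     )
#   # Returns a long-running Operation; poll it until done.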
@doc """
Deletes a single Instance.
## Parameters
* `connection` (*type:* `GoogleApi.Memcache.V1beta2.Connection.t`) - Connection to server
* `projects_id` (*type:* `String.t`) - Part of `name`. Required. Memcached instance resource name in the format:
`projects/{project_id}/locations/{location_id}/instances/{instance_id}`
where `location_id` refers to a GCP region
* `locations_id` (*type:* `String.t`) - Part of `name`. See documentation of `projectsId`.
* `instances_id` (*type:* `String.t`) - Part of `name`. See documentation of `projectsId`.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:"$.xgafv"` (*type:* `String.t`) - V1 error format.
* `:access_token` (*type:* `String.t`) - OAuth access token.
* `:alt` (*type:* `String.t`) - Data format for response.
* `:callback` (*type:* `String.t`) - JSONP
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* `:uploadType` (*type:* `String.t`) - Legacy upload protocol for media (e.g. "media", "multipart").
* `:upload_protocol` (*type:* `String.t`) - Upload protocol for media (e.g. "raw", "multipart").
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Memcache.V1beta2.Model.Operation{}}` on success
* `{:error, info}` on failure
"""
@spec memcache_projects_locations_instances_delete(
Tesla.Env.client(),
String.t(),
String.t(),
String.t(),
keyword(),
keyword()
) ::
{:ok, GoogleApi.Memcache.V1beta2.Model.Operation.t()}
| {:ok, Tesla.Env.t()}
| {:error, Tesla.Env.t()}
def memcache_projects_locations_instances_delete(
connection,
projects_id,
locations_id,
instances_id,
optional_params \\ [],
opts \\ []
) do
optional_params_config = %{
:"$.xgafv" => :query,
:access_token => :query,
:alt => :query,
:callback => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:uploadType => :query,
:upload_protocol => :query
}
request =
Request.new()
|> Request.method(:delete)
|> Request.url(
"/v1beta2/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}",
%{
"projectsId" => URI.encode(projects_id, &URI.char_unreserved?/1),
"locationsId" => URI.encode(locations_id, &URI.char_unreserved?/1),
"instancesId" => URI.encode(instances_id, &URI.char_unreserved?/1)
}
)
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Memcache.V1beta2.Model.Operation{}])
end
@doc """
Gets details of a single Instance.
## Parameters
* `connection` (*type:* `GoogleApi.Memcache.V1beta2.Connection.t`) - Connection to server
* `projects_id` (*type:* `String.t`) - Part of `name`. Required. Memcached instance resource name in the format:
`projects/{project_id}/locations/{location_id}/instances/{instance_id}`
where `location_id` refers to a GCP region
* `locations_id` (*type:* `String.t`) - Part of `name`. See documentation of `projectsId`.
* `instances_id` (*type:* `String.t`) - Part of `name`. See documentation of `projectsId`.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:"$.xgafv"` (*type:* `String.t`) - V1 error format.
* `:access_token` (*type:* `String.t`) - OAuth access token.
* `:alt` (*type:* `String.t`) - Data format for response.
* `:callback` (*type:* `String.t`) - JSONP
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* `:uploadType` (*type:* `String.t`) - Legacy upload protocol for media (e.g. "media", "multipart").
* `:upload_protocol` (*type:* `String.t`) - Upload protocol for media (e.g. "raw", "multipart").
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Memcache.V1beta2.Model.Instance{}}` on success
* `{:error, info}` on failure
"""
@spec memcache_projects_locations_instances_get(
Tesla.Env.client(),
String.t(),
String.t(),
String.t(),
keyword(),
keyword()
) ::
{:ok, GoogleApi.Memcache.V1beta2.Model.Instance.t()}
| {:ok, Tesla.Env.t()}
| {:error, Tesla.Env.t()}
def memcache_projects_locations_instances_get(
connection,
projects_id,
locations_id,
instances_id,
optional_params \\ [],
opts \\ []
) do
optional_params_config = %{
:"$.xgafv" => :query,
:access_token => :query,
:alt => :query,
:callback => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:uploadType => :query,
:upload_protocol => :query
}
request =
Request.new()
|> Request.method(:get)
|> Request.url(
"/v1beta2/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}",
%{
"projectsId" => URI.encode(projects_id, &URI.char_unreserved?/1),
"locationsId" => URI.encode(locations_id, &URI.char_unreserved?/1),
"instancesId" => URI.encode(instances_id, &URI.char_unreserved?/1)
}
)
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Memcache.V1beta2.Model.Instance{}])
end
@doc """
Gets the access control policy for a resource.
Returns an empty policy if the resource exists and does not have a policy
set.
## Parameters
* `connection` (*type:* `GoogleApi.Memcache.V1beta2.Connection.t`) - Connection to server
* `projects_id` (*type:* `String.t`) - Part of `resource`. REQUIRED: The resource for which the policy is being requested.
See the operation documentation for the appropriate value for this field.
* `locations_id` (*type:* `String.t`) - Part of `resource`. See documentation of `projectsId`.
* `instances_id` (*type:* `String.t`) - Part of `resource`. See documentation of `projectsId`.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:"$.xgafv"` (*type:* `String.t`) - V1 error format.
* `:access_token` (*type:* `String.t`) - OAuth access token.
* `:alt` (*type:* `String.t`) - Data format for response.
* `:callback` (*type:* `String.t`) - JSONP
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* `:uploadType` (*type:* `String.t`) - Legacy upload protocol for media (e.g. "media", "multipart").
* `:upload_protocol` (*type:* `String.t`) - Upload protocol for media (e.g. "raw", "multipart").
* `:"options.requestedPolicyVersion"` (*type:* `integer()`) - Optional. The policy format version to be returned.
Valid values are 0, 1, and 3. Requests specifying an invalid value will be
rejected.
Requests for policies with any conditional bindings must specify version 3.
Policies without any conditional bindings may specify any valid value or
leave the field unset.
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Memcache.V1beta2.Model.Policy{}}` on success
* `{:error, info}` on failure
"""
@spec memcache_projects_locations_instances_get_iam_policy(
Tesla.Env.client(),
String.t(),
String.t(),
String.t(),
keyword(),
keyword()
) ::
{:ok, GoogleApi.Memcache.V1beta2.Model.Policy.t()}
| {:ok, Tesla.Env.t()}
| {:error, Tesla.Env.t()}
def memcache_projects_locations_instances_get_iam_policy(
connection,
projects_id,
locations_id,
instances_id,
optional_params \\ [],
opts \\ []
) do
optional_params_config = %{
:"$.xgafv" => :query,
:access_token => :query,
:alt => :query,
:callback => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:uploadType => :query,
:upload_protocol => :query,
:"options.requestedPolicyVersion" => :query
}
request =
Request.new()
|> Request.method(:get)
|> Request.url(
"/v1beta2/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:getIamPolicy",
%{
"projectsId" => URI.encode(projects_id, &URI.char_unreserved?/1),
"locationsId" => URI.encode(locations_id, &URI.char_unreserved?/1),
"instancesId" => URI.encode(instances_id, &URI.char_unreserved?/1)
}
)
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Memcache.V1beta2.Model.Policy{}])
end
@doc """
Lists Instances in a given project and location.
## Parameters
* `connection` (*type:* `GoogleApi.Memcache.V1beta2.Connection.t`) - Connection to server
* `projects_id` (*type:* `String.t`) - Part of `parent`. Required. The resource name of the instance location using the form:
`projects/{project_id}/locations/{location_id}`
where `location_id` refers to a GCP region
* `locations_id` (*type:* `String.t`) - Part of `parent`. See documentation of `projectsId`.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:"$.xgafv"` (*type:* `String.t`) - V1 error format.
* `:access_token` (*type:* `String.t`) - OAuth access token.
* `:alt` (*type:* `String.t`) - Data format for response.
* `:callback` (*type:* `String.t`) - JSONP
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* `:uploadType` (*type:* `String.t`) - Legacy upload protocol for media (e.g. "media", "multipart").
* `:upload_protocol` (*type:* `String.t`) - Upload protocol for media (e.g. "raw", "multipart").
* `:filter` (*type:* `String.t`) - List filter. For example, exclude all Memcached instances with name as
my-instance by specifying "name != my-instance".
* `:orderBy` (*type:* `String.t`) - Sort results. Supported values are "name", "name desc" or "" (unsorted).
* `:pageSize` (*type:* `integer()`) - The maximum number of items to return.
If not specified, a default value of 1000 will be used by the service.
Regardless of the page_size value, the response may include a partial list
and a caller should only rely on response's
next_page_token
to determine if there are more instances left to be queried.
* `:pageToken` (*type:* `String.t`) - The next_page_token value returned from a previous List request,
if any.
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Memcache.V1beta2.Model.ListInstancesResponse{}}` on success
* `{:error, info}` on failure
"""
@spec memcache_projects_locations_instances_list(
Tesla.Env.client(),
String.t(),
String.t(),
keyword(),
keyword()
) ::
{:ok, GoogleApi.Memcache.V1beta2.Model.ListInstancesResponse.t()}
| {:ok, Tesla.Env.t()}
| {:error, Tesla.Env.t()}
def memcache_projects_locations_instances_list(
connection,
projects_id,
locations_id,
optional_params \\ [],
opts \\ []
) do
optional_params_config = %{
:"$.xgafv" => :query,
:access_token => :query,
:alt => :query,
:callback => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:uploadType => :query,
:upload_protocol => :query,
:filter => :query,
:orderBy => :query,
:pageSize => :query,
:pageToken => :query
}
request =
Request.new()
|> Request.method(:get)
|> Request.url("/v1beta2/projects/{projectsId}/locations/{locationsId}/instances", %{
"projectsId" => URI.encode(projects_id, &URI.char_unreserved?/1),
"locationsId" => URI.encode(locations_id, &URI.char_unreserved?/1)
})
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(
opts ++ [struct: %GoogleApi.Memcache.V1beta2.Model.ListInstancesResponse{}]
)
end
@doc """
Updates an existing Instance in a given project and location.
## Parameters
* `connection` (*type:* `GoogleApi.Memcache.V1beta2.Connection.t`) - Connection to server
* `projects_id` (*type:* `String.t`) - Part of `resource.name`. Required. Unique name of the resource in this scope including project and
location using the form:
`projects/{project_id}/locations/{location_id}/instances/{instance_id}`
Note: Memcached instances are managed and addressed at regional level so
location_id here refers to a GCP region; however, users may choose which
zones Memcached nodes within an instances should be provisioned in.
Refer to [zones] field for more details.
* `locations_id` (*type:* `String.t`) - Part of `resource.name`. See documentation of `projectsId`.
* `instances_id` (*type:* `String.t`) - Part of `resource.name`. See documentation of `projectsId`.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:"$.xgafv"` (*type:* `String.t`) - V1 error format.
* `:access_token` (*type:* `String.t`) - OAuth access token.
* `:alt` (*type:* `String.t`) - Data format for response.
* `:callback` (*type:* `String.t`) - JSONP
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* `:uploadType` (*type:* `String.t`) - Legacy upload protocol for media (e.g. "media", "multipart").
* `:upload_protocol` (*type:* `String.t`) - Upload protocol for media (e.g. "raw", "multipart").
* `:updateMask` (*type:* `String.t`) - Required. Mask of fields to update.
* `displayName`
* `:body` (*type:* `GoogleApi.Memcache.V1beta2.Model.Instance.t`) -
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Memcache.V1beta2.Model.Operation{}}` on success
* `{:error, info}` on failure
"""
@spec memcache_projects_locations_instances_patch(
Tesla.Env.client(),
String.t(),
String.t(),
String.t(),
keyword(),
keyword()
) ::
{:ok, GoogleApi.Memcache.V1beta2.Model.Operation.t()}
| {:ok, Tesla.Env.t()}
| {:error, Tesla.Env.t()}
def memcache_projects_locations_instances_patch(
connection,
projects_id,
locations_id,
instances_id,
optional_params \\ [],
opts \\ []
) do
optional_params_config = %{
:"$.xgafv" => :query,
:access_token => :query,
:alt => :query,
:callback => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:uploadType => :query,
:upload_protocol => :query,
:updateMask => :query,
:body => :body
}
request =
Request.new()
|> Request.method(:patch)
|> Request.url(
"/v1beta2/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}",
%{
"projectsId" => URI.encode(projects_id, &URI.char_unreserved?/1),
"locationsId" => URI.encode(locations_id, &URI.char_unreserved?/1),
"instancesId" => URI.encode(instances_id, &URI.char_unreserved?/1)
}
)
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Memcache.V1beta2.Model.Operation{}])
end
@doc """
Sets the access control policy on the specified resource. Replaces any
existing policy.
Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and PERMISSION_DENIED
## Parameters
* `connection` (*type:* `GoogleApi.Memcache.V1beta2.Connection.t`) - Connection to server
* `projects_id` (*type:* `String.t`) - Part of `resource`. REQUIRED: The resource for which the policy is being specified.
See the operation documentation for the appropriate value for this field.
* `locations_id` (*type:* `String.t`) - Part of `resource`. See documentation of `projectsId`.
* `instances_id` (*type:* `String.t`) - Part of `resource`. See documentation of `projectsId`.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:"$.xgafv"` (*type:* `String.t`) - V1 error format.
* `:access_token` (*type:* `String.t`) - OAuth access token.
* `:alt` (*type:* `String.t`) - Data format for response.
* `:callback` (*type:* `String.t`) - JSONP
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* `:uploadType` (*type:* `String.t`) - Legacy upload protocol for media (e.g. "media", "multipart").
* `:upload_protocol` (*type:* `String.t`) - Upload protocol for media (e.g. "raw", "multipart").
* `:body` (*type:* `GoogleApi.Memcache.V1beta2.Model.SetIamPolicyRequest.t`) -
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Memcache.V1beta2.Model.Policy{}}` on success
* `{:error, info}` on failure
"""
@spec memcache_projects_locations_instances_set_iam_policy(
Tesla.Env.client(),
String.t(),
String.t(),
String.t(),
keyword(),
keyword()
) ::
{:ok, GoogleApi.Memcache.V1beta2.Model.Policy.t()}
| {:ok, Tesla.Env.t()}
| {:error, Tesla.Env.t()}
def memcache_projects_locations_instances_set_iam_policy(
connection,
projects_id,
locations_id,
instances_id,
optional_params \\ [],
opts \\ []
) do
optional_params_config = %{
:"$.xgafv" => :query,
:access_token => :query,
:alt => :query,
:callback => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:uploadType => :query,
:upload_protocol => :query,
:body => :body
}
request =
Request.new()
|> Request.method(:post)
|> Request.url(
"/v1beta2/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:setIamPolicy",
%{
"projectsId" => URI.encode(projects_id, &URI.char_unreserved?/1),
"locationsId" => URI.encode(locations_id, &URI.char_unreserved?/1),
"instancesId" => URI.encode(instances_id, &URI.char_unreserved?/1)
}
)
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Memcache.V1beta2.Model.Policy{}])
end
@doc """
Returns permissions that a caller has on the specified resource.
If the resource does not exist, this will return an empty set of
permissions, not a NOT_FOUND error.
Note: This operation is designed to be used for building permission-aware
UIs and command-line tools, not for authorization checking. This operation
may "fail open" without warning.
## Parameters
* `connection` (*type:* `GoogleApi.Memcache.V1beta2.Connection.t`) - Connection to server
* `projects_id` (*type:* `String.t`) - Part of `resource`. REQUIRED: The resource for which the policy detail is being requested.
See the operation documentation for the appropriate value for this field.
* `locations_id` (*type:* `String.t`) - Part of `resource`. See documentation of `projectsId`.
* `instances_id` (*type:* `String.t`) - Part of `resource`. See documentation of `projectsId`.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:"$.xgafv"` (*type:* `String.t`) - V1 error format.
* `:access_token` (*type:* `String.t`) - OAuth access token.
* `:alt` (*type:* `String.t`) - Data format for response.
* `:callback` (*type:* `String.t`) - JSONP
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* `:uploadType` (*type:* `String.t`) - Legacy upload protocol for media (e.g. "media", "multipart").
* `:upload_protocol` (*type:* `String.t`) - Upload protocol for media (e.g. "raw", "multipart").
* `:body` (*type:* `GoogleApi.Memcache.V1beta2.Model.TestIamPermissionsRequest.t`) -
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Memcache.V1beta2.Model.TestIamPermissionsResponse{}}` on success
* `{:error, info}` on failure
"""
@spec memcache_projects_locations_instances_test_iam_permissions(
Tesla.Env.client(),
String.t(),
String.t(),
String.t(),
keyword(),
keyword()
) ::
{:ok, GoogleApi.Memcache.V1beta2.Model.TestIamPermissionsResponse.t()}
| {:ok, Tesla.Env.t()}
| {:error, Tesla.Env.t()}
def memcache_projects_locations_instances_test_iam_permissions(
connection,
projects_id,
locations_id,
instances_id,
optional_params \\ [],
opts \\ []
) do
optional_params_config = %{
:"$.xgafv" => :query,
:access_token => :query,
:alt => :query,
:callback => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:uploadType => :query,
:upload_protocol => :query,
:body => :body
}
request =
Request.new()
|> Request.method(:post)
|> Request.url(
"/v1beta2/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:testIamPermissions",
%{
"projectsId" => URI.encode(projects_id, &URI.char_unreserved?/1),
"locationsId" => URI.encode(locations_id, &URI.char_unreserved?/1),
"instancesId" => URI.encode(instances_id, &URI.char_unreserved?/1)
}
)
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(
opts ++ [struct: %GoogleApi.Memcache.V1beta2.Model.TestIamPermissionsResponse{}]
)
end
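  # Hypothetical sketch (ids are made up; a real request would set the
  # `permissions` field on the request body, assumed from the standard IAM
  # models):
  #
  #     {:ok, %GoogleApi.Memcache.V1beta2.Model.TestIamPermissionsResponse{}} =
  #       memcache_projects_locations_instances_test_iam_permissions(
  #         connection,
  #         "my-project",
  #         "us-central1",
  #         "my-instance",
  #         body: %GoogleApi.Memcache.V1beta2.Model.TestIamPermissionsRequest{}
  #       )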
@doc """
Updates the defined Memcached Parameters for an existing Instance.
  This method only stages the parameters; it must be followed by
ApplyParameters to apply the parameters to nodes of the Memcached Instance.
## Parameters
* `connection` (*type:* `GoogleApi.Memcache.V1beta2.Connection.t`) - Connection to server
* `projects_id` (*type:* `String.t`) - Part of `name`. Required. Resource name of the Memcached instance for which the parameters should be
updated.
* `locations_id` (*type:* `String.t`) - Part of `name`. See documentation of `projectsId`.
* `instances_id` (*type:* `String.t`) - Part of `name`. See documentation of `projectsId`.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:"$.xgafv"` (*type:* `String.t`) - V1 error format.
* `:access_token` (*type:* `String.t`) - OAuth access token.
* `:alt` (*type:* `String.t`) - Data format for response.
* `:callback` (*type:* `String.t`) - JSONP
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* `:uploadType` (*type:* `String.t`) - Legacy upload protocol for media (e.g. "media", "multipart").
* `:upload_protocol` (*type:* `String.t`) - Upload protocol for media (e.g. "raw", "multipart").
* `:body` (*type:* `GoogleApi.Memcache.V1beta2.Model.UpdateParametersRequest.t`) -
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Memcache.V1beta2.Model.Operation{}}` on success
* `{:error, info}` on failure
"""
@spec memcache_projects_locations_instances_update_parameters(
Tesla.Env.client(),
String.t(),
String.t(),
String.t(),
keyword(),
keyword()
) ::
{:ok, GoogleApi.Memcache.V1beta2.Model.Operation.t()}
| {:ok, Tesla.Env.t()}
| {:error, Tesla.Env.t()}
def memcache_projects_locations_instances_update_parameters(
connection,
projects_id,
locations_id,
instances_id,
optional_params \\ [],
opts \\ []
) do
optional_params_config = %{
:"$.xgafv" => :query,
:access_token => :query,
:alt => :query,
:callback => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:uploadType => :query,
:upload_protocol => :query,
:body => :body
}
request =
Request.new()
|> Request.method(:patch)
|> Request.url(
"/v1beta2/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:updateParameters",
%{
"projectsId" => URI.encode(projects_id, &URI.char_unreserved?/1),
"locationsId" => URI.encode(locations_id, &URI.char_unreserved?/1),
"instancesId" => URI.encode(instances_id, &URI.char_unreserved?/1)
}
)
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Memcache.V1beta2.Model.Operation{}])
end
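  # Hypothetical sketch: staging parameters returns a long-running Operation
  # and, per the doc above, must be followed by ApplyParameters. Ids and the
  # empty request body are placeholders.
  #
  #     {:ok, %GoogleApi.Memcache.V1beta2.Model.Operation{} = op} =
  #       memcache_projects_locations_instances_update_parameters(
  #         connection,
  #         "my-project",
  #         "us-central1",
  #         "my-instance",
  #         body: %GoogleApi.Memcache.V1beta2.Model.UpdateParametersRequest{}
  #       )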
@doc """
Starts asynchronous cancellation on a long-running operation. The server
makes a best effort to cancel the operation, but success is not
guaranteed. If the server doesn't support this method, it returns
`google.rpc.Code.UNIMPLEMENTED`. Clients can use
Operations.GetOperation or
other methods to check whether the cancellation succeeded or whether the
operation completed despite cancellation. On successful cancellation,
the operation is not deleted; instead, it becomes an operation with
an Operation.error value with a google.rpc.Status.code of 1,
corresponding to `Code.CANCELLED`.
## Parameters
* `connection` (*type:* `GoogleApi.Memcache.V1beta2.Connection.t`) - Connection to server
* `projects_id` (*type:* `String.t`) - Part of `name`. The name of the operation resource to be cancelled.
* `locations_id` (*type:* `String.t`) - Part of `name`. See documentation of `projectsId`.
* `operations_id` (*type:* `String.t`) - Part of `name`. See documentation of `projectsId`.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:"$.xgafv"` (*type:* `String.t`) - V1 error format.
* `:access_token` (*type:* `String.t`) - OAuth access token.
* `:alt` (*type:* `String.t`) - Data format for response.
* `:callback` (*type:* `String.t`) - JSONP
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* `:uploadType` (*type:* `String.t`) - Legacy upload protocol for media (e.g. "media", "multipart").
* `:upload_protocol` (*type:* `String.t`) - Upload protocol for media (e.g. "raw", "multipart").
* `:body` (*type:* `GoogleApi.Memcache.V1beta2.Model.CancelOperationRequest.t`) -
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Memcache.V1beta2.Model.Empty{}}` on success
* `{:error, info}` on failure
"""
@spec memcache_projects_locations_operations_cancel(
Tesla.Env.client(),
String.t(),
String.t(),
String.t(),
keyword(),
keyword()
) ::
{:ok, GoogleApi.Memcache.V1beta2.Model.Empty.t()}
| {:ok, Tesla.Env.t()}
| {:error, Tesla.Env.t()}
def memcache_projects_locations_operations_cancel(
connection,
projects_id,
locations_id,
operations_id,
optional_params \\ [],
opts \\ []
) do
optional_params_config = %{
:"$.xgafv" => :query,
:access_token => :query,
:alt => :query,
:callback => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:uploadType => :query,
:upload_protocol => :query,
:body => :body
}
request =
Request.new()
|> Request.method(:post)
|> Request.url(
"/v1beta2/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}:cancel",
%{
"projectsId" => URI.encode(projects_id, &URI.char_unreserved?/1),
"locationsId" => URI.encode(locations_id, &URI.char_unreserved?/1),
"operationsId" => URI.encode(operations_id, &URI.char_unreserved?/1)
}
)
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Memcache.V1beta2.Model.Empty{}])
end
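  # Hypothetical sketch (ids are made up): cancellation is best-effort, so
  # callers typically poll the operation afterwards to confirm the outcome.
  #
  #     {:ok, %GoogleApi.Memcache.V1beta2.Model.Empty{}} =
  #       memcache_projects_locations_operations_cancel(connection, "my-project", "us-central1", "op-123")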
@doc """
Deletes a long-running operation. This method indicates that the client is
no longer interested in the operation result. It does not cancel the
operation. If the server doesn't support this method, it returns
`google.rpc.Code.UNIMPLEMENTED`.
## Parameters
* `connection` (*type:* `GoogleApi.Memcache.V1beta2.Connection.t`) - Connection to server
* `projects_id` (*type:* `String.t`) - Part of `name`. The name of the operation resource to be deleted.
* `locations_id` (*type:* `String.t`) - Part of `name`. See documentation of `projectsId`.
* `operations_id` (*type:* `String.t`) - Part of `name`. See documentation of `projectsId`.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:"$.xgafv"` (*type:* `String.t`) - V1 error format.
* `:access_token` (*type:* `String.t`) - OAuth access token.
* `:alt` (*type:* `String.t`) - Data format for response.
* `:callback` (*type:* `String.t`) - JSONP
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* `:uploadType` (*type:* `String.t`) - Legacy upload protocol for media (e.g. "media", "multipart").
* `:upload_protocol` (*type:* `String.t`) - Upload protocol for media (e.g. "raw", "multipart").
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Memcache.V1beta2.Model.Empty{}}` on success
* `{:error, info}` on failure
"""
@spec memcache_projects_locations_operations_delete(
Tesla.Env.client(),
String.t(),
String.t(),
String.t(),
keyword(),
keyword()
) ::
{:ok, GoogleApi.Memcache.V1beta2.Model.Empty.t()}
| {:ok, Tesla.Env.t()}
| {:error, Tesla.Env.t()}
def memcache_projects_locations_operations_delete(
connection,
projects_id,
locations_id,
operations_id,
optional_params \\ [],
opts \\ []
) do
optional_params_config = %{
:"$.xgafv" => :query,
:access_token => :query,
:alt => :query,
:callback => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:uploadType => :query,
:upload_protocol => :query
}
request =
Request.new()
|> Request.method(:delete)
|> Request.url(
"/v1beta2/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}",
%{
"projectsId" => URI.encode(projects_id, &URI.char_unreserved?/1),
"locationsId" => URI.encode(locations_id, &URI.char_unreserved?/1),
"operationsId" => URI.encode(operations_id, &URI.char_unreserved?/1)
}
)
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Memcache.V1beta2.Model.Empty{}])
end
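  # Hypothetical sketch (ids are made up): this only discards the operation
  # record; per the doc above, it does not stop the underlying work.
  #
  #     {:ok, %GoogleApi.Memcache.V1beta2.Model.Empty{}} =
  #       memcache_projects_locations_operations_delete(connection, "my-project", "us-central1", "op-123")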
@doc """
Gets the latest state of a long-running operation. Clients can use this
method to poll the operation result at intervals as recommended by the API
service.
## Parameters
* `connection` (*type:* `GoogleApi.Memcache.V1beta2.Connection.t`) - Connection to server
* `projects_id` (*type:* `String.t`) - Part of `name`. The name of the operation resource.
* `locations_id` (*type:* `String.t`) - Part of `name`. See documentation of `projectsId`.
* `operations_id` (*type:* `String.t`) - Part of `name`. See documentation of `projectsId`.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:"$.xgafv"` (*type:* `String.t`) - V1 error format.
* `:access_token` (*type:* `String.t`) - OAuth access token.
* `:alt` (*type:* `String.t`) - Data format for response.
* `:callback` (*type:* `String.t`) - JSONP
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* `:uploadType` (*type:* `String.t`) - Legacy upload protocol for media (e.g. "media", "multipart").
* `:upload_protocol` (*type:* `String.t`) - Upload protocol for media (e.g. "raw", "multipart").
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Memcache.V1beta2.Model.Operation{}}` on success
* `{:error, info}` on failure
"""
@spec memcache_projects_locations_operations_get(
Tesla.Env.client(),
String.t(),
String.t(),
String.t(),
keyword(),
keyword()
) ::
{:ok, GoogleApi.Memcache.V1beta2.Model.Operation.t()}
| {:ok, Tesla.Env.t()}
| {:error, Tesla.Env.t()}
def memcache_projects_locations_operations_get(
connection,
projects_id,
locations_id,
operations_id,
optional_params \\ [],
opts \\ []
) do
optional_params_config = %{
:"$.xgafv" => :query,
:access_token => :query,
:alt => :query,
:callback => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:uploadType => :query,
:upload_protocol => :query
}
request =
Request.new()
|> Request.method(:get)
|> Request.url(
"/v1beta2/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}",
%{
"projectsId" => URI.encode(projects_id, &URI.char_unreserved?/1),
"locationsId" => URI.encode(locations_id, &URI.char_unreserved?/1),
"operationsId" => URI.encode(operations_id, &URI.char_unreserved?/1)
}
)
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.Memcache.V1beta2.Model.Operation{}])
end
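  # Hypothetical polling sketch (ids are made up; the `done` field is assumed
  # from the standard long-running Operation model):
  #
  #     {:ok, %GoogleApi.Memcache.V1beta2.Model.Operation{done: done}} =
  #       memcache_projects_locations_operations_get(connection, "my-project", "us-central1", "op-123")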
@doc """
Lists operations that match the specified filter in the request. If the
server doesn't support this method, it returns `UNIMPLEMENTED`.
NOTE: the `name` binding allows API services to override the binding
to use different resource name schemes, such as `users/*/operations`. To
override the binding, API services can add a binding such as
`"/v1/{name=users/*}/operations"` to their service configuration.
For backwards compatibility, the default name includes the operations
collection id, however overriding users must ensure the name binding
is the parent resource, without the operations collection id.
## Parameters
* `connection` (*type:* `GoogleApi.Memcache.V1beta2.Connection.t`) - Connection to server
* `projects_id` (*type:* `String.t`) - Part of `name`. The name of the operation's parent resource.
* `locations_id` (*type:* `String.t`) - Part of `name`. See documentation of `projectsId`.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:"$.xgafv"` (*type:* `String.t`) - V1 error format.
* `:access_token` (*type:* `String.t`) - OAuth access token.
* `:alt` (*type:* `String.t`) - Data format for response.
* `:callback` (*type:* `String.t`) - JSONP
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* `:uploadType` (*type:* `String.t`) - Legacy upload protocol for media (e.g. "media", "multipart").
* `:upload_protocol` (*type:* `String.t`) - Upload protocol for media (e.g. "raw", "multipart").
* `:filter` (*type:* `String.t`) - The standard list filter.
* `:pageSize` (*type:* `integer()`) - The standard list page size.
* `:pageToken` (*type:* `String.t`) - The standard list page token.
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.Memcache.V1beta2.Model.ListOperationsResponse{}}` on success
* `{:error, info}` on failure
"""
@spec memcache_projects_locations_operations_list(
Tesla.Env.client(),
String.t(),
String.t(),
keyword(),
keyword()
) ::
{:ok, GoogleApi.Memcache.V1beta2.Model.ListOperationsResponse.t()}
| {:ok, Tesla.Env.t()}
| {:error, Tesla.Env.t()}
def memcache_projects_locations_operations_list(
connection,
projects_id,
locations_id,
optional_params \\ [],
opts \\ []
) do
optional_params_config = %{
:"$.xgafv" => :query,
:access_token => :query,
:alt => :query,
:callback => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:uploadType => :query,
:upload_protocol => :query,
:filter => :query,
:pageSize => :query,
:pageToken => :query
}
request =
Request.new()
|> Request.method(:get)
|> Request.url("/v1beta2/projects/{projectsId}/locations/{locationsId}/operations", %{
"projectsId" => URI.encode(projects_id, &URI.char_unreserved?/1),
"locationsId" => URI.encode(locations_id, &URI.char_unreserved?/1)
})
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(
opts ++ [struct: %GoogleApi.Memcache.V1beta2.Model.ListOperationsResponse{}]
)
end
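  # Hypothetical pagination sketch (ids and page size are made up):
  #
  #     {:ok, %GoogleApi.Memcache.V1beta2.Model.ListOperationsResponse{nextPageToken: token}} =
  #       memcache_projects_locations_operations_list(connection, "my-project", "us-central1", pageSize: 50)
  #
  # Passing `pageToken: token` on the next call fetches the following page.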
end
| 44.576784 | 196 | 0.617924 |
9e70f7f49e3f62629360eced8e4f3bb0a2032126 | 10,231 | exs | Elixir | integration_test/sql/migration.exs | photomattmills/ecto | ad3a455aa3d5e4f932a5bb8ae1590b21d95cce54 | [
"Apache-2.0"
] | 1 | 2019-05-07T15:05:52.000Z | 2019-05-07T15:05:52.000Z | integration_test/sql/migration.exs | photomattmills/ecto | ad3a455aa3d5e4f932a5bb8ae1590b21d95cce54 | [
"Apache-2.0"
] | null | null | null | integration_test/sql/migration.exs | photomattmills/ecto | ad3a455aa3d5e4f932a5bb8ae1590b21d95cce54 | [
"Apache-2.0"
] | null | null | null | defmodule Ecto.Integration.MigrationTest do
use ExUnit.Case
alias Ecto.Integration.TestRepo
defmodule CreateMigration do
use Ecto.Migration
@table table(:create_table_migration)
@index index(:create_table_migration, [:value], unique: true)
def up do
create @table do
add :value, :integer
end
create @index
end
def down do
drop @index
drop @table
end
end
defmodule AddColumnMigration do
use Ecto.Migration
def up do
create table(:add_col_migration) do
add :value, :integer
end
alter table(:add_col_migration) do
add :to_be_added, :integer
end
execute "INSERT INTO add_col_migration (value, to_be_added) VALUES (1, 2)"
end
def down do
drop table(:add_col_migration)
end
end
defmodule AlterColumnMigration do
use Ecto.Migration
def up do
create table(:alter_col_migration) do
add :from_null_to_not_null, :integer
add :from_not_null_to_null, :integer, null: false
add :from_default_to_no_default, :integer, default: 0
add :from_no_default_to_default, :integer
end
alter table(:alter_col_migration) do
modify :from_null_to_not_null, :string, null: false
modify :from_not_null_to_null, :string, null: true
modify :from_default_to_no_default, :integer, default: nil
modify :from_no_default_to_default, :integer, default: 0
end
execute "INSERT INTO alter_col_migration (from_null_to_not_null) VALUES ('foo')"
end
def down do
drop table(:alter_col_migration)
end
end
defmodule AlterForeignKeyMigration do
use Ecto.Migration
def up do
create table(:alter_fk_users)
create table(:alter_fk_posts) do
add :alter_fk_user_id, :id
end
alter table(:alter_fk_posts) do
modify :alter_fk_user_id, references(:alter_fk_users, on_delete: :nilify_all)
end
execute "INSERT INTO alter_fk_users (id) VALUES ('1')"
execute "INSERT INTO alter_fk_posts (id, alter_fk_user_id) VALUES ('1', '1')"
execute "DELETE FROM alter_fk_users"
end
def down do
drop table(:alter_fk_posts)
drop table(:alter_fk_users)
end
end
defmodule DropColumnMigration do
use Ecto.Migration
def up do
create table(:drop_col_migration) do
add :value, :integer
add :to_be_removed, :integer
end
execute "INSERT INTO drop_col_migration (value, to_be_removed) VALUES (1, 2)"
alter table(:drop_col_migration) do
remove :to_be_removed
end
end
def down do
drop table(:drop_col_migration)
end
end
defmodule RenameColumnMigration do
use Ecto.Migration
def up do
create table(:rename_col_migration) do
add :to_be_renamed, :integer
end
rename table(:rename_col_migration), :to_be_renamed, to: :was_renamed
execute "INSERT INTO rename_col_migration (was_renamed) VALUES (1)"
end
def down do
drop table(:rename_col_migration)
end
end
defmodule OnDeleteMigration do
use Ecto.Migration
def up do
create table(:parent1)
create table(:parent2)
create table(:ref_migration) do
add :parent1, references(:parent1, on_delete: :nilify_all)
end
alter table(:ref_migration) do
add :parent2, references(:parent2, on_delete: :delete_all)
end
end
def down do
drop table(:ref_migration)
drop table(:parent1)
drop table(:parent2)
end
end
defmodule ReferencesRollbackMigration do
use Ecto.Migration
def change do
create table(:parent) do
add :name, :string
end
create table(:child) do
add :parent_id, references(:parent)
end
end
end
defmodule RenameMigration do
use Ecto.Migration
@table_current table(:posts_migration)
@table_new table(:new_posts_migration)
def up do
create @table_current
rename @table_current, to: @table_new
end
def down do
drop @table_new
end
end
defmodule PrefixMigration do
use Ecto.Migration
@prefix "ecto_prefix_test"
def up do
execute TestRepo.create_prefix(@prefix)
create table(:first, prefix: @prefix)
create table(:second, prefix: @prefix) do
add :first_id, references(:first)
end
end
def down do
drop table(:second, prefix: @prefix)
drop table(:first, prefix: @prefix)
execute TestRepo.drop_prefix(@prefix)
end
end
defmodule NoSQLMigration do
use Ecto.Migration
def up do
create table(:collection, options: [capped: true])
execute create: "collection"
end
end
defmodule Parent do
use Ecto.Schema
schema "parent" do
end
end
defmodule NoErrorTableMigration do
use Ecto.Migration
def change do
create_if_not_exists table(:existing) do
add :name, :string
end
create_if_not_exists table(:existing) do
add :name, :string
end
create_if_not_exists table(:existing)
drop_if_exists table(:existing)
drop_if_exists table(:existing)
end
end
defmodule NoErrorIndexMigration do
use Ecto.Migration
def change do
create_if_not_exists index(:posts, [:title])
create_if_not_exists index(:posts, [:title])
drop_if_exists index(:posts, [:title])
drop_if_exists index(:posts, [:title])
end
end
defmodule InferredDropIndexMigration do
use Ecto.Migration
def change do
create index(:posts, [:title])
end
end
import Ecto.Query, only: [from: 2]
import Ecto.Migrator, only: [up: 4, down: 4]
test "create and drop table and indexes" do
assert :ok == up(TestRepo, 20050906120000, CreateMigration, log: false)
assert :ok == down(TestRepo, 20050906120000, CreateMigration, log: false)
end
test "correctly infers how to drop index" do
assert :ok == up(TestRepo, 20050906120000, InferredDropIndexMigration, log: false)
assert :ok == down(TestRepo, 20050906120000, InferredDropIndexMigration, log: false)
end
test "supports references" do
assert :ok == up(TestRepo, 20050906120000, OnDeleteMigration, log: false)
parent1 = TestRepo.insert! Ecto.put_meta(%Parent{}, source: "parent1")
parent2 = TestRepo.insert! Ecto.put_meta(%Parent{}, source: "parent2")
writer = "INSERT INTO ref_migration (parent1, parent2) VALUES (#{parent1.id}, #{parent2.id})"
Ecto.Adapters.SQL.query! TestRepo, writer, []
reader = from r in "ref_migration", select: {r.parent1, r.parent2}
assert TestRepo.all(reader) == [{parent1.id, parent2.id}]
TestRepo.delete!(parent1)
assert TestRepo.all(reader) == [{nil, parent2.id}]
TestRepo.delete!(parent2)
assert TestRepo.all(reader) == []
assert :ok == down(TestRepo, 20050906120000, OnDeleteMigration, log: false)
end
test "rolls back references in change/1" do
assert :ok == up(TestRepo, 19850423000000, ReferencesRollbackMigration, log: false)
assert :ok == down(TestRepo, 19850423000000, ReferencesRollbackMigration, log: false)
end
test "create table if not exists and drop table if exists does not raise on failure" do
assert :ok == up(TestRepo, 19850423000001, NoErrorTableMigration, log: false)
end
@tag :create_index_if_not_exists
test "create index if not exists and drop index if exists does not raise on failure" do
assert :ok == up(TestRepo, 19850423000002, NoErrorIndexMigration, log: false)
end
test "raises on NoSQL migrations" do
assert_raise ArgumentError, ~r"does not support keyword lists in :options", fn ->
up(TestRepo, 20150704120000, NoSQLMigration, log: false)
end
end
@tag :add_column
test "add column" do
assert :ok == up(TestRepo, 20070906120000, AddColumnMigration, log: false)
assert [2] == TestRepo.all from p in "add_col_migration", select: p.to_be_added
:ok = down(TestRepo, 20070906120000, AddColumnMigration, log: false)
end
@tag :modify_column
test "modify column" do
assert :ok == up(TestRepo, 20080906120000, AlterColumnMigration, log: false)
assert ["foo"] ==
TestRepo.all from p in "alter_col_migration", select: p.from_null_to_not_null
assert [nil] ==
TestRepo.all from p in "alter_col_migration", select: p.from_not_null_to_null
assert [nil] ==
TestRepo.all from p in "alter_col_migration", select: p.from_default_to_no_default
assert [0] ==
TestRepo.all from p in "alter_col_migration", select: p.from_no_default_to_default
query = "INSERT INTO alter_col_migration (from_not_null_to_null) VALUES ('foo')"
assert catch_error(Ecto.Adapters.SQL.query!(TestRepo, query, []))
:ok = down(TestRepo, 20080906120000, AlterColumnMigration, log: false)
end
@tag :modify_foreign_key
test "modify foreign key" do
assert :ok == up(TestRepo, 20130802170000, AlterForeignKeyMigration, log: false)
assert [nil] == TestRepo.all from p in "alter_fk_posts", select: p.alter_fk_user_id
:ok = down(TestRepo, 20130802170000, AlterForeignKeyMigration, log: false)
end
@tag :remove_column
test "remove column" do
assert :ok == up(TestRepo, 20090906120000, DropColumnMigration, log: false)
assert catch_error(TestRepo.all from p in "drop_col_migration", select: p.to_be_removed)
:ok = down(TestRepo, 20090906120000, DropColumnMigration, log: false)
end
@tag :rename_column
test "rename column" do
assert :ok == up(TestRepo, 20150718120000, RenameColumnMigration, log: false)
assert [1] == TestRepo.all from p in "rename_col_migration", select: p.was_renamed
:ok = down(TestRepo, 20150718120000, RenameColumnMigration, log: false)
end
@tag :rename_table
test "rename table" do
assert :ok == up(TestRepo, 20150712120000, RenameMigration, log: false)
assert :ok == down(TestRepo, 20150712120000, RenameMigration, log: false)
end
@tag :prefix
test "prefix" do
assert :ok == up(TestRepo, 20151012120000, PrefixMigration, log: false)
assert :ok == down(TestRepo, 20151012120000, PrefixMigration, log: false)
end
end
| 27.137931 | 97 | 0.684097 |
9e711cd422b9a6993994f0dcb446f8dfb0a1c19e | 1,756 | ex | Elixir | clients/content/lib/google_api/content/v2/model/accountstatuses_list_response.ex | GoNZooo/elixir-google-api | cf3ad7392921177f68091f3d9001f1b01b92f1cc | [
"Apache-2.0"
] | null | null | null | clients/content/lib/google_api/content/v2/model/accountstatuses_list_response.ex | GoNZooo/elixir-google-api | cf3ad7392921177f68091f3d9001f1b01b92f1cc | [
"Apache-2.0"
] | null | null | null | clients/content/lib/google_api/content/v2/model/accountstatuses_list_response.ex | GoNZooo/elixir-google-api | cf3ad7392921177f68091f3d9001f1b01b92f1cc | [
"Apache-2.0"
] | 1 | 2018-07-28T20:50:50.000Z | 2018-07-28T20:50:50.000Z | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This class is auto generated by the swagger code generator program.
# https://github.com/swagger-api/swagger-codegen.git
# Do not edit the class manually.
defmodule GoogleApi.Content.V2.Model.AccountstatusesListResponse do
@moduledoc """
## Attributes
- kind (String): Identifies what kind of resource this is. Value: the fixed string \"content#accountstatusesListResponse\". Defaults to: `null`.
- nextPageToken (String): The token for the retrieval of the next page of account statuses. Defaults to: `null`.
- resources (List[AccountStatus]): Defaults to: `null`.
"""
defstruct [
:"kind",
:"nextPageToken",
:"resources"
]
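  # Illustrative shape only (hypothetical values):
  #
  #     %GoogleApi.Content.V2.Model.AccountstatusesListResponse{
  #       kind: "content#accountstatusesListResponse",
  #       nextPageToken: "CAE=",
  #       resources: [%GoogleApi.Content.V2.Model.AccountStatus{}]
  #     }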
end
defimpl Poison.Decoder, for: GoogleApi.Content.V2.Model.AccountstatusesListResponse do
import GoogleApi.Content.V2.Deserializer
def decode(value, options) do
value
|> deserialize(:"resources", :list, GoogleApi.Content.V2.Model.AccountStatus, options)
end
end
defimpl Poison.Encoder, for: GoogleApi.Content.V2.Model.AccountstatusesListResponse do
def encode(value, options) do
GoogleApi.Content.V2.Deserializer.serialize_non_nil(value, options)
end
end
| 33.769231 | 156 | 0.750569 |
9e712aa3f0fdfeb453ae222dc75ba5cac5a3e722 | 1,662 | ex | Elixir | clients/slides/lib/google_api/slides/v1/model/write_control.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | clients/slides/lib/google_api/slides/v1/model/write_control.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | clients/slides/lib/google_api/slides/v1/model/write_control.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Slides.V1.Model.WriteControl do
@moduledoc """
Provides control over how write requests are executed.
## Attributes
* `requiredRevisionId` (*type:* `String.t`, *default:* `nil`) - The revision ID of the presentation required for the write request. If
specified and the `required_revision_id` doesn't exactly match the
presentation's current `revision_id`, the request will not be processed and
will return a 400 bad request error.
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:requiredRevisionId => String.t()
}
field(:requiredRevisionId)
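  # Illustrative sketch (the revision id value is made up): pinning a write to
  # a known revision makes the request fail with a 400 if the presentation has
  # since changed, as described in the moduledoc above.
  #
  #     %GoogleApi.Slides.V1.Model.WriteControl{requiredRevisionId: "REV_abc123"}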
end
defimpl Poison.Decoder, for: GoogleApi.Slides.V1.Model.WriteControl do
def decode(value, options) do
GoogleApi.Slides.V1.Model.WriteControl.decode(value, options)
end
end
defimpl Poison.Encoder, for: GoogleApi.Slides.V1.Model.WriteControl do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
| 33.24 | 138 | 0.741276 |
9e715264cd829141e6c1a5221f9ab3bd98bfc4bf | 2,385 | exs | Elixir | test/plugs/event_hook_test.exs | EdBondArcher/okta-elixir | f7831125e40a1b4f8b488528b6a09b87d654db45 | [
"MIT"
] | 7 | 2021-07-19T10:41:43.000Z | 2022-02-23T20:56:51.000Z | test/plugs/event_hook_test.exs | EdBondArcher/okta-elixir | f7831125e40a1b4f8b488528b6a09b87d654db45 | [
"MIT"
] | 10 | 2019-08-18T11:31:43.000Z | 2019-09-24T18:12:24.000Z | test/plugs/event_hook_test.exs | EdBondArcher/okta-elixir | f7831125e40a1b4f8b488528b6a09b87d654db45 | [
"MIT"
] | 2 | 2019-08-08T08:22:10.000Z | 2019-09-06T06:54:43.000Z | defmodule Okta.Plug.EventHookTest do
use ExUnit.Case, async: true
alias Plug.Conn
alias Okta.Plug.EventHook
@request_key "authorization_token"
@opts EventHook.init(
event_handler: EventHookHandlerMock,
secret_key: @request_key
)
defmodule MockMFA do
def secret_key do
"authorization_token"
end
end
test "returns 404 with unauthorized requests" do
conn =
:post
|> Plug.Test.conn("/okta/event-hooks")
|> EventHook.call(@opts)
assert conn.status == 404
end
test "veryfying event hooks" do
conn =
:get
|> build_conn()
|> Conn.put_req_header("x-okta-verification-challenge", "my verification challenge")
|> EventHook.call(@opts)
assert conn.status == 200
assert conn.resp_body == Jason.encode!(%{verification: "my verification challenge"})
assert conn
|> Conn.get_resp_header("content-type")
|> Enum.at(0)
|> String.contains?("application/json")
"application/json; charset=utf-8"
end
test "receiving events" do
Mox.expect(EventHookHandlerMock, :handle_event, fn params ->
assert params == "some data"
end)
conn =
:post
|> build_conn()
|> Map.put(:body_params, "some data")
|> EventHook.call(@opts)
assert conn.status == 204
end
describe "validating required configs" do
test "validates event handler" do
opts = EventHook.init(secret_key: @request_key)
conn = build_conn(:post)
assert_raise ArgumentError, fn ->
EventHook.call(conn, opts)
end
end
test "validates secret key" do
opts = EventHook.init([])
conn = build_conn(:post)
assert_raise ArgumentError, fn ->
EventHook.call(conn, opts)
end
end
end
test "using mfa for configurating secret key" do
Mox.expect(EventHookHandlerMock, :handle_event, fn params -> params end)
opts =
EventHook.init(
event_handler: EventHookHandlerMock,
secret_key: {MockMFA, :secret_key, []}
)
conn =
:post
|> build_conn()
|> Map.put(:body_params, "some data")
|> EventHook.call(opts)
assert conn.status == 204
end
defp build_conn(method) do
method
|> Plug.Test.conn("/okta/event-hooks")
|> Conn.put_req_header("authorization", @request_key)
end
end
| 22.932692 | 90 | 0.627673 |
9e719f5c5f7a320d2dc44cae37caa666099284b1 | 2,682 | ex | Elixir | clients/cloud_search/lib/google_api/cloud_search/v1/model/integer_property_options.ex | MasashiYokota/elixir-google-api | 975dccbff395c16afcb62e7a8e411fbb58e9ab01 | [
"Apache-2.0"
] | null | null | null | clients/cloud_search/lib/google_api/cloud_search/v1/model/integer_property_options.ex | MasashiYokota/elixir-google-api | 975dccbff395c16afcb62e7a8e411fbb58e9ab01 | [
"Apache-2.0"
] | 1 | 2020-12-18T09:25:12.000Z | 2020-12-18T09:25:12.000Z | clients/cloud_search/lib/google_api/cloud_search/v1/model/integer_property_options.ex | MasashiYokota/elixir-google-api | 975dccbff395c16afcb62e7a8e411fbb58e9ab01 | [
"Apache-2.0"
] | 1 | 2020-10-04T10:12:44.000Z | 2020-10-04T10:12:44.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.CloudSearch.V1.Model.IntegerPropertyOptions do
@moduledoc """
Options for integer properties.
## Attributes
* `maximumValue` (*type:* `String.t`, *default:* `nil`) - The maximum value of the property. The minimum and maximum values for the property are used to rank results according to the ordered ranking. Indexing requests with values greater than the maximum are accepted and ranked with the same weight as items indexed with the maximum value.
* `minimumValue` (*type:* `String.t`, *default:* `nil`) - The minimum value of the property. The minimum and maximum values for the property are used to rank results according to the ordered ranking. Indexing requests with values less than the minimum are accepted and ranked with the same weight as items indexed with the minimum value.
* `operatorOptions` (*type:* `GoogleApi.CloudSearch.V1.Model.IntegerOperatorOptions.t`, *default:* `nil`) - If set, describes how the integer should be used as a search operator.
* `orderedRanking` (*type:* `String.t`, *default:* `nil`) - Used to specify the ordered ranking for the integer. Can only be used if isRepeatable is false.
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:maximumValue => String.t(),
:minimumValue => String.t(),
:operatorOptions => GoogleApi.CloudSearch.V1.Model.IntegerOperatorOptions.t(),
:orderedRanking => String.t()
}
field(:maximumValue)
field(:minimumValue)
field(:operatorOptions, as: GoogleApi.CloudSearch.V1.Model.IntegerOperatorOptions)
field(:orderedRanking)
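  # Illustrative shape only (hypothetical values; the ranking name is assumed
  # from the API's ordered-ranking enum, not taken from this file):
  #
  #     %GoogleApi.CloudSearch.V1.Model.IntegerPropertyOptions{
  #       minimumValue: "1",
  #       maximumValue: "100",
  #       orderedRanking: "ASCENDING"
  #     }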
end
defimpl Poison.Decoder, for: GoogleApi.CloudSearch.V1.Model.IntegerPropertyOptions do
def decode(value, options) do
GoogleApi.CloudSearch.V1.Model.IntegerPropertyOptions.decode(value, options)
end
end
defimpl Poison.Encoder, for: GoogleApi.CloudSearch.V1.Model.IntegerPropertyOptions do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
| 47.892857 | 344 | 0.747576 |
9e72113e2e4f771609829a768ee60b034226e609 | 8,179 | ex | Elixir | clients/you_tube/lib/google_api/you_tube/v3/api/activities.ex | hauptbenutzer/elixir-google-api | 7b9e3a114a49cfc774a7afd03e299a0d43e4e6b2 | [
"Apache-2.0"
] | null | null | null | clients/you_tube/lib/google_api/you_tube/v3/api/activities.ex | hauptbenutzer/elixir-google-api | 7b9e3a114a49cfc774a7afd03e299a0d43e4e6b2 | [
"Apache-2.0"
] | null | null | null | clients/you_tube/lib/google_api/you_tube/v3/api/activities.ex | hauptbenutzer/elixir-google-api | 7b9e3a114a49cfc774a7afd03e299a0d43e4e6b2 | [
"Apache-2.0"
] | 1 | 2020-11-10T16:58:27.000Z | 2020-11-10T16:58:27.000Z | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This class is auto generated by the swagger code generator program.
# https://github.com/swagger-api/swagger-codegen.git
# Do not edit the class manually.
defmodule GoogleApi.YouTube.V3.Api.Activities do
@moduledoc """
API calls for all endpoints tagged `Activities`.
"""
alias GoogleApi.YouTube.V3.Connection
alias GoogleApi.Gax.{Request, Response}
@doc """
Posts a bulletin for a specific channel. (The user submitting the request must be authorized to act on the channel's behalf.) Note: Even though an activity resource can contain information about actions like a user rating a video or marking a video as a favorite, you need to use other API methods to generate those activity resources. For example, you would use the API's videos.rate() method to rate a video and the playlistItems.insert() method to mark a video as a favorite.
## Parameters
- connection (GoogleApi.YouTube.V3.Connection): Connection to server
- part (String.t): The part parameter serves two purposes in this operation. It identifies the properties that the write operation will set as well as the properties that the API response will include.
- opts (KeywordList): [optional] Optional parameters
- :alt (String.t): Data format for the response.
- :fields (String.t): Selector specifying which fields to include in a partial response.
- :key (String.t): API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
- :oauth_token (String.t): OAuth 2.0 token for the current user.
- :prettyPrint (boolean()): Returns response with indentations and line breaks.
- :quotaUser (String.t): An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
- :userIp (String.t): Deprecated. Please use quotaUser instead.
- :body (Activity):
## Returns
{:ok, %GoogleApi.YouTube.V3.Model.Activity{}} on success
{:error, info} on failure
"""
@spec youtube_activities_insert(Tesla.Env.client(), String.t(), keyword()) ::
{:ok, GoogleApi.YouTube.V3.Model.Activity.t()} | {:error, Tesla.Env.t()}
def youtube_activities_insert(connection, part, opts \\ []) do
optional_params = %{
:alt => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:userIp => :query,
:body => :body
}
request =
Request.new()
|> Request.method(:post)
|> Request.url("/youtube/v3/activities")
|> Request.add_param(:query, :part, part)
|> Request.add_optional_params(optional_params, opts)
connection
|> Connection.execute(request)
|> Response.decode(struct: %GoogleApi.YouTube.V3.Model.Activity{})
end
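  # Hypothetical usage sketch (`connection` and the activity body are
  # placeholders, not values from this file):
  #
  #     {:ok, %GoogleApi.YouTube.V3.Model.Activity{}} =
  #       youtube_activities_insert(connection, "snippet",
  #         body: %GoogleApi.YouTube.V3.Model.Activity{}
  #       )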
@doc """
Returns a list of channel activity events that match the request criteria. For example, you can retrieve events associated with a particular channel, events associated with the user's subscriptions and Google+ friends, or the YouTube home page feed, which is customized for each user.
## Parameters
- connection (GoogleApi.YouTube.V3.Connection): Connection to server
- part (String.t): The part parameter specifies a comma-separated list of one or more activity resource properties that the API response will include. If the parameter identifies a property that contains child properties, the child properties will be included in the response. For example, in an activity resource, the snippet property contains other properties that identify the type of activity, a display title for the activity, and so forth. If you set part=snippet, the API response will also contain all of those nested properties.
- opts (KeywordList): [optional] Optional parameters
- :alt (String.t): Data format for the response.
- :fields (String.t): Selector specifying which fields to include in a partial response.
- :key (String.t): API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
- :oauth_token (String.t): OAuth 2.0 token for the current user.
- :prettyPrint (boolean()): Returns response with indentations and line breaks.
- :quotaUser (String.t): An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
- :userIp (String.t): Deprecated. Please use quotaUser instead.
- :channelId (String.t): The channelId parameter specifies a unique YouTube channel ID. The API will then return a list of that channel's activities.
- :home (boolean()): Set this parameter's value to true to retrieve the activity feed that displays on the YouTube home page for the currently authenticated user.
- :maxResults (integer()): The maxResults parameter specifies the maximum number of items that should be returned in the result set.
- :mine (boolean()): Set this parameter's value to true to retrieve a feed of the authenticated user's activities.
- :pageToken (String.t): The pageToken parameter identifies a specific page in the result set that should be returned. In an API response, the nextPageToken and prevPageToken properties identify other pages that could be retrieved.
- :publishedAfter (DateTime.t): The publishedAfter parameter specifies the earliest date and time that an activity could have occurred for that activity to be included in the API response. If the parameter value specifies a day, but not a time, then any activities that occurred that day will be included in the result set. The value is specified in ISO 8601 (YYYY-MM-DDThh:mm:ss.sZ) format.
- :publishedBefore (DateTime.t): The publishedBefore parameter specifies the date and time before which an activity must have occurred for that activity to be included in the API response. If the parameter value specifies a day, but not a time, then any activities that occurred that day will be excluded from the result set. The value is specified in ISO 8601 (YYYY-MM-DDThh:mm:ss.sZ) format.
- :regionCode (String.t): The regionCode parameter instructs the API to return results for the specified country. The parameter value is an ISO 3166-1 alpha-2 country code. YouTube uses this value when the authorized user's previous activity on YouTube does not provide enough information to generate the activity feed.
## Returns
{:ok, %GoogleApi.YouTube.V3.Model.ActivityListResponse{}} on success
{:error, info} on failure
"""
@spec youtube_activities_list(Tesla.Env.client(), String.t(), keyword()) ::
{:ok, GoogleApi.YouTube.V3.Model.ActivityListResponse.t()} | {:error, Tesla.Env.t()}
def youtube_activities_list(connection, part, opts \\ []) do
optional_params = %{
:alt => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:userIp => :query,
:channelId => :query,
:home => :query,
:maxResults => :query,
:mine => :query,
:pageToken => :query,
:publishedAfter => :query,
:publishedBefore => :query,
:regionCode => :query
}
request =
Request.new()
|> Request.method(:get)
|> Request.url("/youtube/v3/activities")
|> Request.add_param(:query, :part, part)
|> Request.add_optional_params(optional_params, opts)
connection
|> Connection.execute(request)
|> Response.decode(struct: %GoogleApi.YouTube.V3.Model.ActivityListResponse{})
end
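  # Hypothetical usage sketch (the channel id is made up; the `items` field is
  # assumed from the standard YouTube list-response model):
  #
  #     {:ok, %GoogleApi.YouTube.V3.Model.ActivityListResponse{items: items}} =
  #       youtube_activities_list(connection, "snippet",
  #         channelId: "UC0000000000000000000000",
  #         maxResults: 25
  #       )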
end
| 59.70073 | 545 | 0.722704 |
9e721eb96aa0f2fd45f109013e0d2de76a2e4d73 | 1,076 | ex | Elixir | lib/ashes/endpoint.ex | hamiltop/ashes | 74882221af8d4fd96cd5d88e32fa6a6b3df44c77 | [
"MIT"
] | 1 | 2019-09-04T10:06:04.000Z | 2019-09-04T10:06:04.000Z | lib/ashes/endpoint.ex | hamiltop/ashes | 74882221af8d4fd96cd5d88e32fa6a6b3df44c77 | [
"MIT"
] | null | null | null | lib/ashes/endpoint.ex | hamiltop/ashes | 74882221af8d4fd96cd5d88e32fa6a6b3df44c77 | [
"MIT"
] | null | null | null | defmodule Ashes.Endpoint do
use Phoenix.Endpoint, otp_app: :ashes
socket "/socket", Ashes.UserSocket
# Serve at "/" the static files from "priv/static" directory.
#
# You should set gzip to true if you are running phoenix.digest
# when deploying your static files in production.
plug Plug.Static,
at: "/", from: :ashes, gzip: false,
only: ~w(css fonts images js favicon.ico robots.txt)
# Code reloading can be explicitly enabled under the
# :code_reloader configuration of your endpoint.
if code_reloading? do
socket "/phoenix/live_reload/socket", Phoenix.LiveReloader.Socket
plug Phoenix.LiveReloader
plug Phoenix.CodeReloader
end
plug Plug.RequestId
plug Plug.Logger
plug GithubWebhookPlug, mount: "api", secret: System.get_env("GITHUB_SECRET")
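  # The plug above mounts webhook handling under "api" and presumably uses
  # GITHUB_SECRET to verify GitHub's request signatures before requests reach
  # the router (behavior assumed from the plug's name and options, not
  # verified here).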
plug Plug.Parsers,
parsers: [:urlencoded, :multipart, :json],
pass: ["*/*"],
json_decoder: Poison
plug Plug.MethodOverride
plug Plug.Head
plug Plug.Session,
store: :cookie,
key: "_ashes_key",
signing_salt: "7LC202NS"
plug Ashes.Router
end
| 26.243902 | 79 | 0.710037 |
9e723d901db5bf3d31f0680c41705b8124475546 | 3,857 | ex | Elixir | lib/sentinel/web/controllers/html/password_controller.ex | suranyami/sentinel | aeb421e2e61a4bc14abe89b4a92cb1943a5965fb | [
"MIT"
] | null | null | null | lib/sentinel/web/controllers/html/password_controller.ex | suranyami/sentinel | aeb421e2e61a4bc14abe89b4a92cb1943a5965fb | [
"MIT"
] | null | null | null | lib/sentinel/web/controllers/html/password_controller.ex | suranyami/sentinel | aeb421e2e61a4bc14abe89b4a92cb1943a5965fb | [
"MIT"
] | null | null | null | defmodule Sentinel.Controllers.Html.PasswordController do
@moduledoc """
Handles the password create and reset actions
"""
use Phoenix.Controller
alias Sentinel.{Changeset.PasswordResetter, Config, Mailer, RedirectHelper, Util}
plug :put_layout, {Config.layout_view, Config.layout}
plug Sentinel.AuthenticatedPipeline when action in [:authenticated_update]
def new(conn, _params) do
render(conn, Config.views.password, "new.html", %{conn: conn})
end
def create(conn, %{"password" => %{"email" => email}}) do
user = Config.repo.get_by(Config.user_model, email: email)
if is_nil(user) do
send_redirect_and_flash(conn)
else
auth = Config.repo.get_by(Sentinel.Ueberauth, user_id: user.id, provider: "identity")
if is_nil(auth) do
send_redirect_and_flash(conn)
else
{password_reset_token, changeset} = auth |> PasswordResetter.create_changeset
case Config.repo.update(changeset) do
{:ok, updated_auth} ->
updated_auth
|> Config.repo.preload([:user])
|> Map.get(:user)
|> Mailer.send_password_reset_email(password_reset_token)
_ -> nil
end
send_redirect_and_flash(conn)
end
end
end
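  # Note: every branch above flashes the same message via
  # send_redirect_and_flash/1, so a request like
  #
  #     %{"password" => %{"email" => "someone@example.com"}}
  #
  # (hypothetical params) does not reveal whether the email exists.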
defp send_redirect_and_flash(conn) do
conn
|> put_flash(:info, "You'll receive an email with instructions about how to reset your password in a few minutes.")
|> RedirectHelper.redirect_from(:password_create)
end
def edit(conn, params)
def edit(conn, %{"user_id" => user_id, "password_reset_token" => password_reset_token}) do
render(conn, Config.views.password, "edit.html", %{conn: conn, password_reset_token: password_reset_token, user_id: user_id})
end
def edit(conn, _params) do
conn
|> put_flash(:error, "Invalid password reset link. Please try again.")
|> RedirectHelper.redirect_from(:password_update_error)
end
@doc """
  Resets a user's password if the provided token matches
Params should be:
{user_id: 1, password_reset_token: "abc123"}
"""
def update(conn, params)
def update(conn, %{"password" => %{"user_id" => user_id, "password_reset_token" => _password_reset_params} = params}) do
user = Config.repo.get(Config.user_model, user_id)
case Sentinel.Update.update_password(user_id, params) do
{:ok, _auth} ->
conn
|> Sentinel.Guardian.Plug.sign_in(user)
|> put_flash(:info, "Successfully updated password")
|> RedirectHelper.redirect_from(:password_update)
{:error, _changeset} ->
conn
|> put_status(422)
|> put_flash(:error, "Unable to reset your password")
|> RedirectHelper.redirect_from(:password_update_unsuccessful)
end
end
def update(conn, _params) do
conn
|> put_status(422)
|> put_flash(:error, "Unable to reset your password")
|> RedirectHelper.redirect_from(:password_update_unsuccessful)
end
def authenticated_update(conn, %{"account" => params}) do
current_user = Sentinel.Guardian.Plug.current_resource(conn)
auth = Config.repo.get_by(Sentinel.Ueberauth, user_id: current_user.id, provider: "identity")
{password_reset_token, changeset} = auth |> PasswordResetter.create_changeset
updated_auth = Config.repo.update!(changeset)
password_reset_params = Util.params_to_ueberauth_auth_struct(params, password_reset_token)
changeset =
updated_auth
|> PasswordResetter.reset_changeset(password_reset_params)
case Config.repo.update(changeset) do
{:ok, _updated_auth} ->
conn
|> put_flash(:info, "Update successful")
|> RedirectHelper.redirect_from(:password_update)
{:error, changeset} ->
render(conn, Config.views.password, "edit.html", %{conn: conn, user: current_user, changeset: changeset})
end
end
end
| 35.385321 | 129 | 0.688359 |
9e727ed236892a9a9a2f54a9eb522950229d0b7f | 52,425 | ex | Elixir | lib/data_layer.ex | simpers/ash_postgres | 22ba737d0d1e57da3d3150bcf94961a99b145ae9 | [
"MIT"
] | null | null | null | lib/data_layer.ex | simpers/ash_postgres | 22ba737d0d1e57da3d3150bcf94961a99b145ae9 | [
"MIT"
] | null | null | null | lib/data_layer.ex | simpers/ash_postgres | 22ba737d0d1e57da3d3150bcf94961a99b145ae9 | [
"MIT"
] | null | null | null | defmodule AshPostgres.DataLayer do
@manage_tenant %Ash.Dsl.Section{
name: :manage_tenant,
describe: """
Configuration for the behavior of a resource that manages a tenant
""",
examples: [
"""
manage_tenant do
template ["organization_", :id]
create? true
update? false
end
"""
],
schema: [
template: [
type: {:custom, __MODULE__, :tenant_template, []},
required: true,
doc: """
        A template that will cause the resource to create/manage the specified schema.
        Use this if you have a resource that, when created, should create a new tenant
        for you. For example, if you have a `customer` resource and you want to create
        a schema for each customer based on its id, e.g `customer_10`, set this option
        to `["customer_", :id]`. Then, when a customer is created, a schema named from
        that template (e.g `customer_10`) is created, and your tenant migrations are run
        on it. If you were later to change that customer's id to `20`, the schema would
        be renamed to `customer_20`. Generally speaking you should avoid changing the
        tenant id.
"""
],
create?: [
type: :boolean,
default: true,
doc: "Whether or not to automatically create a tenant when a record is created"
],
update?: [
type: :boolean,
default: true,
doc: "Whether or not to automatically update the tenant name if the record is udpated"
]
]
}
@postgres %Ash.Dsl.Section{
name: :postgres,
describe: """
Postgres data layer configuration
""",
sections: [
@manage_tenant
],
modules: [
:repo
],
examples: [
"""
postgres do
repo MyApp.Repo
table "organizations"
end
"""
],
schema: [
repo: [
type: :atom,
required: true,
doc:
"The repo that will be used to fetch your data. See the `AshPostgres.Repo` documentation for more"
],
migrate?: [
type: :boolean,
default: true,
doc:
"Whether or not to include this resource in the generated migrations with `mix ash.generate_migrations`"
],
base_filter_sql: [
type: :string,
doc:
"A raw sql version of the base_filter, e.g `representative = true`. Required if trying to create a unique constraint on a resource with a base_filter"
],
skip_unique_indexes: [
type: {:custom, __MODULE__, :validate_skip_unique_indexes, []},
default: false,
doc: "Skip generating unique indexes when generating migrations"
],
unique_index_names: [
type: :any,
default: [],
doc: """
A list of unique index names that could raise errors, or an mfa to a function that takes a changeset
and returns the list. Must be in the format `{[:affected, :keys], "name_of_constraint"}` or `{[:affected, :keys], "name_of_constraint", "custom error message"}`
"""
],
foreign_key_names: [
type: :any,
default: [],
doc: """
A list of foreign keys that could raise errors, or an mfa to a function that takes a changeset and returns the list.
Must be in the format `{:key, "name_of_constraint"}` or `{:key, "name_of_constraint", "custom error message"}`
"""
],
table: [
type: :string,
doc:
"The table to store and read the resource from. Required unless `polymorphic?` is true."
],
polymorphic?: [
type: :boolean,
default: false,
doc: """
      Declares this resource as polymorphic.
      Polymorphic resources cannot be read or updated unless the table is provided in the query/changeset context.
      For example:
          PolymorphicResource
          |> Ash.Query.set_context(%{data_layer: %{table: "table"}})
          |> MyApi.read!()
      When relating to polymorphic resources, you'll need to use the `context` option on relationships, e.g.
          belongs_to :polymorphic_association, PolymorphicResource,
            context: %{data_layer: %{table: "table"}}
"""
]
]
}
alias Ash.Filter
alias Ash.Query.{BooleanExpression, Not, Ref}
alias Ash.Query.Function.{Ago, Contains}
alias Ash.Query.Operator.IsNil
alias AshPostgres.Functions.{Fragment, TrigramSimilarity, Type}
import AshPostgres, only: [repo: 1]
@behaviour Ash.DataLayer
@sections [@postgres]
@moduledoc """
  A postgres data layer that leverages Ecto's postgres capabilities.
# Table of Contents
#{Ash.Dsl.Extension.doc_index(@sections)}
#{Ash.Dsl.Extension.doc(@sections)}
"""
use Ash.Dsl.Extension,
sections: @sections,
transformers: [
AshPostgres.Transformers.VerifyRepo,
AshPostgres.Transformers.EnsureTableOrPolymorphic
]
@doc false
def tenant_template(value) do
value = List.wrap(value)
if Enum.all?(value, &(is_binary(&1) || is_atom(&1))) do
{:ok, value}
else
{:error, "Expected all values for `manages_tenant` to be strings or atoms"}
end
end
@doc false
def validate_skip_unique_indexes(indexes) do
indexes = List.wrap(indexes)
if Enum.all?(indexes, &is_atom/1) do
{:ok, indexes}
else
{:error, "All indexes to skip must be atoms"}
end
end
import Ecto.Query, only: [from: 2, subquery: 1]
@impl true
def can?(_, :async_engine), do: true
def can?(_, :transact), do: true
def can?(_, :composite_primary_key), do: true
def can?(_, :upsert), do: true
def can?(resource, {:join, other_resource}) do
data_layer = Ash.DataLayer.data_layer(resource)
other_data_layer = Ash.DataLayer.data_layer(other_resource)
data_layer == other_data_layer and repo(data_layer) == repo(other_data_layer)
end
def can?(resource, {:lateral_join, other_resource}) do
data_layer = Ash.DataLayer.data_layer(resource)
other_data_layer = Ash.DataLayer.data_layer(other_resource)
data_layer == other_data_layer and repo(data_layer) == repo(other_data_layer)
end
def can?(_, :boolean_filter), do: true
def can?(_, {:aggregate, :count}), do: true
def can?(_, :aggregate_filter), do: true
def can?(_, :aggregate_sort), do: true
def can?(_, :create), do: true
def can?(_, :read), do: true
def can?(_, :update), do: true
def can?(_, :destroy), do: true
def can?(_, :filter), do: true
def can?(_, :limit), do: true
def can?(_, :offset), do: true
def can?(_, :multitenancy), do: true
def can?(_, {:filter_expr, _}), do: true
def can?(_, :nested_expressions), do: true
def can?(_, {:query_aggregate, :count}), do: true
def can?(_, :sort), do: true
def can?(_, {:sort, _}), do: true
def can?(_, _), do: false
@impl true
def in_transaction?(resource) do
repo(resource).in_transaction?()
end
@impl true
def limit(query, nil, _), do: {:ok, query}
def limit(query, limit, _resource) do
{:ok, from(row in query, limit: ^limit)}
end
@impl true
def source(resource) do
AshPostgres.table(resource) || ""
end
@impl true
def set_context(resource, data_layer_query, context) do
if context[:data_layer][:table] do
{:ok,
%{
data_layer_query
| from: %{data_layer_query.from | source: {context[:data_layer][:table], resource}}
}}
else
{:ok, data_layer_query}
end
end
@impl true
  def offset(query, nil, _), do: {:ok, query}
def offset(%{offset: old_offset} = query, 0, _resource) when old_offset in [0, nil] do
{:ok, query}
end
def offset(query, offset, _resource) do
{:ok, from(row in query, offset: ^offset)}
end
@impl true
def run_query(query, resource) do
if AshPostgres.polymorphic?(resource) && no_table?(query) do
raise_table_error!(resource, :read)
else
{:ok, repo(resource).all(query, repo_opts(query))}
end
end
defp no_table?(%{from: %{source: {"", _}}}), do: true
defp no_table?(_), do: false
defp repo_opts(%Ash.Changeset{tenant: tenant, resource: resource}) do
repo_opts(%{tenant: tenant, resource: resource})
end
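  # With schema-based (:context) multitenancy the tenant is applied as an
  # Ecto query prefix; attribute-based multitenancy needs no repo option.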
defp repo_opts(%{tenant: tenant, resource: resource}) when not is_nil(tenant) do
if Ash.Resource.Info.multitenancy_strategy(resource) == :context do
[prefix: tenant]
else
[]
end
end
defp repo_opts(_), do: []
@impl true
def functions(resource) do
config = repo(resource).config()
functions = [AshPostgres.Functions.Type, AshPostgres.Functions.Fragment]
if "pg_trgm" in (config[:installed_extensions] || []) do
functions ++
[
AshPostgres.Functions.TrigramSimilarity
]
else
functions
end
end
@impl true
def run_aggregate_query(query, aggregates, resource) do
subquery = from(row in subquery(query), select: %{})
query =
Enum.reduce(
aggregates,
subquery,
&add_subquery_aggregate_select(&2, &1, resource)
)
{:ok, repo(resource).one(query, repo_opts(query))}
end
@impl true
def set_tenant(_resource, query, tenant) do
{:ok, Ecto.Query.put_query_prefix(query, to_string(tenant))}
end
@impl true
def run_aggregate_query_with_lateral_join(
query,
aggregates,
root_data,
source_resource,
destination_resource,
source_field,
destination_field
) do
lateral_join_query =
lateral_join_query(
query,
root_data,
source_resource,
source_field,
destination_field
)
subquery = from(row in subquery(lateral_join_query), select: %{})
query =
Enum.reduce(
aggregates,
subquery,
&add_subquery_aggregate_select(&2, &1, destination_resource)
)
    {:ok, repo(source_resource).one(query, repo_opts(query))}
end
@impl true
def run_query_with_lateral_join(
query,
root_data,
source_resource,
_destination_resource,
source_field,
destination_field
) do
query =
lateral_join_query(
query,
root_data,
source_resource,
source_field,
destination_field
)
{:ok, repo(source_resource).all(query, repo_opts(query))}
end
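  # Correlates the destination query to each source row via `parent_as/1` and
  # joins it laterally, so any sort/limit on the destination query is applied
  # per source record rather than across the whole result set.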
defp lateral_join_query(
query,
root_data,
source_resource,
source_field,
destination_field
) do
source_values = Enum.map(root_data, &Map.get(&1, source_field))
subquery =
subquery(
from(destination in query,
where:
field(destination, ^destination_field) ==
field(parent_as(:source_record), ^source_field)
)
)
source_resource
|> Ash.Query.new()
|> Ash.Query.data_layer_query()
|> case do
{:ok, data_layer_query} ->
from(source in data_layer_query,
as: :source_record,
where: field(source, ^source_field) in ^source_values,
inner_lateral_join: destination in ^subquery,
on: field(source, ^source_field) == field(destination, ^destination_field),
select: destination
)
{:error, error} ->
{:error, error}
end
end
@impl true
def resource_to_query(resource, _),
do: Ecto.Queryable.to_query({AshPostgres.table(resource) || "", resource})
@impl true
def create(resource, changeset) do
changeset.data
|> Map.update!(:__meta__, &Map.put(&1, :source, table(resource, changeset)))
|> ecto_changeset(changeset, :create)
|> repo(resource).insert(repo_opts(changeset))
|> handle_errors()
|> case do
{:ok, result} ->
maybe_create_tenant!(resource, result)
{:ok, result}
{:error, error} ->
{:error, error}
end
end
defp maybe_create_tenant!(resource, result) do
if AshPostgres.manage_tenant_create?(resource) do
tenant_name = tenant_name(resource, result)
AshPostgres.MultiTenancy.create_tenant!(tenant_name, repo(resource))
else
:ok
end
end
defp maybe_update_tenant(resource, changeset, result) do
if AshPostgres.manage_tenant_update?(resource) do
changing_tenant_name? =
resource
|> AshPostgres.manage_tenant_template()
|> Enum.filter(&is_atom/1)
|> Enum.any?(&Ash.Changeset.changing_attribute?(changeset, &1))
if changing_tenant_name? do
old_tenant_name = tenant_name(resource, changeset.data)
new_tenant_name = tenant_name(resource, result)
AshPostgres.MultiTenancy.rename_tenant(repo(resource), old_tenant_name, new_tenant_name)
end
end
:ok
end
defp tenant_name(resource, result) do
resource
|> AshPostgres.manage_tenant_template()
|> Enum.map_join(fn item ->
if is_binary(item) do
item
else
result
|> Map.get(item)
|> to_string()
end
end)
end
defp handle_errors({:error, %Ecto.Changeset{errors: errors}}) do
{:error, Enum.map(errors, &to_ash_error/1)}
end
defp handle_errors({:ok, val}), do: {:ok, val}
defp to_ash_error({field, {message, vars}}) do
Ash.Error.Changes.InvalidAttribute.exception(field: field, message: message, vars: vars)
end
defp ecto_changeset(record, changeset, type) do
ecto_changeset =
record
|> set_table(changeset, type)
|> Ecto.Changeset.change(changeset.attributes)
case type do
:create ->
ecto_changeset
|> add_unique_indexes(record.__struct__, changeset.tenant, changeset)
|> add_my_foreign_key_constraints(record.__struct__)
|> add_configured_foreign_key_constraints(record.__struct__)
type when type in [:upsert, :update] ->
ecto_changeset
|> add_unique_indexes(record.__struct__, changeset.tenant, changeset)
|> add_my_foreign_key_constraints(record.__struct__)
|> add_related_foreign_key_constraints(record.__struct__)
|> add_configured_foreign_key_constraints(record.__struct__)
:delete ->
ecto_changeset
|> add_unique_indexes(record.__struct__, changeset.tenant, changeset)
|> add_related_foreign_key_constraints(record.__struct__)
|> add_configured_foreign_key_constraints(record.__struct__)
end
end
defp set_table(record, changeset, operation) do
if AshPostgres.polymorphic?(record.__struct__) do
      table = changeset.context[:data_layer][:table] || AshPostgres.table(record.__struct__)
if table do
Ecto.put_meta(record, source: table)
else
raise_table_error!(changeset.resource, operation)
end
else
record
end
end
defp add_related_foreign_key_constraints(changeset, resource) do
    # TODO: This doesn't guarantee that we get all of them, because if something is related to this
    # schema and there is no back-relation, then this won't catch its foreign key constraints
resource
|> Ash.Resource.Info.relationships()
|> Enum.map(& &1.destination)
|> Enum.uniq()
|> Enum.flat_map(fn related ->
related
|> Ash.Resource.Info.relationships()
|> Enum.filter(&(&1.destination == resource))
|> Enum.map(&Map.take(&1, [:source, :source_field, :destination_field]))
end)
|> Enum.uniq()
|> Enum.reduce(changeset, fn %{
source: source,
source_field: source_field,
destination_field: destination_field
},
changeset ->
Ecto.Changeset.foreign_key_constraint(changeset, destination_field,
name: "#{AshPostgres.table(source)}_#{source_field}_fkey",
message: "would leave records behind"
)
end)
end
defp add_my_foreign_key_constraints(changeset, resource) do
resource
|> Ash.Resource.Info.relationships()
|> Enum.reduce(changeset, &Ecto.Changeset.foreign_key_constraint(&2, &1.source_field))
end
defp add_configured_foreign_key_constraints(changeset, resource) do
resource
|> AshPostgres.foreign_key_names()
|> case do
{m, f, a} -> List.wrap(apply(m, f, [changeset | a]))
value -> List.wrap(value)
end
|> Enum.reduce(changeset, fn
{key, name}, changeset ->
Ecto.Changeset.foreign_key_constraint(changeset, key, name: name)
{key, name, message}, changeset ->
Ecto.Changeset.foreign_key_constraint(changeset, key, name: name, message: message)
end)
end
defp add_unique_indexes(changeset, resource, tenant, ash_changeset) do
changeset =
resource
|> Ash.Resource.Info.identities()
|> Enum.reduce(changeset, fn identity, changeset ->
name =
if tenant do
"#{tenant}_#{table(resource, ash_changeset)}_#{identity.name}_unique_index"
else
"#{table(resource, ash_changeset)}_#{identity.name}_unique_index"
end
opts =
if Map.get(identity, :message) do
[name: name, message: identity.message]
else
[name: name]
end
Ecto.Changeset.unique_constraint(changeset, identity.keys, opts)
end)
names =
resource
|> AshPostgres.unique_index_names()
|> case do
{m, f, a} -> List.wrap(apply(m, f, [changeset | a]))
value -> List.wrap(value)
end
names = [
{Ash.Resource.Info.primary_key(resource), table(resource, ash_changeset) <> "_pkey"} | names
]
Enum.reduce(names, changeset, fn
{keys, name}, changeset ->
Ecto.Changeset.unique_constraint(changeset, List.wrap(keys), name: name)
{keys, name, message}, changeset ->
Ecto.Changeset.unique_constraint(changeset, List.wrap(keys), name: name, message: message)
end)
end
@impl true
def upsert(resource, changeset) do
repo_opts =
changeset
|> repo_opts()
|> Keyword.put(:on_conflict, {:replace, Map.keys(changeset.attributes)})
|> Keyword.put(:conflict_target, Ash.Resource.Info.primary_key(resource))
if AshPostgres.manage_tenant_update?(resource) do
{:error, "Cannot currently upsert a resource that owns a tenant"}
else
changeset.data
|> Map.update!(:__meta__, &Map.put(&1, :source, table(resource, changeset)))
|> ecto_changeset(changeset, :upsert)
|> repo(resource).insert(repo_opts)
|> handle_errors()
end
end
@impl true
def update(resource, changeset) do
changeset.data
|> Map.update!(:__meta__, &Map.put(&1, :source, table(resource, changeset)))
|> ecto_changeset(changeset, :update)
|> repo(resource).update(repo_opts(changeset))
|> handle_errors()
|> case do
{:ok, result} ->
maybe_update_tenant(resource, changeset, result)
{:ok, result}
{:error, error} ->
{:error, error}
end
end
@impl true
def destroy(resource, %{data: record} = changeset) do
record
|> ecto_changeset(changeset, :delete)
|> repo(resource).delete(repo_opts(changeset))
|> case do
{:ok, _record} ->
:ok
{:error, error} ->
handle_errors({:error, error})
end
end
@impl true
def sort(query, sort, resource) do
query = default_bindings(query, resource)
sort
|> sanitize_sort()
|> Enum.reduce({:ok, query}, fn {order, sort}, {:ok, query} ->
binding =
case Map.fetch(query.__ash_bindings__.aggregates, sort) do
{:ok, binding} ->
binding
:error ->
0
end
new_query =
Map.update!(query, :order_bys, fn order_bys ->
order_bys = order_bys || []
sort_expr = %Ecto.Query.QueryExpr{
expr: [
{order, {{:., [], [{:&, [], [binding]}, sort]}, [], []}}
]
}
order_bys ++ [sort_expr]
end)
{:ok, new_query}
end)
end
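  # Ash spells null ordering as `*_nils_*` while Ecto expects `*_nulls_*`;
  # normalize everything into Ecto's `{order, field}` shape.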
defp sanitize_sort(sort) do
sort
|> List.wrap()
|> Enum.map(fn
{sort, :asc_nils_last} -> {:asc_nulls_last, sort}
{sort, :asc_nils_first} -> {:asc_nulls_first, sort}
{sort, :desc_nils_last} -> {:desc_nulls_last, sort}
{sort, :desc_nils_first} -> {:desc_nulls_first, sort}
{sort, order} -> {order, sort}
sort -> sort
end)
end
@impl true
def filter(query, %{expression: false}, _resource) do
impossible_query = from(row in query, where: false)
{:ok, Map.put(impossible_query, :__impossible__, true)}
end
def filter(query, filter, _resource) do
relationship_paths =
filter
|> Filter.relationship_paths()
|> Enum.map(fn path ->
if can_inner_join?(path, filter) do
{:inner, relationship_path_to_relationships(filter.resource, path)}
else
{:left, relationship_path_to_relationships(filter.resource, path)}
end
end)
new_query =
query
|> join_all_relationships(relationship_paths)
|> add_filter_expression(filter)
{:ok, new_query}
end
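  # Seeds the query with metadata mapping relationship paths to Ecto binding
  # positions; binding 0 is always the root resource, and later joins append
  # their own entries via `add_binding/2`.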
defp default_bindings(query, resource) do
Map.put_new(query, :__ash_bindings__, %{
current: Enum.count(query.joins) + 1,
aggregates: %{},
bindings: %{0 => %{path: [], type: :root, source: resource}}
})
end
@known_inner_join_operators [
Eq,
GreaterThan,
GreaterThanOrEqual,
In,
LessThanOrEqual,
LessThan,
NotEq
]
|> Enum.map(&Module.concat(Ash.Query.Operator, &1))
@known_inner_join_functions [
Ago,
Contains
]
|> Enum.map(&Module.concat(Ash.Query.Function, &1))
@known_inner_join_predicates @known_inner_join_functions ++ @known_inner_join_operators
# For consistency's sake, this logic was removed.
# We can revisit it sometime though.
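  # A relationship path can be inner joined only when the filter requires a
  # related row to exist. Predicates reached through `or` or `not` can match
  # even when the related row is missing, so those paths must stay left joins.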
defp can_inner_join?(path, expr, seen_an_or? \\ false)
defp can_inner_join?(path, %{expression: expr}, seen_an_or?),
do: can_inner_join?(path, expr, seen_an_or?)
defp can_inner_join?(_path, expr, _seen_an_or?) when expr in [nil, true, false], do: true
defp can_inner_join?(path, %BooleanExpression{op: :and, left: left, right: right}, seen_an_or?) do
can_inner_join?(path, left, seen_an_or?) || can_inner_join?(path, right, seen_an_or?)
end
defp can_inner_join?(path, %BooleanExpression{op: :or, left: left, right: right}, _) do
can_inner_join?(path, left, true) && can_inner_join?(path, right, true)
end
defp can_inner_join?(
_,
%Not{},
_
) do
false
end
defp can_inner_join?(
search_path,
%struct{__operator__?: true, left: %Ref{relationship_path: relationship_path}},
seen_an_or?
)
when search_path == relationship_path and struct in @known_inner_join_predicates do
not seen_an_or?
end
defp can_inner_join?(
search_path,
%struct{__operator__?: true, right: %Ref{relationship_path: relationship_path}},
seen_an_or?
)
when search_path == relationship_path and struct in @known_inner_join_predicates do
not seen_an_or?
end
defp can_inner_join?(
search_path,
%struct{__function__?: true, arguments: arguments},
seen_an_or?
)
when struct in @known_inner_join_predicates do
if Enum.any?(arguments, &match?(%Ref{relationship_path: ^search_path}, &1)) do
not seen_an_or?
else
true
end
end
defp can_inner_join?(_, _, _), do: false
@impl true
def add_aggregate(query, aggregate, _resource) do
resource = aggregate.resource
query = default_bindings(query, resource)
{query, binding} =
case get_binding(resource, aggregate.relationship_path, query, :aggregate) do
nil ->
relationship = Ash.Resource.Info.relationship(resource, aggregate.relationship_path)
subquery = aggregate_subquery(relationship, aggregate)
new_query =
join_all_relationships(
query,
[
{{:aggregate, aggregate.name, subquery},
relationship_path_to_relationships(resource, aggregate.relationship_path)}
]
)
{new_query, get_binding(resource, aggregate.relationship_path, new_query, :aggregate)}
binding ->
{query, binding}
end
query_with_aggregate_binding =
put_in(
query.__ash_bindings__.aggregates,
Map.put(query.__ash_bindings__.aggregates, aggregate.name, binding)
)
new_query =
query_with_aggregate_binding
|> add_aggregate_to_subquery(resource, aggregate, binding)
|> select_aggregate(resource, aggregate)
{:ok, new_query}
end
defp select_aggregate(query, resource, aggregate) do
binding = get_binding(resource, aggregate.relationship_path, query, :aggregate)
query =
if query.select do
query
else
from(row in query,
select: row,
select_merge: %{aggregates: %{}}
)
end
%{query | select: add_to_select(query.select, binding, aggregate)}
end
defp add_to_select(
%{expr: {:merge, _, [first, {:%{}, _, [{:aggregates, {:%{}, [], fields}}]}]}} = select,
binding,
%{load: nil} = aggregate
) do
accessed =
if aggregate.kind == :first do
        {:fragment, [],
         [
           raw: "",
           expr: {{:., [], [{:&, [], [binding]}, aggregate.name]}, [], []},
           raw: "[1]"
         ]}
else
{{:., [], [{:&, [], [binding]}, aggregate.name]}, [], []}
end
field =
{:type, [],
[
accessed,
Ash.Type.ecto_type(aggregate.type)
]}
field_with_default =
if is_nil(aggregate.default_value) do
field
else
{:coalesce, [],
[
field,
aggregate.default_value
]}
end
new_fields = [
{aggregate.name, field_with_default}
| fields
]
%{select | expr: {:merge, [], [first, {:%{}, [], [{:aggregates, {:%{}, [], new_fields}}]}]}}
end
defp add_to_select(
%{expr: expr} = select,
binding,
%{load: load_as} = aggregate
) do
accessed =
if aggregate.kind == :first do
{:fragment, [],
[
raw: "",
expr: {{:., [], [{:&, [], [binding]}, aggregate.name]}, [], []},
raw: "[1]"
]}
else
{{:., [], [{:&, [], [binding]}, aggregate.name]}, [], []}
end
field =
{:type, [],
[
accessed,
Ash.Type.ecto_type(aggregate.type)
]}
field_with_default =
if is_nil(aggregate.default_value) do
field
else
{:coalesce, [],
[
field,
aggregate.default_value
]}
end
%{select | expr: {:merge, [], [expr, {:%{}, [], [{load_as, field_with_default}]}]}}
end
defp add_aggregate_to_subquery(query, resource, aggregate, binding) do
new_joins =
List.update_at(query.joins, binding - 1, fn join ->
aggregate_query =
if aggregate.authorization_filter do
{:ok, filter} =
filter(
join.source.from.source.query,
aggregate.authorization_filter,
Ash.Resource.Info.related(resource, aggregate.relationship_path)
)
filter
else
join.source.from.source.query
end
new_aggregate_query = add_subquery_aggregate_select(aggregate_query, aggregate, resource)
put_in(join.source.from.source.query, new_aggregate_query)
end)
%{
query
| joins: new_joins
}
end
defp aggregate_subquery(relationship, aggregate) do
query =
from(row in relationship.destination,
group_by: ^relationship.destination_field,
select: field(row, ^relationship.destination_field)
)
if aggregate.query && aggregate.query.tenant do
Ecto.Query.put_query_prefix(query, aggregate.query.tenant)
else
query
end
end
defp order_to_postgres_order(dir) do
case dir do
:asc -> nil
:asc_nils_last -> " ASC NULLS LAST"
:asc_nils_first -> " ASC NULLS FIRST"
:desc -> " DESC"
:desc_nils_last -> " DESC NULLS LAST"
:desc_nils_first -> " DESC NULLS FIRST"
end
end
defp add_subquery_aggregate_select(query, %{kind: :first} = aggregate, _resource) do
query = default_bindings(query, aggregate.resource)
key = aggregate.field
type = Ash.Type.ecto_type(aggregate.type)
field =
if aggregate.query && aggregate.query.sort && aggregate.query.sort != [] do
sort_expr =
aggregate.query.sort
|> Enum.map(fn {sort, order} ->
case order_to_postgres_order(order) do
nil ->
[expr: {{:., [], [{:&, [], [0]}, sort]}, [], []}]
order ->
[expr: {{:., [], [{:&, [], [0]}, sort]}, [], []}, raw: order]
end
end)
|> Enum.intersperse(raw: ", ")
|> List.flatten()
{:fragment, [],
[
raw: "array_agg(",
expr: {{:., [], [{:&, [], [0]}, key]}, [], []},
raw: "ORDER BY "
] ++
sort_expr ++ [raw: ")"]}
else
{:fragment, [],
[
raw: "array_agg(",
expr: {{:., [], [{:&, [], [0]}, key]}, [], []},
raw: ")"
]}
end
{params, filtered} =
if aggregate.query && aggregate.query.filter &&
not match?(%Ash.Filter{expression: nil}, aggregate.query.filter) do
{params, expr} =
filter_to_expr(
aggregate.query.filter,
query.__ash_bindings__.bindings,
query.select.params
)
{params, {:filter, [], [field, expr]}}
else
{[], field}
end
cast = {:type, [], [filtered, {:array, type}]}
new_expr = {:merge, [], [query.select.expr, {:%{}, [], [{aggregate.name, cast}]}]}
%{query | select: %{query.select | expr: new_expr, params: params}}
end
defp add_subquery_aggregate_select(query, %{kind: :count} = aggregate, resource) do
query = default_bindings(query, aggregate.resource)
key = aggregate.field || List.first(Ash.Resource.Info.primary_key(resource))
type = Ash.Type.ecto_type(aggregate.type)
field = {:count, [], [{{:., [], [{:&, [], [0]}, key]}, [], []}]}
{params, filtered} =
if aggregate.query && aggregate.query.filter &&
not match?(%Ash.Filter{expression: nil}, aggregate.query.filter) do
{params, expr} =
filter_to_expr(
aggregate.query.filter,
query.__ash_bindings__.bindings,
query.select.params
)
{params, {:filter, [], [field, expr]}}
else
{[], field}
end
cast = {:type, [], [filtered, type]}
new_expr = {:merge, [], [query.select.expr, {:%{}, [], [{aggregate.name, cast}]}]}
%{query | select: %{query.select | expr: new_expr, params: params}}
end
defp relationship_path_to_relationships(resource, path, acc \\ [])
defp relationship_path_to_relationships(_resource, [], acc), do: Enum.reverse(acc)
defp relationship_path_to_relationships(resource, [relationship | rest], acc) do
relationship = Ash.Resource.Info.relationship(resource, relationship)
relationship_path_to_relationships(relationship.destination, rest, [relationship | acc])
end
defp join_all_relationships(query, relationship_paths, path \\ [], source \\ nil) do
query = default_bindings(query, source)
Enum.reduce(relationship_paths, query, fn
{_join_type, []}, query ->
query
{join_type, [relationship | rest_rels]}, query ->
source = source || relationship.source
current_path = path ++ [relationship]
current_join_type =
case join_type do
{:aggregate, _name, _agg} when rest_rels != [] ->
:left
other ->
other
end
if has_binding?(source, Enum.reverse(current_path), query, current_join_type) do
query
else
joined_query =
join_relationship(
query,
relationship,
Enum.map(path, & &1.name),
current_join_type,
source
)
joined_query_with_distinct = add_distinct(relationship, join_type, joined_query)
join_all_relationships(
joined_query_with_distinct,
[{join_type, rest_rels}],
current_path,
source
)
end
end)
end
defp has_binding?(resource, path, query, {:aggregate, _, _}),
do: has_binding?(resource, path, query, :aggregate)
defp has_binding?(resource, candidate_path, %{__ash_bindings__: _} = query, type) do
Enum.any?(query.__ash_bindings__.bindings, fn
{_, %{path: path, source: source, type: ^type}} ->
Ash.SatSolver.synonymous_relationship_paths?(resource, path, candidate_path, source)
_ ->
false
end)
end
defp has_binding?(_, _, _, _), do: false
defp get_binding(resource, path, %{__ash_bindings__: _} = query, type) do
paths =
Enum.flat_map(query.__ash_bindings__.bindings, fn
{binding, %{path: path, type: ^type}} ->
[{binding, path}]
_ ->
[]
end)
Enum.find_value(paths, fn {binding, candidate_path} ->
Ash.SatSolver.synonymous_relationship_paths?(resource, candidate_path, path) && binding
end)
end
defp get_binding(_, _, _, _), do: nil
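  # A left join across a cardinality-many relationship can duplicate source
  # rows, so deduplicate with a distinct clause when one isn't already set.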
defp add_distinct(relationship, join_type, joined_query) do
if relationship.cardinality == :many and join_type == :left && !joined_query.distinct do
from(row in joined_query,
distinct: ^Ash.Resource.Info.primary_key(relationship.destination)
)
else
joined_query
end
end
defp join_relationship(query, relationship, path, join_type, source) do
case Map.get(query.__ash_bindings__.bindings, path) do
%{type: existing_join_type} when join_type != existing_join_type ->
raise "unreachable?"
nil ->
do_join_relationship(query, relationship, path, join_type, source)
_ ->
query
end
end
defp do_join_relationship(query, %{type: :many_to_many} = relationship, path, kind, source) do
relationship_through = maybe_get_resource_query(relationship.through)
relationship_destination =
Ecto.Queryable.to_query(maybe_get_resource_query(relationship.destination))
current_binding =
Enum.find_value(query.__ash_bindings__.bindings, 0, fn {binding, data} ->
if data.type == kind && data.path == Enum.reverse(path) do
binding
end
end)
new_query =
case kind do
{:aggregate, _, subquery} ->
subquery =
subquery(
from(destination in subquery,
where:
field(destination, ^relationship.destination_field) ==
field(
parent_as(:rel_through),
^relationship.destination_field_on_join_table
)
)
)
from([{row, current_binding}] in query,
left_join: through in ^relationship_through,
as: :rel_through,
on:
field(row, ^relationship.source_field) ==
field(through, ^relationship.source_field_on_join_table),
left_lateral_join: destination in ^subquery,
on:
field(destination, ^relationship.destination_field) ==
field(through, ^relationship.destination_field_on_join_table)
)
:inner ->
from([{row, current_binding}] in query,
join: through in ^relationship_through,
on:
field(row, ^relationship.source_field) ==
field(through, ^relationship.source_field_on_join_table),
join: destination in ^relationship_destination,
on:
field(destination, ^relationship.destination_field) ==
field(through, ^relationship.destination_field_on_join_table)
)
_ ->
from([{row, current_binding}] in query,
left_join: through in ^relationship_through,
on:
field(row, ^relationship.source_field) ==
field(through, ^relationship.source_field_on_join_table),
left_join: destination in ^relationship_destination,
on:
field(destination, ^relationship.destination_field) ==
field(through, ^relationship.destination_field_on_join_table)
)
end
join_path =
Enum.reverse([String.to_existing_atom(to_string(relationship.name) <> "_join_assoc") | path])
full_path = Enum.reverse([relationship.name | path])
binding_data =
case kind do
{:aggregate, name, _agg} ->
%{type: :aggregate, name: name, path: full_path, source: source}
_ ->
%{type: kind, path: full_path, source: source}
end
new_query
|> add_binding(%{path: join_path, type: :left, source: source})
|> add_binding(binding_data)
end
defp do_join_relationship(query, relationship, path, kind, source) do
relationship_destination =
Ecto.Queryable.to_query(maybe_get_resource_query(relationship.destination))
current_binding =
Enum.find_value(query.__ash_bindings__.bindings, 0, fn {binding, data} ->
if data.type == kind && data.path == Enum.reverse(path) do
binding
end
end)
new_query =
case kind do
{:aggregate, _, subquery} ->
subquery =
from(
sub in subquery(
from(destination in subquery,
where:
field(destination, ^relationship.destination_field) ==
field(parent_as(:rel_source), ^relationship.source_field)
)
),
select: field(sub, ^relationship.destination_field)
)
from([{row, current_binding}] in query,
as: :rel_source,
left_lateral_join: destination in ^subquery,
on:
field(row, ^relationship.source_field) ==
field(destination, ^relationship.destination_field)
)
:inner ->
from([{row, current_binding}] in query,
join: destination in ^relationship_destination,
on:
field(row, ^relationship.source_field) ==
field(destination, ^relationship.destination_field)
)
_ ->
from([{row, current_binding}] in query,
left_join: destination in ^relationship_destination,
on:
field(row, ^relationship.source_field) ==
field(destination, ^relationship.destination_field)
)
end
full_path = Enum.reverse([relationship.name | path])
binding_data =
case kind do
{:aggregate, name, _agg} ->
%{type: :aggregate, name: name, path: full_path, source: source}
_ ->
%{type: kind, path: full_path, source: source}
end
new_query
|> add_binding(binding_data)
end
defp add_filter_expression(query, filter) do
wheres =
filter
|> split_and_statements()
|> Enum.map(fn filter ->
{params, expr} = filter_to_expr(filter, query.__ash_bindings__.bindings, [])
%Ecto.Query.BooleanExpr{
expr: expr,
op: :and,
params: params
}
end)
%{query | wheres: query.wheres ++ wheres}
end
defp split_and_statements(%Filter{expression: expression}) do
split_and_statements(expression)
end
defp split_and_statements(%BooleanExpression{op: :and, left: left, right: right}) do
split_and_statements(left) ++ split_and_statements(right)
end
defp split_and_statements(%Not{expression: %Not{expression: expression}}) do
split_and_statements(expression)
end
defp split_and_statements(%Not{
expression: %BooleanExpression{op: :or, left: left, right: right}
}) do
split_and_statements(%BooleanExpression{
op: :and,
left: %Not{expression: left},
right: %Not{expression: right}
})
end
defp split_and_statements(other), do: [other]
defp filter_to_expr(expr, bindings, params, embedded? \\ false, type \\ nil)
defp filter_to_expr(%Filter{expression: expression}, bindings, params, embedded?, type) do
filter_to_expr(expression, bindings, params, embedded?, type)
end
# A nil filter means "everything"
defp filter_to_expr(nil, _, _, _, _), do: {[], true}
# A true filter means "everything"
defp filter_to_expr(true, _, _, _, _), do: {[], true}
# A false filter means "nothing"
defp filter_to_expr(false, _, _, _, _), do: {[], false}
defp filter_to_expr(expression, bindings, params, embedded?, type) do
do_filter_to_expr(expression, bindings, params, embedded?, type)
end
defp do_filter_to_expr(expr, bindings, params, embedded?, type \\ nil)
defp do_filter_to_expr(
%BooleanExpression{op: op, left: left, right: right},
bindings,
params,
embedded?,
_type
) do
{params, left_expr} = do_filter_to_expr(left, bindings, params, embedded?)
{params, right_expr} = do_filter_to_expr(right, bindings, params, embedded?)
{params, {op, [], [left_expr, right_expr]}}
end
defp do_filter_to_expr(%Not{expression: expression}, bindings, params, embedded?, _type) do
{params, new_expression} = do_filter_to_expr(expression, bindings, params, embedded?)
{params, {:not, [], [new_expression]}}
end
defp do_filter_to_expr(
%TrigramSimilarity{arguments: [arg1, arg2], embedded?: pred_embedded?},
bindings,
params,
embedded?,
_type
) do
{params, arg1} = do_filter_to_expr(arg1, bindings, params, pred_embedded? || embedded?)
{params, arg2} = do_filter_to_expr(arg2, bindings, params, pred_embedded? || embedded?)
{params, {:fragment, [], [raw: "similarity(", expr: arg1, raw: ", ", expr: arg2, raw: ")"]}}
end
defp do_filter_to_expr(
%Type{arguments: [arg1, arg2], embedded?: pred_embedded?},
bindings,
params,
embedded?,
_type
)
when pred_embedded? or embedded? do
{params, arg1} = do_filter_to_expr(arg1, bindings, params, true)
{params, arg2} = do_filter_to_expr(arg2, bindings, params, true)
case maybe_ecto_type(arg2) do
nil ->
{params, {:type, [], [arg1, arg2]}}
type ->
case arg1 do
%{__predicate__?: _} ->
{params, {:type, [], [arg1, arg2]}}
value ->
{params, %Ecto.Query.Tagged{value: value, type: type}}
end
end
end
defp do_filter_to_expr(
%Type{arguments: [arg1, arg2], embedded?: pred_embedded?},
bindings,
params,
embedded?,
_type
) do
{params, arg1} = do_filter_to_expr(arg1, bindings, params, pred_embedded? || embedded?)
{params, arg2} = do_filter_to_expr(arg2, bindings, params, pred_embedded? || embedded?)
arg2 = maybe_ecto_type(arg2)
{params, {:type, [], [arg1, arg2]}}
end
defp do_filter_to_expr(
%Fragment{arguments: arguments, embedded?: pred_embedded?},
bindings,
params,
embedded?,
_type
) do
{params, fragment_data} =
Enum.reduce(arguments, {params, []}, fn
{:raw, str}, {params, fragment_data} ->
{params, fragment_data ++ [{:raw, str}]}
{:expr, expr}, {params, fragment_data} ->
{params, expr} = do_filter_to_expr(expr, bindings, params, pred_embedded? || embedded?)
{params, fragment_data ++ [{:expr, expr}]}
end)
{params, {:fragment, [], fragment_data}}
end
defp do_filter_to_expr(
%IsNil{left: left, right: right, embedded?: pred_embedded?},
bindings,
params,
embedded?,
_type
) do
{params, left_expr} = do_filter_to_expr(left, bindings, params, pred_embedded? || embedded?)
{params, right_expr} = do_filter_to_expr(right, bindings, params, pred_embedded? || embedded?)
{params,
{:==, [],
[
{:is_nil, [], [left_expr]},
right_expr
]}}
end
defp do_filter_to_expr(
%Ago{arguments: [left, right], embedded?: _pred_embedded?},
_bindings,
params,
_embedded?,
_type
)
when is_integer(left) and (is_binary(right) or is_atom(right)) do
{params ++ [{DateTime.utc_now(), {:param, :any_datetime}}],
{:datetime_add, [], [{:^, [], [Enum.count(params)]}, left * -1, to_string(right)]}}
end
defp do_filter_to_expr(
%Contains{arguments: [left, %Ash.CiString{} = right], embedded?: pred_embedded?},
bindings,
params,
embedded?,
type
) do
do_filter_to_expr(
%Fragment{
embedded?: pred_embedded?,
arguments: [
raw: "strpos(",
expr: left,
raw: "::citext, ",
expr: right,
raw: ") > 0"
]
},
bindings,
params,
embedded?,
type
)
end
defp do_filter_to_expr(
%Contains{arguments: [left, right], embedded?: pred_embedded?},
bindings,
params,
embedded?,
type
) do
do_filter_to_expr(
%Fragment{
embedded?: pred_embedded?,
arguments: [
raw: "strpos(",
expr: left,
raw: ", ",
expr: right,
raw: ") > 0"
]
},
bindings,
params,
embedded?,
type
)
end
defp do_filter_to_expr(
%mod{
__predicate__?: _,
left: left,
right: right,
embedded?: pred_embedded?,
operator: op
},
bindings,
params,
embedded?,
_type
) do
{left_type, right_type} =
case determine_type(mod, left) do
nil ->
case determine_type(mod, right, true) do
nil ->
{nil, nil}
left_type ->
{left_type, nil}
end
right_type ->
if vague?(right_type) do
case determine_type(mod, right, true) do
nil ->
{nil, right_type}
left_type ->
{left_type, nil}
end
else
{nil, right_type}
end
end
{params, left_expr} =
do_filter_to_expr(left, bindings, params, pred_embedded? || embedded?, left_type)
{params, right_expr} =
do_filter_to_expr(right, bindings, params, pred_embedded? || embedded?, right_type)
{params,
{op, [],
[
left_expr,
right_expr
]}}
end
defp do_filter_to_expr(
%Ref{attribute: %{name: name}} = ref,
bindings,
params,
_embedded?,
_type
) do
{params, {{:., [], [{:&, [], [ref_binding(ref, bindings)]}, name]}, [], []}}
end
defp do_filter_to_expr({:embed, other}, _bindings, params, _true, _type) do
{params, other}
end
defp do_filter_to_expr(%Ash.CiString{string: string}, bindings, params, embedded?, type) do
do_filter_to_expr(
%Fragment{
embedded?: embedded?,
arguments: [
raw: "",
expr: string,
raw: "::citext"
]
},
bindings,
params,
embedded?,
type
)
end
defp do_filter_to_expr(%MapSet{} = mapset, bindings, params, embedded?, type) do
do_filter_to_expr(Enum.to_list(mapset), bindings, params, embedded?, type)
end
defp do_filter_to_expr(other, _bindings, params, true, _type) do
{params, other}
end
defp do_filter_to_expr(value, _bindings, params, false, type) do
type = type || :any
value = last_ditch_cast(value, type)
{params ++ [{value, type}], {:^, [], [Enum.count(params)]}}
end
defp maybe_ecto_type({:array, type}), do: {:array, maybe_ecto_type(type)}
defp maybe_ecto_type(type) when is_atom(type) do
if Ash.Type.ash_type?(type) do
Ash.Type.ecto_type(type)
end
end
defp maybe_ecto_type(_type), do: nil
defp last_ditch_cast(value, :string) when is_atom(value) do
to_string(value)
end
defp last_ditch_cast(value, _type) do
value
end
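  # Walks the operator's declared type signatures (`mod.types/0`) to pick an
  # explicit cast type for the untyped side of a comparison, preferring
  # concrete types over vague ones like :any. `flip?` reverses each signature
  # so the right-hand side can be inspected with the same logic.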
defp determine_type(mod, ref, flip? \\ false)
defp determine_type(mod, %Ref{attribute: %{type: type}}, flip?) do
Enum.find_value(mod.types(), fn types ->
types =
case types do
:same ->
[type]
:any ->
[]
other when is_list(other) ->
other =
if flip? do
Enum.reverse(other)
else
other
end
Enum.map(other, fn
{:array, :any} ->
{:in, :any}
{:array, :same} ->
{:in, type}
{:array, type} ->
{:in, type}
type ->
type
end)
other ->
[other]
end
types
|> Enum.sort_by(&vague?/1)
|> Enum.at(0)
|> case do
nil ->
nil
{:in, :any} ->
{:in, :any}
{:in, type} ->
if Ash.Type.ash_type?(type) do
{:in, Ash.Type.storage_type(type)}
else
{:in, type}
end
type ->
if Ash.Type.ash_type?(type) do
Ash.Type.storage_type(type)
else
type
end
end
end)
end
defp determine_type(_mod, _, _), do: nil
defp vague?({:in, :any}), do: true
defp vague?(:any), do: true
defp vague?(_), do: false
defp ref_binding(ref, bindings) do
case ref.attribute do
%Ash.Resource.Attribute{} ->
Enum.find_value(bindings, fn {binding, data} ->
data.path == ref.relationship_path && data.type in [:inner, :left, :root] && binding
end)
%Ash.Query.Aggregate{} = aggregate ->
Enum.find_value(bindings, fn {binding, data} ->
data.path == aggregate.relationship_path && data.type == :aggregate && binding
end)
end
end
defp add_binding(query, data) do
current = query.__ash_bindings__.current
bindings = query.__ash_bindings__.bindings
new_ash_bindings = %{
query.__ash_bindings__
| bindings: Map.put(bindings, current, data),
current: current + 1
}
%{query | __ash_bindings__: new_ash_bindings}
end
@impl true
def transaction(resource, func) do
repo(resource).transaction(func)
end
@impl true
def rollback(resource, term) do
repo(resource).rollback(term)
end
defp maybe_get_resource_query(resource) do
case Ash.Query.data_layer_query(Ash.Query.new(resource), only_validate_filter?: false) do
{:ok, query} -> query
{:error, error} -> {:error, error}
end
end
defp table(resource, changeset) do
changeset.context[:data_layer][:table] || AshPostgres.table(resource)
end
defp raise_table_error!(resource, operation) do
if AshPostgres.polymorphic?(resource) do
raise """
Could not determine table for #{operation} on #{inspect(resource)}.
Polymorphic resources require that the `data_layer[:table]` context is provided.
See the guide on polymorphic resources for more information.
"""
else
raise """
Could not determine table for #{operation} on #{inspect(resource)}.
"""
end
end
end
| 28.383866 | 168 | 0.588269 |
9e727f76492cfcfa98bd70f35cddfe67e4f471aa | 2,991 | exs | Elixir | test/controllers/user_role_controller_test.exs | roryqueue/code-corps-api | f23007e13fed2d7264fd2e2e97b1497488fb54ba | [
"MIT"
] | null | null | null | test/controllers/user_role_controller_test.exs | roryqueue/code-corps-api | f23007e13fed2d7264fd2e2e97b1497488fb54ba | [
"MIT"
] | null | null | null | test/controllers/user_role_controller_test.exs | roryqueue/code-corps-api | f23007e13fed2d7264fd2e2e97b1497488fb54ba | [
"MIT"
] | null | null | null | defmodule CodeCorps.UserRoleControllerTest do
use CodeCorps.ApiCase
alias CodeCorps.UserRole
alias CodeCorps.Repo
defp build_payload, do: %{ "data" => %{"type" => "user-role", "attributes" => %{}}}
defp put_relationships(payload, user, role) do
relationships = build_relationships(user, role)
payload |> put_in(["data", "relationships"], relationships)
end
defp build_relationships(user, role) do
%{
user: %{data: %{id: user.id}},
role: %{data: %{id: role.id}}
}
end
describe "create" do
@tag authenticated: :admin
test "creates and renders resource when data is valid", %{conn: conn} do
user = insert(:user)
role = insert(:role)
payload = build_payload |> put_relationships(user, role)
path = conn |> user_role_path(:create)
json = conn |> post(path, payload) |> json_response(201)
id = json["data"]["id"] |> String.to_integer
user_role = UserRole |> Repo.get!(id)
assert json["data"]["id"] == "#{user_role.id}"
assert json["data"]["type"] == "user-role"
assert json["data"]["relationships"]["user"]["data"]["id"] == "#{user_role.user_id}"
assert json["data"]["relationships"]["role"]["data"]["id"] == "#{user_role.role_id}"
end
@tag authenticated: :admin
test "does not create resource and renders errors when data is invalid", %{conn: conn} do
payload = build_payload
path = conn |> user_role_path(:create)
json = conn |> post(path, payload) |> json_response(422)
assert json["errors"] != %{}
end
test "does not create resource and renders 401 when unauthenticated", %{conn: conn} do
path = conn |> user_role_path(:create)
assert conn |> post(path) |> json_response(401)
end
@tag :authenticated
test "does not create resource and renders 401 when not authorized", %{conn: conn} do
path = conn |> user_role_path(:create)
assert conn |> post(path, build_payload) |> json_response(401)
end
end
describe "delete" do
@tag authenticated: :admin
test "deletes resource", %{conn: conn} do
user_role = insert(:user_role)
path = conn |> user_role_path(:delete, user_role)
assert conn |> delete(path) |> response(204)
end
test "does not delete resource and renders 401 when unauthenticated", %{conn: conn} do
path = conn |> user_role_path(:delete, "id not important")
assert conn |> delete(path) |> json_response(401)
end
@tag :authenticated
test "does not create resource and renders 401 when not authorized", %{conn: conn} do
user_role = insert(:user_role)
path = conn |> user_role_path(:delete, user_role)
assert conn |> delete(path) |> json_response(401)
end
@tag :authenticated
test "renders page not found when id is nonexistent on delete", %{conn: conn} do
path = conn |> user_role_path(:delete, -1)
assert conn |> delete(path) |> json_response(404)
end
end
end
| 33.988636 | 93 | 0.63992 |
9e72ae8edcb7fe470a7af737f003a9488aa0e64d | 1,728 | ex | Elixir | clients/testing/lib/google_api/testing/v1/model/device_file.ex | linjunpop/elixir-google-api | 444cb2b2fb02726894535461a474beddd8b86db4 | [
"Apache-2.0"
] | null | null | null | clients/testing/lib/google_api/testing/v1/model/device_file.ex | linjunpop/elixir-google-api | 444cb2b2fb02726894535461a474beddd8b86db4 | [
"Apache-2.0"
] | null | null | null | clients/testing/lib/google_api/testing/v1/model/device_file.ex | linjunpop/elixir-google-api | 444cb2b2fb02726894535461a474beddd8b86db4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This class is auto generated by the swagger code generator program.
# https://github.com/swagger-api/swagger-codegen.git
# Do not edit the class manually.
defmodule GoogleApi.Testing.V1.Model.DeviceFile do
@moduledoc """
A single device file description.
## Attributes
- obbFile (ObbFile): A reference to an opaque binary blob file. Defaults to: `null`.
- regularFile (RegularFile): A reference to a regular file. Defaults to: `null`.
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:obbFile => GoogleApi.Testing.V1.Model.ObbFile.t(),
:regularFile => GoogleApi.Testing.V1.Model.RegularFile.t()
}
field(:obbFile, as: GoogleApi.Testing.V1.Model.ObbFile)
field(:regularFile, as: GoogleApi.Testing.V1.Model.RegularFile)
end
defimpl Poison.Decoder, for: GoogleApi.Testing.V1.Model.DeviceFile do
def decode(value, options) do
GoogleApi.Testing.V1.Model.DeviceFile.decode(value, options)
end
end
defimpl Poison.Encoder, for: GoogleApi.Testing.V1.Model.DeviceFile do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
| 33.882353 | 86 | 0.740162 |
9e72b2d99a3803c8e0e48ce7f7d6b8136df725b9 | 544 | ex | Elixir | lib/sentinel/mailer/welcome.ex | suranyami/sentinel | aeb421e2e61a4bc14abe89b4a92cb1943a5965fb | [
"MIT"
] | 125 | 2016-01-29T11:46:20.000Z | 2021-06-08T09:25:38.000Z | lib/sentinel/mailer/welcome.ex | suranyami/sentinel | aeb421e2e61a4bc14abe89b4a92cb1943a5965fb | [
"MIT"
] | 54 | 2016-02-18T01:11:58.000Z | 2017-10-19T11:25:26.000Z | lib/sentinel/mailer/welcome.ex | suranyami/sentinel | aeb421e2e61a4bc14abe89b4a92cb1943a5965fb | [
"MIT"
] | 29 | 2016-02-20T12:59:16.000Z | 2018-04-11T14:29:41.000Z | defmodule Sentinel.Mailer.Welcome do
@moduledoc """
Responsible for the creation (and easy override) of the default welcome email
"""
import Bamboo.Email
import Bamboo.Phoenix
import Sentinel.Mailer
@doc """
Takes a user, and a confirmation token and returns an email. It does not send
the email
"""
def build(user, confirmation_token) do
user
|> base_email
|> assign(:user, user)
|> assign(:confirmation_token, confirmation_token)
|> subject("Hello #{user.email}")
|> render(:welcome)
end
end
| 23.652174 | 79 | 0.689338 |
9e72efb3a02c23fe32f2b277d2aa67e714eecef4 | 266 | exs | Elixir | priv/repo/migrations/20170418164826_create_actions.exs | lcr0815/namely | b7d245511377f6ad0f1e470fe38d6c1ff47d012a | [
"MIT"
] | null | null | null | priv/repo/migrations/20170418164826_create_actions.exs | lcr0815/namely | b7d245511377f6ad0f1e470fe38d6c1ff47d012a | [
"MIT"
] | null | null | null | priv/repo/migrations/20170418164826_create_actions.exs | lcr0815/namely | b7d245511377f6ad0f1e470fe38d6c1ff47d012a | [
"MIT"
] | null | null | null | defmodule Namely.Repo.Migrations.CreateActions do
use Ecto.Migration
def change do
create table(:actions) do
add :name, :string
add :entity_type, :string
add :entity_id, :string
add :data, :jsonb
timestamps()
end
end
end | 19 | 49 | 0.650376 |
9e734460136df46fbf8d71b35222af7dd5e3a260 | 149 | exs | Elixir | scenic_asteroids/play/config/config.exs | jordanhubbard/elixir-projects | dee341d672e83a45a17a4a85abd54a480f95c506 | [
"BSD-2-Clause"
] | 31 | 2018-12-25T19:52:35.000Z | 2022-03-20T01:06:46.000Z | play/config/config.exs | QuantumProductions/scenic_font_test | ff8d0df6ade399039b9d9e816e398cb1ad80a7db | [
"BSD-3-Clause"
] | 4 | 2018-12-23T18:34:20.000Z | 2021-05-10T04:05:45.000Z | play/config/config.exs | QuantumProductions/scenic_font_test | ff8d0df6ade399039b9d9e816e398cb1ad80a7db | [
"BSD-3-Clause"
] | 2 | 2019-04-09T18:35:51.000Z | 2020-12-22T15:19:18.000Z | use Mix.Config
config :play, :viewport, %{
name: :main_viewport,
size: {500, 500},
default_scene: {Play.Scene.Splash, Play.Scene.Asteroids}
}
| 18.625 | 58 | 0.691275 |
9e7345b9846409515b986df826d79956af3a0f03 | 3,008 | ex | Elixir | lib/logger_json/plug/metadata_formatters/google_cloud_logger.ex | taxjar/logger_json | b460d2cc1bc7e8b9f3ba1a36ff50795e731f1d93 | [
"MIT"
] | null | null | null | lib/logger_json/plug/metadata_formatters/google_cloud_logger.ex | taxjar/logger_json | b460d2cc1bc7e8b9f3ba1a36ff50795e731f1d93 | [
"MIT"
] | null | null | null | lib/logger_json/plug/metadata_formatters/google_cloud_logger.ex | taxjar/logger_json | b460d2cc1bc7e8b9f3ba1a36ff50795e731f1d93 | [
"MIT"
] | null | null | null | if Code.ensure_loaded?(Plug) do
defmodule LoggerJSON.Plug.MetadataFormatters.GoogleCloudLogger do
@moduledoc """
  This formatter builds metadata which is natively supported by Google Cloud Logger:
* `httpRequest` - see [LogEntry#HttpRequest](https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry#HttpRequest);
* `client.api_version` - version of API that was requested by a client;
* `phoenix.controller` - Phoenix controller that processed the request;
* `phoenix.action` - Phoenix action that processed the request;
* `node.hostname` - node hostname;
* `node.vm_pid` - Erlang VM process identifier.
"""
import Jason.Helpers, only: [json_map: 1]
@nanoseconds_in_second System.convert_time_unit(1, :second, :nanosecond)
@doc false
def build_metadata(conn, latency, client_version_header) do
latency_seconds = native_to_seconds(latency)
request_method = conn.method
request_path = conn.request_path
request_url = request_url(conn)
status = conn.status
user_agent = LoggerJSON.PlugUtils.get_header(conn, "user-agent")
remote_ip = LoggerJSON.PlugUtils.remote_ip(conn)
referer = LoggerJSON.PlugUtils.get_header(conn, "referer")
{hostname, vm_pid} = node_metadata()
client_metadata(conn, client_version_header) ++
phoenix_metadata(conn) ++
[
httpRequest:
json_map(
requestMethod: request_method,
requestPath: request_path,
requestUrl: request_url,
status: status,
userAgent: user_agent,
remoteIp: remote_ip,
referer: referer,
latency: latency_seconds
),
node: json_map(hostname: to_string(hostname), vm_pid: vm_pid)
]
end
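  # Cloud Logging expects `httpRequest.latency` as a Duration string: seconds
  # with an "s" suffix (e.g. "0.00012345s").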
defp native_to_seconds(nil) do
nil
end
defp native_to_seconds(native) do
seconds = System.convert_time_unit(native, :native, :nanosecond) / @nanoseconds_in_second
:erlang.float_to_binary(seconds, [{:decimals, 8}, :compact]) <> "s"
end
defp request_url(%{request_path: "/"} = conn), do: "#{conn.scheme}://#{conn.host}/"
defp request_url(conn), do: "#{conn.scheme}://#{Path.join(conn.host, conn.request_path)}"
defp client_metadata(conn, client_version_header) do
if api_version = LoggerJSON.PlugUtils.get_header(conn, client_version_header) do
[client: json_map(api_version: api_version)]
else
[]
end
end
defp phoenix_metadata(%{private: %{phoenix_controller: controller, phoenix_action: action}}) do
[phoenix: json_map(controller: controller, action: action)]
end
defp phoenix_metadata(_conn) do
[]
end
defp node_metadata do
{:ok, hostname} = :inet.gethostname()
vm_pid =
case Integer.parse(System.get_pid()) do
{pid, _units} -> pid
_ -> nil
end
{hostname, vm_pid}
end
end
end
| 34.181818 | 132 | 0.650598 |
9e738ba347f96c0bd18053798ee193af4d50f5cc | 874 | ex | Elixir | clients/chat/lib/google_api/chat/v1/metadata.ex | mopp/elixir-google-api | d496227d17600bccbdf8f6be9ad1b7e7219d7ec6 | [
"Apache-2.0"
] | null | null | null | clients/chat/lib/google_api/chat/v1/metadata.ex | mopp/elixir-google-api | d496227d17600bccbdf8f6be9ad1b7e7219d7ec6 | [
"Apache-2.0"
] | null | null | null | clients/chat/lib/google_api/chat/v1/metadata.ex | mopp/elixir-google-api | d496227d17600bccbdf8f6be9ad1b7e7219d7ec6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Chat.V1 do
@moduledoc """
API client metadata for GoogleApi.Chat.V1.
"""
@discovery_revision "20220125"
def discovery_revision(), do: @discovery_revision
end
| 32.37037 | 74 | 0.756293 |
9e738fa2f6d5cfe718fe46322e0430e89850ba8a | 316 | exs | Elixir | test/asset/public_key_test.exs | FarmBot/farmbot_os | 5ebdca3afd672eb6b0af5c71cfca02488b32569a | [
"MIT"
] | 843 | 2016-10-05T23:46:05.000Z | 2022-03-14T04:31:55.000Z | farmbot_core/test/asset/public_key_test.exs | gdwb/farmbot_os | 0ef2697c580c9fbf37a22daa063a64addfcb778d | [
"MIT"
] | 455 | 2016-10-15T08:49:16.000Z | 2022-03-15T12:23:04.000Z | farmbot_core/test/asset/public_key_test.exs | gdwb/farmbot_os | 0ef2697c580c9fbf37a22daa063a64addfcb778d | [
"MIT"
] | 261 | 2016-10-10T04:37:06.000Z | 2022-03-13T21:07:38.000Z | defmodule FarmbotCore.Asset.PublicKeyTest do
use ExUnit.Case
alias FarmbotCore.Asset.PublicKey
@expected_keys [:id, :name, :public_key]
test "render/1" do
result = PublicKey.render(%PublicKey{})
mapper = fn key -> assert Map.has_key?(result, key) end
Enum.map(@expected_keys, mapper)
end
end
| 24.307692 | 59 | 0.71519 |
9e73aadc6fd5c89950214813d0fe308b102ecd4f | 839 | exs | Elixir | mix.exs | 490llc/phoenix_pubsub_rabbitmq | 192c7b2cd60496107e819ac56404bd831cd86746 | [
"MIT"
] | 1 | 2016-07-08T13:25:56.000Z | 2016-07-08T13:25:56.000Z | mix.exs | ebakan/phoenix_pubsub_rabbitmq | 192c7b2cd60496107e819ac56404bd831cd86746 | [
"MIT"
] | null | null | null | mix.exs | ebakan/phoenix_pubsub_rabbitmq | 192c7b2cd60496107e819ac56404bd831cd86746 | [
"MIT"
] | 1 | 2020-08-31T15:28:42.000Z | 2020-08-31T15:28:42.000Z | defmodule Phoenix.PubSub.RabbitMQ.Mixfile do
use Mix.Project
def project do
[app: :phoenix_pubsub_rabbitmq,
version: "0.0.1",
elixir: "~> 1.0",
description: description,
package: package,
source_url: "https://github.com/pma/phoenix_pubsub_rabbitmq",
deps: deps,
docs: [readme: "README.md", main: "README"]]
end
def application do
[applications: [:logger, :amqp, :poolboy]]
end
defp deps do
[{:poolboy, "~> 1.4.2"},
{:amqp, "~> 0.1.3"}]
end
defp description do
"""
RabbitMQ adapter for the Phoenix framework PubSub layer.
"""
end
defp package do
[files: ["lib", "mix.exs", "README.md", "LICENSE"],
contributors: ["Paulo Almeida"],
licenses: ["MIT"],
links: %{"GitHub" => "https://github.com/pma/phoenix_pubsub_rabbitmq"}]
end
end
| 22.675676 | 76 | 0.611442 |
9e73f974d28498da47c994a06b63900c1d627ba4 | 836 | ex | Elixir | lib/readtome_web/schema/location_type.ex | zephraph/readtome | 64a5f773bdc3c19d9c5ac50a04aa14e446e36c55 | [
"MIT"
] | 1 | 2021-09-05T20:54:57.000Z | 2021-09-05T20:54:57.000Z | lib/readtome_web/schema/location_type.ex | zephraph/readtome | 64a5f773bdc3c19d9c5ac50a04aa14e446e36c55 | [
"MIT"
] | 17 | 2019-07-06T17:31:56.000Z | 2021-06-22T15:31:06.000Z | lib/readtome_web/schema/location_type.ex | zephraph/readtome | 64a5f773bdc3c19d9c5ac50a04aa14e446e36c55 | [
"MIT"
] | 1 | 2021-03-15T20:50:27.000Z | 2021-03-15T20:50:27.000Z | defmodule ReadtomeWeb.Schema.LocationType do
@moduledoc """
The Location scalar type allows fields with locations.
"""
use Absinthe.Schema.Notation
scalar :location, name: "Location" do
description("""
Location including lat/lng.
""")
serialize(&encode/1)
parse(&decode/1)
end
@spec decode(Absinthe.Blueprint.Input.String.t()) :: {:ok, term()} | :error
@spec decode(Absinthe.Blueprint.Input.Null.t()) :: {:ok, nil}
  defp decode(%Absinthe.Blueprint.Input.String{value: value}) do
    # Geo.JSON.decode/1 (geo >= 2.0) already returns an ok/error tuple, so
    # avoid double-wrapping the result.
    case Geo.JSON.decode(value) do
      {:ok, geometry} -> {:ok, geometry}
      _ -> :error
    end
  end
defp decode(%Absinthe.Blueprint.Input.Null{}) do
{:ok, nil}
end
defp decode(_) do
:error
end
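  # GeoJSON orders coordinates as [longitude, latitude]; flip them into the
  # lat/lng map this scalar exposes.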
defp encode(value) do
{:ok, location} = Geo.JSON.encode(value)
[lng, lat] = location["coordinates"]
%{
lat: lat,
lng: lng
}
end
end
| 20.9 | 77 | 0.63756 |
9e741d3ea8b0d3878ad4003005b6969a2ed32e01 | 14,520 | ex | Elixir | lib/ex_aws/sns.ex | Frameio/ex_aws | 3b335b6ed7932b5cf991323d26cf5497e1e6c122 | [
"Unlicense",
"MIT"
] | null | null | null | lib/ex_aws/sns.ex | Frameio/ex_aws | 3b335b6ed7932b5cf991323d26cf5497e1e6c122 | [
"Unlicense",
"MIT"
] | null | null | null | lib/ex_aws/sns.ex | Frameio/ex_aws | 3b335b6ed7932b5cf991323d26cf5497e1e6c122 | [
"Unlicense",
"MIT"
] | null | null | null | defmodule ExAws.SNS do
import ExAws.Utils, only: [camelize_key: 1, camelize_keys: 1]
@moduledoc """
Operations on AWS SNS
http://docs.aws.amazon.com/sns/latest/api/API_Operations.html
"""
## Topics
######################
@type topic_name :: binary
@type topic_arn :: binary
@type topic_attribute_name ::
:policy |
:display_name |
:delivery_policy
@doc "List topics"
@spec list_topics() :: ExAws.Operation.Query.t
@spec list_topics(opts :: [next_token: binary]) :: ExAws.Operation.Query.t
def list_topics(opts \\ []) do
opts = opts
|> Map.new
|> camelize_keys
request(:list_topics, opts)
end
@doc "Create topic"
@spec create_topic(topic_name :: topic_name) :: ExAws.Operation.Query.t
def create_topic(topic_name) do
request(:create_topic, %{"Name" => topic_name})
end
@doc "Get topic attributes"
@spec get_topic_attributes(topic_arn :: topic_arn) :: ExAws.Operation.Query.t
def get_topic_attributes(topic_arn) do
request(:get_topic_attributes, %{"TopicArn" => topic_arn})
end
@doc "Set topic attributes"
@spec set_topic_attributes(attribute_name :: topic_attribute_name,
attribute_value :: binary,
topic_arn :: topic_arn) :: ExAws.Operation.Query.t
def set_topic_attributes(attribute_name, attribute_value, topic_arn) do
request(:set_topic_attributes, %{
"AttributeName" => attribute_name |> camelize_key,
"AttributeValue" => attribute_value,
"TopicArn" => topic_arn
})
end
@doc "Delete topic"
@spec delete_topic(topic_arn :: topic_arn) :: ExAws.Operation.Query.t
def delete_topic(topic_arn) do
request(:delete_topic, %{"TopicArn" => topic_arn})
end
@type message_attribute :: %{
:name => binary,
:data_type => :string | :number | :binary,
:value => {:string, binary} | {:binary, binary}
}
@type publish_opts :: [
{:message_attributes, [message_attribute]} |
{:message_structure, :json} |
{:subject, binary} |
{:phone_number, binary} |
{:target_arn, binary} |
{:topic_arn, binary}]
@doc """
  Publish a message to a target/topic ARN or phone number
  You must set exactly one of :phone_number, :target_arn, or :topic_arn via the options argument.
  Do NOT assume that because your message is a JSON blob you should set
  message_structure: to :json. That option has a very specific meaning; please see
http://docs.aws.amazon.com/sns/latest/api/API_Publish.html for details.
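  For example, a minimal sketch (the topic ARN below is a placeholder):
      ExAws.SNS.publish("hello", topic_arn: "arn:aws:sns:us-east-1:123456789012:my-topic")
      |> ExAws.request()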
"""
@spec publish(message :: binary, opts :: publish_opts) :: ExAws.Operation.Query.t
def publish(message, opts) do
opts = opts |> Map.new
message_attrs = opts
|> Map.get(:message_attributes, [])
|> build_message_attributes
params = opts
|> Map.drop([:message_attributes])
|> camelize_keys
|> Map.put("Message", message)
|> Map.merge(message_attrs)
request(:publish, params)
end
defp build_message_attributes(attrs) do
attrs
|> Stream.with_index
|> Enum.reduce(%{}, &build_message_attribute/2)
end
  defp build_message_attribute({%{name: name, data_type: data_type, value: {value_type, value}}, i}, params) do
    # AWS's Publish query API expects the plural "MessageAttributes.entry.N" prefix.
    param_root = "MessageAttributes.entry.#{i + 1}"
value_type = value_type |> to_string |> String.capitalize
params
|> Map.put(param_root <> ".Name", name)
|> Map.put(param_root <> ".Value.#{value_type}Value", value)
|> Map.put(param_root <> ".Value.DataType", data_type |> to_string |> String.capitalize)
end
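  # For one attribute this yields params shaped like (illustrative):
  #   %{"MessageAttributes.entry.1.Name" => "trace_id",
  #     "MessageAttributes.entry.1.Value.StringValue" => "abc-123",
  #     "MessageAttributes.entry.1.Value.DataType" => "String"}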
## Platform
######################
  @type platform_application_arn :: binary

  @doc "Create platform application"
@spec create_platform_application(name :: binary, platform :: binary, attributes :: %{String.t => String.t}) :: ExAws.Operation.Query.t
def create_platform_application(name, platform, attributes) do
attributes =
attributes
|> build_kv_attrs
|> Map.merge(%{
"Name" => name,
"Platform" => platform,
})
request(:create_platform_application, attributes)
end
@doc "Delete platform application"
@spec delete_platform_application(platform_application_arn :: platform_application_arn) :: ExAws.Operation.Query.t
def delete_platform_application(platform_application_arn) do
request(:delete_platform_application, %{
"PlatformApplicationArn" => platform_application_arn
})
end
@doc "List platform applications"
@spec list_platform_applications() :: ExAws.Operation.Query.t
def list_platform_applications() do
request(:list_platform_applications, %{})
end
@spec list_platform_applications(next_token :: binary) :: ExAws.Operation.Query.t
def list_platform_applications(next_token) do
request(:list_platform_applications, %{"NextToken" => next_token})
end
@doc "Create platform endpoint"
@spec create_platform_endpoint(platform_application_arn :: platform_application_arn,
token :: binary) :: ExAws.Operation.Query.t
@spec create_platform_endpoint(platform_application_arn :: platform_application_arn,
token :: binary,
custom_user_data :: binary) :: ExAws.Operation.Query.t
def create_platform_endpoint(platform_application_arn, token, custom_user_data \\ nil) do
request(:create_platform_endpoint, %{
"PlatformApplicationArn" => platform_application_arn,
"Token" => token,
"CustomUserData" => custom_user_data
})
end
@doc "Get platform application attributes"
@spec get_platform_application_attributes(platform_application_arn :: platform_application_arn) :: ExAws.Operation.Query.t
def get_platform_application_attributes(platform_application_arn) do
request(:get_platform_application_attributes, %{"PlatformApplicationArn" => platform_application_arn})
end
## Subscriptions
######################
@type subscription_attribute_name :: :delivery_policy | :raw_message_delivery
@doc "Create Subscription"
@spec subscribe(topic_arn :: binary, protocol :: binary, endpoint :: binary) :: ExAws.Operation.Query.t
def subscribe(topic_arn, protocol, endpoint) do
request(:subscribe, %{
"TopicArn" => topic_arn,
"Protocol" => protocol,
"Endpoint" => endpoint,
})
end
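  # e.g. (placeholder values):
  #   subscribe("arn:aws:sns:us-east-1:123456789012:my-topic", "https", "https://example.com/sns")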
@doc "Confirm Subscription"
@spec confirm_subscription(topic_arn :: binary, token :: binary, authenticate_on_unsubscribe :: boolean) :: ExAws.Operation.Query.t
def confirm_subscription(topic_arn, token, authenticate_on_unsubscribe \\ false) do
request(:confirm_subscription, %{
"TopicArn" => topic_arn,
"Token" => token,
"AuthenticateOnUnsubscribe" => to_string(authenticate_on_unsubscribe),
})
end
@doc "List Subscriptions"
@spec list_subscriptions() :: ExAws.Operation.Query.t
def list_subscriptions() do
request(:list_subscriptions, %{})
end
@spec list_subscriptions(next_token :: binary) :: ExAws.Operation.Query.t
def list_subscriptions(next_token) do
request(:list_subscriptions, %{"NextToken" => next_token})
end
@type list_subscriptions_by_topic_opt :: {:next_token, binary}
@doc "List Subscriptions by Topic"
@spec list_subscriptions_by_topic(topic_arn :: topic_arn) :: ExAws.Operation.Query.t
@spec list_subscriptions_by_topic(topic_arn :: topic_arn, [list_subscriptions_by_topic_opt]) :: ExAws.Operation.Query.t
def list_subscriptions_by_topic(topic_arn, opts \\ []) do
params = case opts do
[next_token: next_token] ->
%{"TopicArn" => topic_arn, "NextToken" => next_token}
_ ->
%{"TopicArn" => topic_arn}
end
request(:list_subscriptions_by_topic, params)
end
@doc "Unsubscribe"
@spec unsubscribe(subscription_arn :: binary) :: ExAws.Operation.Query.t
def unsubscribe(subscription_arn) do
request(:unsubscribe, %{
"SubscriptionArn" => subscription_arn
})
end
@doc "Get subscription attributes"
@spec get_subscription_attributes(subscription_arn :: binary) :: ExAws.Operation.Query.t
def get_subscription_attributes(subscription_arn) do
request(:get_subscription_attributes, %{
"SubscriptionArn" => subscription_arn
})
end
@doc "Set subscription attributes"
@spec set_subscription_attributes(attribute_name :: subscription_attribute_name,
attribute_value :: binary,
subscription_arn :: binary) :: ExAws.Operation.Query.t
def set_subscription_attributes(attribute_name, attribute_value, subscription_arn) do
request(:set_subscription_attributes, %{
"AttributeName" => attribute_name |> camelize_key,
"AttributeValue" => attribute_value,
"SubscriptionArn" => subscription_arn
})
end
@doc "List phone numbers opted out"
@spec list_phone_numbers_opted_out() :: ExAws.Operation.Query.t
def list_phone_numbers_opted_out() do
request(:list_phone_numbers_opted_out, %{})
end
@spec list_phone_numbers_opted_out(next_token :: binary) :: ExAws.Operation.Query.t
def list_phone_numbers_opted_out(next_token) do
request(:list_phone_numbers_opted_out, %{"NextToken" => next_token})
end
@doc "Opt in phone number"
@spec opt_in_phone_number(phone_number :: binary) :: ExAws.Operation.Query.t
def opt_in_phone_number(phone_number) do
request(:opt_in_phone_number, %{"PhoneNumber" => phone_number})
end
## Endpoints
######################
@type endpoint_arn :: binary
@type endpoint_attributes :: [
{:token, binary}
| {:enabled, boolean}
| {:custom_user_data, binary}
]
@doc "Get endpoint attributes"
@spec get_endpoint_attributes(endpoint_arn :: endpoint_arn) :: ExAws.Operation.Query.t
def get_endpoint_attributes(endpoint_arn) do
request(:get_endpoint_attributes, %{"EndpointArn" => endpoint_arn})
end
@doc "Set endpoint attributes"
@spec set_endpoint_attributes(endpoint_arn :: endpoint_arn, attributes :: endpoint_attributes) :: ExAws.Operation.Query.t
def set_endpoint_attributes(endpoint_arn, attributes) do
params =
attributes
|> build_attrs
request(:set_endpoint_attributes, Map.put(params, "EndpointArn", endpoint_arn))
end
@doc "Delete endpoint"
@spec delete_endpoint(endpoint_arn :: endpoint_arn) :: ExAws.Operation.Query.t
def delete_endpoint(endpoint_arn) do
request(:delete_endpoint, %{
"EndpointArn" => endpoint_arn
})
end
## Messages
######################
@notification_params ["Message", "MessageId", "Subject", "Timestamp", "TopicArn", "Type"]
@optional_notification_params ["Subject"]
@confirmation_params ["Message", "MessageId", "SubscribeURL", "Timestamp", "Token", "TopicArn", "Type"]
@signature_params ["SignatureVersion", "Signature", "SigningCertURL"]
@message_types ["SubscriptionConfirmation", "UnsubscribeConfirmation", "Notification"]
@doc "Verify message signature"
  @spec verify_message(message_params :: %{String.t => String.t}) :: :ok | {:error, String.t}
def verify_message(message_params) do
with :ok <- validate_message_params(message_params),
:ok <- validate_signature_version(message_params["SignatureVersion"]),
{:ok, public_key} <- ExAws.SNS.PublicKeyCache.get(message_params["SigningCertURL"]) do
message_params
|> get_string_to_sign
|> verify(message_params["Signature"], public_key)
end
end
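  # Illustrative use with the decoded params of an SNS HTTP(S) notification
  # (process_notification/1 and handle_invalid/1 are hypothetical callbacks):
  #
  #   case ExAws.SNS.verify_message(params) do
  #     :ok -> process_notification(params)
  #     {:error, reason} -> handle_invalid(reason)
  #   end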
defp validate_message_params(message_params) do
with {:ok, required_params} <- get_required_params(message_params["Type"]) do
case required_params -- Map.keys(message_params) do
[] -> :ok
missing_params -> {:error, "The following parameters are missing: #{inspect missing_params}"}
end
end
end
defp get_required_params(message_type) do
case message_type do
"Notification" -> {:ok, (@notification_params -- @optional_notification_params) ++ @signature_params}
"SubscriptionConfirmation" -> {:ok, @confirmation_params ++ @signature_params}
"UnsubscribeConfirmation" -> {:ok, @confirmation_params ++ @signature_params}
type when is_binary(type) -> {:error, "Invalid Type, expected one of #{inspect @message_types}"}
type when is_nil(type) -> {:error, "Missing message type parameter (Type)"}
type -> {:error, "Invalid message type's type, expected a String, got #{inspect type}"}
end
end
defp validate_signature_version(version) do
case version do
"1" -> :ok
val when is_binary(val) -> {:error, "Unsupported SignatureVersion, expected \"1\", got #{version}"}
      _ -> {:error, "Invalid SignatureVersion format, expected a String, got #{inspect version}"}
end
end
defp get_string_to_sign(message_params) do
message_params
|> Map.take(get_params_to_sign(message_params["Type"]))
|> Enum.map(fn {key, value} -> [to_string(key), "\n", to_string(value), "\n"] end)
|> IO.iodata_to_binary
end
defp get_params_to_sign(message_type) do
case message_type do
"Notification" -> @notification_params
"SubscriptionConfirmation" -> @confirmation_params
"UnsubscribeConfirmation" -> @confirmation_params
end
end
defp verify(message, signature, public_key) do
case :public_key.verify(message, :sha, Base.decode64!(signature), public_key) do
true -> :ok
false -> {:error, "Signature is invalid"}
end
end
## Request
######################
defp request(action, params) do
action_string = action |> Atom.to_string |> Macro.camelize
%ExAws.Operation.Query{
path: "/",
params: params |> Map.put("Action", action_string),
service: :sns,
action: action,
parser: &ExAws.SNS.Parsers.parse/2
}
end
defp build_attrs(attrs) do
attrs
|> Enum.with_index(1)
|> Enum.map(&build_attr/1)
|> Enum.reduce(%{}, &Map.merge(&1, &2))
end
defp build_attr({{name, value}, index}) do
prefix = "Attributes.entry.#{index}."
%{}
|> Map.put(prefix <> "name", format_param_key(name))
|> Map.put(prefix <> "value", value)
end
defp build_kv_attrs(attrs) do
attrs
|> Enum.with_index(1)
|> Enum.map(&build_kv_attr/1)
|> Enum.reduce(%{}, &Map.merge(&1, &2))
end
defp build_kv_attr({{key, value}, index}) do
prefix = "Attributes.entry.#{index}."
%{}
|> Map.put(prefix <> "key", key)
|> Map.put(prefix <> "value", value)
end
defp format_param_key("*"), do: "*"
defp format_param_key(key) do
key
|> Atom.to_string
|> ExAws.Utils.camelize
end
end
| 34.164706 | 137 | 0.682713 |
9e742302726216bbe26f4047404d0c22f5126081 | 268 | exs | Elixir | priv/repo/migrations/20170709031442_create_posts_events.exs | Apps-Team/conferencetools | ce2e16a3e4a521dc4682e736a209e6dd380c050d | [
"Apache-2.0"
] | null | null | null | priv/repo/migrations/20170709031442_create_posts_events.exs | Apps-Team/conferencetools | ce2e16a3e4a521dc4682e736a209e6dd380c050d | [
"Apache-2.0"
] | 6 | 2017-10-05T20:16:34.000Z | 2017-10-05T20:36:11.000Z | priv/repo/migrations/20170709031442_create_posts_events.exs | apps-team/events-tools | ce2e16a3e4a521dc4682e736a209e6dd380c050d | [
"Apache-2.0"
] | null | null | null | defmodule EventsTools.Repo.Migrations.CreatePostsEvents do
use Ecto.Migration
def change do
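    # Join table linking posts and events; each row keeps its own id (primary_key:
    # true) plus inserted_at/updated_at columns from timestamps/0.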
create table(:posts_events, primary_key: true) do
add :post_id, references(:posts)
add :event_id, references(:events)
timestamps()
end
end
end
| 19.142857 | 58 | 0.712687 |
9e744ae9c425e6c5423f602dca293a395f4067c6 | 884 | ex | Elixir | clients/app_engine/lib/google_api/app_engine/v1/metadata.ex | jamesvl/elixir-google-api | 6c87fb31d996f08fb42ce6066317e9d652a87acc | [
"Apache-2.0"
] | null | null | null | clients/app_engine/lib/google_api/app_engine/v1/metadata.ex | jamesvl/elixir-google-api | 6c87fb31d996f08fb42ce6066317e9d652a87acc | [
"Apache-2.0"
] | 1 | 2020-12-18T09:25:12.000Z | 2020-12-18T09:25:12.000Z | clients/app_engine/lib/google_api/app_engine/v1/metadata.ex | myskoach/elixir-google-api | 4f8cbc2fc38f70ffc120fd7ec48e27e46807b563 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.AppEngine.V1 do
@moduledoc """
API client metadata for GoogleApi.AppEngine.V1.
"""
@discovery_revision "20201201"
def discovery_revision(), do: @discovery_revision
end
| 32.740741 | 74 | 0.75905 |
9e745c2da729d7b5ece15bfe47ce6cbb4cac1842 | 4,961 | ex | Elixir | lib/data/stats.ex | stevegrossi/ex_venture | e02d5a63fdb882d92cfb4af3e15f7b48ad7054aa | [
"MIT"
] | 1 | 2019-02-10T10:22:39.000Z | 2019-02-10T10:22:39.000Z | lib/data/stats.ex | stevegrossi/ex_venture | e02d5a63fdb882d92cfb4af3e15f7b48ad7054aa | [
"MIT"
] | null | null | null | lib/data/stats.ex | stevegrossi/ex_venture | e02d5a63fdb882d92cfb4af3e15f7b48ad7054aa | [
"MIT"
] | null | null | null | defmodule Data.Stats do
@moduledoc """
Item statistics
"""
import Data.Type
@type character :: %{
health_points: integer(),
max_health_points: integer(),
skill_points: integer(),
max_skill_points: integer(),
endurance_points: integer(),
max_endurance_points: integer(),
strength: integer(),
agility: integer(),
intelligence: integer(),
awareness: integer(),
vitality: integer(),
willpower: integer()
}
@type armor :: %{
          slot: atom
}
@type weapon :: %{}
@behaviour Ecto.Type
@impl Ecto.Type
def type, do: :map
@impl Ecto.Type
def cast(stats) when is_map(stats), do: {:ok, stats}
def cast(_), do: :error
@impl Ecto.Type
def load(stats) do
stats = for {key, val} <- stats, into: %{}, do: {String.to_atom(key), val}
stats = Enum.into(stats, %{}, &cast_val/1)
{:ok, stats}
end
defp cast_val({key, val}) do
case key do
:slot ->
{key, String.to_atom(val)}
_ ->
{key, val}
end
end
@impl Ecto.Type
def dump(stats) when is_map(stats), do: {:ok, Map.delete(stats, :__struct__)}
def dump(_), do: :error
@doc """
Set defaults for new statistics
A "migration" of stats to ensure new ones are always available. They should be
saved back in after the user loads their account.
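  For example (illustrative), a legacy save is upgraded and back-filled:
      %{health: 40, max_health: 50} |> Data.Stats.default()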
"""
@spec default(Stats.t()) :: Stats.t()
def default(stats) do
stats
|> migrate()
|> ensure(:health_points, 50)
|> ensure(:max_health_points, 50)
|> ensure(:skill_points, 50)
|> ensure(:max_skill_points, 50)
|> ensure(:endurance_points, 20)
|> ensure(:max_endurance_points, 20)
|> ensure(:agility, 10)
|> ensure(:awareness, 10)
|> ensure(:intelligence, 10)
|> ensure(:strength, 10)
|> ensure(:vitality, 10)
|> ensure(:willpower, 10)
end
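  # Renames the legacy :health/:max_health keys from older saves to the current
  # :health_points/:max_health_points keys; other stat maps pass through unchanged.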
defp migrate(stats = %{health: health, max_health: max_health}) do
stats
|> Map.put(:health_points, health)
|> Map.put(:max_health_points, max_health)
|> Map.delete(:health)
|> Map.delete(:max_health)
end
defp migrate(stats), do: stats
@doc """
Slots on a character
"""
@spec slots() :: [atom]
def slots(),
do: [:chest, :head, :shoulders, :neck, :back, :hands, :waist, :legs, :feet, :finger]
@doc """
Fields in the statistics map
"""
@spec basic_fields() :: [atom]
def basic_fields(),
do: [
:agility,
:awareness,
:intelligence,
:strength,
:vitality,
:willpower
]
@doc """
Fields in the statistics map
"""
@spec fields() :: [atom]
def fields(),
do: [
:agility,
:awareness,
:endurance_points,
:health_points,
:intelligence,
:max_endurance_points,
:max_health_points,
:max_skill_points,
:skill_points,
:strength,
:vitality,
:willpower
]
@doc """
Validate a character's stats
iex> Data.Stats.valid_character?(%{health_points: 50, strength: 10})
false
iex> Data.Stats.valid_character?(%{})
false
"""
@spec valid_character?(Stats.character()) :: boolean()
def valid_character?(stats) do
    keys(stats) == fields() && integer_fields?(stats)
end
  defp integer_fields?(stats) do
Enum.all?(fields(), fn field ->
is_integer(Map.get(stats, field))
end)
end
@doc """
Validate an armor item
iex> Data.Stats.valid_armor?(%{slot: :chest, armor: 10})
true
iex> Data.Stats.valid_armor?(%{slot: :chest, armor: :none})
false
iex> Data.Stats.valid_armor?(%{slot: :eye, armor: 10})
false
iex> Data.Stats.valid_armor?(%{})
false
"""
@spec valid_armor?(Stats.armor()) :: boolean()
def valid_armor?(stats) do
keys(stats) == [:armor, :slot] && valid_slot?(stats) && is_integer(stats.armor)
end
@doc """
Validate a weapon item
iex> Data.Stats.valid_weapon?(%{})
true
iex> Data.Stats.valid_weapon?(%{anything: true})
false
"""
@spec valid_weapon?(Stats.weapon()) :: boolean()
def valid_weapon?(stats) do
keys(stats) == []
end
@doc """
Validate an item stats based on type
iex> Data.Stats.valid?("basic", %{})
true
iex> Data.Stats.valid?("basic", %{slot: :chest})
false
"""
@spec valid?(String.t(), Stats.t()) :: boolean()
def valid?(type, stats)
def valid?("armor", stats) do
valid_armor?(stats)
end
def valid?("weapon", stats) do
valid_weapon?(stats)
end
def valid?("basic", stats) do
keys(stats) == []
end
def valid?(_, _), do: false
@doc """
Validate if the slot is right
iex> Data.Stats.valid_slot?(%{slot: :chest})
true
iex> Data.Stats.valid_slot?(%{slot: :eye})
false
"""
@spec valid_slot?(Stats.t()) :: boolean()
def valid_slot?(stats)
def valid_slot?(%{slot: slot}) do
slot in slots()
end
end
| 21.951327 | 88 | 0.58335 |
9e74701d46e3232c93362282a209ed66cecc83b1 | 21,297 | exs | Elixir | test/producer_test.exs | danielkv7/broadway_kafka | 4388117537ee81738ff4d6a89ed6a2c602d47d8c | [
"Apache-2.0"
] | 112 | 2020-01-20T22:04:30.000Z | 2022-03-25T10:41:47.000Z | test/producer_test.exs | danielkv7/broadway_kafka | 4388117537ee81738ff4d6a89ed6a2c602d47d8c | [
"Apache-2.0"
] | 71 | 2020-01-22T12:43:29.000Z | 2022-03-30T22:30:29.000Z | test/producer_test.exs | danielkv7/broadway_kafka | 4388117537ee81738ff4d6a89ed6a2c602d47d8c | [
"Apache-2.0"
] | 39 | 2020-02-21T15:10:42.000Z | 2022-02-11T17:34:09.000Z | defmodule BroadwayKafka.ProducerTest do
use ExUnit.Case
import ExUnit.CaptureLog
import Record, only: [defrecord: 2, extract: 2]
defrecord :brod_received_assignment,
extract(:brod_received_assignment, from_lib: "brod/include/brod.hrl")
defmodule MessageServer do
def start_link() do
Agent.start_link(fn -> %{} end)
end
def push_messages(server, messages, opts) do
topic = Keyword.fetch!(opts, :topic)
partition = Keyword.fetch!(opts, :partition)
key = key(topic, partition)
Agent.update(server, fn queue ->
Map.put(queue, key, (queue[key] || []) ++ messages)
end)
end
def take_messages(server, topic, partition, amount) do
key = key(topic, partition)
Agent.get_and_update(server, fn queue ->
{messages, rest} = Enum.split(queue[key] || [], amount)
{messages, Map.put(queue, key, rest)}
end)
end
defp key(topic, partition) do
"#{topic}-#{partition}"
end
end
defmodule FakeKafkaClient do
@behaviour BroadwayKafka.KafkaClient
import Record, only: [defrecord: 2, extract: 2]
defrecord :kafka_message, extract(:kafka_message, from_lib: "brod/include/brod.hrl")
@impl true
def init(opts), do: {:ok, Map.new(opts)}
@impl true
def setup(_stage_pid, client_id, _callback_module, config) do
if !Process.whereis(client_id) do
{:ok, _pid} = Agent.start(fn -> true end, name: client_id)
Process.monitor(client_id)
end
send(config[:test_pid], {:setup, client_id})
{:ok, :fake_coord}
end
@impl true
def fetch(_client_id, topic, partition, offset, _opts, config) do
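      # This fake reuses :max_bytes as "max messages per fetch" so tests can
      # exercise chunked fetching without modelling real Kafka byte accounting.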
n_messages = config[:max_bytes]
messages =
MessageServer.take_messages(config[:message_server], topic, partition, n_messages)
send(config[:test_pid], {:messages_fetched, length(messages)})
kafka_messages =
for {msg, i} <- Enum.with_index(messages, offset) do
kafka_message(
value: msg,
offset: i,
key: :fake_key,
ts: :fake_ts,
headers: :fake_headers
)
end
{:ok, {offset + length(kafka_messages), kafka_messages}}
end
@impl true
def ack(_group_coordinator, _generation_id, topic, partition, offset, config) do
info = %{offset: offset, topic: topic, partition: partition, pid: self()}
ack_raises_on_offset = config[:ack_raises_on_offset]
if ack_raises_on_offset && ack_raises_on_offset == offset do
raise "Ack failed on offset #{offset}"
end
send(config[:test_pid], {:ack, info})
end
@impl true
def connected?(client_id) do
connected? =
if pid = Process.whereis(client_id) do
Process.alive?(pid) && Agent.get(client_id, & &1)
end
connected?
end
@impl true
def stop_group_coordinator(_pid) do
:ok
end
@impl true
def resolve_offset(_topic, _partition, offset, _offset_reset_policy, _config) do
offset
end
@impl true
def update_topics(_client_id, _topics) do
:ok
end
end
defmodule Forwarder do
use Broadway
def handle_message(_, message, %{test_pid: test_pid}) do
meta = message.metadata
content = %{
data: message.data,
topic: meta.topic,
partition: meta.partition,
offset: meta.offset,
meta: meta,
pid: self()
}
send(test_pid, {:message_handled, content})
message
end
def handle_batch(_, messages, batch_info, %{test_pid: test_pid}) do
%{batch_key: {topic, partition}} = batch_info
content = %{
topic: topic,
partition: partition,
offset: List.last(messages).metadata.offset,
pid: self()
}
send(test_pid, {:batch_handled, content})
messages
end
end
defmacro assert_receive_in_order({type, content} = pattern, opts) do
offsets = Keyword.fetch!(opts, :offsets)
timeout = Keyword.get(opts, :timeout, 200)
quote do
for offset <- unquote(offsets) do
receive do
{unquote(type), unquote(content) = received_message} ->
assert received_message.offset == offset
after
unquote(timeout) ->
          raise "no message matching #{unquote(Macro.to_string(pattern))} " <>
                  "after #{unquote(timeout)}ms"
end
end
end
end
test "do not allow users to set :partition_by for processors" do
Process.flag(:trap_exit, true)
Broadway.start_link(Forwarder,
name: new_unique_name(),
producer: [module: {BroadwayKafka.Producer, []}],
processors: [default: [partition_by: fn msg -> msg.data end]]
)
assert_receive {:EXIT, _, {%ArgumentError{message: message}, _}}
assert message ==
"cannot set option :partition_by for processors :default. " <>
"The option will be set automatically by BroadwayKafka.Producer"
end
test "do not allow users to set :partition_by for batchers" do
Process.flag(:trap_exit, true)
Broadway.start_link(Forwarder,
name: new_unique_name(),
producer: [module: {BroadwayKafka.Producer, []}],
processors: [default: []],
batchers: [default: [partition_by: fn msg -> msg.data end]]
)
assert_receive {:EXIT, _, {%ArgumentError{message: message}, _}}
assert message ==
"cannot set option :partition_by for batchers :default. " <>
"The option will be set automatically by BroadwayKafka.Producer"
end
test "append kafka metadata to message" do
{:ok, message_server} = MessageServer.start_link()
{:ok, pid} = start_broadway(message_server)
producer = get_producer(pid)
put_assignments(producer, [[topic: "topic", partition: 0]])
MessageServer.push_messages(message_server, 1..1, topic: "topic", partition: 0)
assert_receive {:message_handled, %{data: 1, meta: meta}}
assert meta == %{
topic: "topic",
partition: 0,
offset: 1,
key: :fake_key,
ts: :fake_ts,
headers: :fake_headers
}
stop_broadway(pid)
end
test "single producer receiving messages from a single topic/partition" do
{:ok, message_server} = MessageServer.start_link()
{:ok, pid} = start_broadway(message_server)
producer = get_producer(pid)
put_assignments(producer, [[topic: "topic", partition: 0]])
MessageServer.push_messages(message_server, 1..5, topic: "topic", partition: 0)
for msg <- 1..5 do
assert_receive {:message_handled, %{data: ^msg, partition: 0}}
end
stop_broadway(pid)
end
test "single producer receiving messages from multiple topic/partitions" do
{:ok, message_server} = MessageServer.start_link()
{:ok, pid} = start_broadway(message_server)
producer = get_producer(pid)
put_assignments(producer, [
[topic: "topic_1", partition: 0],
[topic: "topic_1", partition: 1],
[topic: "topic_2", partition: 0],
[topic: "topic_2", partition: 1]
])
MessageServer.push_messages(message_server, 1..5, topic: "topic_1", partition: 0)
MessageServer.push_messages(message_server, 6..10, topic: "topic_1", partition: 1)
MessageServer.push_messages(message_server, 11..15, topic: "topic_2", partition: 0)
MessageServer.push_messages(message_server, 16..20, topic: "topic_2", partition: 1)
for msg <- 1..5 do
assert_receive {:message_handled, %{data: ^msg}}
end
for msg <- 6..10 do
assert_receive {:message_handled, %{data: ^msg}}
end
for msg <- 11..15 do
assert_receive {:message_handled, %{data: ^msg}}
end
for msg <- 16..20 do
assert_receive {:message_handled, %{data: ^msg}}
end
stop_broadway(pid)
end
test "fetch messages by chunks according to :max_bytes" do
{:ok, message_server} = MessageServer.start_link()
{:ok, pid} = start_broadway(message_server)
producer = get_producer(pid)
put_assignments(producer, [[topic: "topic", partition: 0]])
MessageServer.push_messages(message_server, 1..25, topic: "topic", partition: 0)
assert_receive {:messages_fetched, 10}
assert_receive {:messages_fetched, 10}
assert_receive {:messages_fetched, 5}
stop_broadway(pid)
end
test "keep trying to receive new messages when the queue is empty" do
{:ok, message_server} = MessageServer.start_link()
{:ok, pid} = start_broadway(message_server)
producer = get_producer(pid)
put_assignments(producer, [[topic: "topic", partition: 0]])
assert_receive {:messages_fetched, 0}
MessageServer.push_messages(message_server, 1..10, topic: "topic", partition: 0)
assert_receive {:messages_fetched, 10}
assert_receive {:messages_fetched, 0}
stop_broadway(pid)
end
test "messages with the same topic/partition are processed in the same processor" do
{:ok, message_server} = MessageServer.start_link()
{:ok, pid} =
start_broadway(message_server, producers_concurrency: 2, processors_concurrency: 4)
producer_1 = get_producer(pid, 0)
producer_2 = get_producer(pid, 1)
put_assignments(producer_1, [
[topic: "topic_1", partition: 0, begin_offset: 100],
[topic: "topic_2", partition: 1, begin_offset: 400]
])
put_assignments(producer_2, [
[topic: "topic_1", partition: 1, begin_offset: 200],
[topic: "topic_2", partition: 0, begin_offset: 300]
])
MessageServer.push_messages(message_server, 1..10, topic: "topic_1", partition: 0)
MessageServer.push_messages(message_server, 1..10, topic: "topic_1", partition: 1)
MessageServer.push_messages(message_server, 1..10, topic: "topic_2", partition: 0)
MessageServer.push_messages(message_server, 1..10, topic: "topic_2", partition: 1)
assert_receive {:message_handled, %{topic: "topic_1", partition: 0, pid: processor_1}}
assert_receive {:message_handled, %{topic: "topic_1", partition: 1, pid: processor_2}}
assert_receive {:message_handled, %{topic: "topic_2", partition: 0, pid: processor_3}}
assert_receive {:message_handled, %{topic: "topic_2", partition: 1, pid: processor_4}}
processors = Enum.uniq([processor_1, processor_2, processor_3, processor_4])
assert length(processors) == 4
assert_receive_in_order(
{:message_handled, %{topic: "topic_1", partition: 0, pid: ^processor_1}},
offsets: 101..109
)
assert_receive_in_order(
{:message_handled, %{topic: "topic_1", partition: 1, pid: ^processor_2}},
offsets: 201..209
)
assert_receive_in_order(
{:message_handled, %{topic: "topic_2", partition: 0, pid: ^processor_3}},
offsets: 301..309
)
assert_receive_in_order(
{:message_handled, %{topic: "topic_2", partition: 1, pid: ^processor_4}},
offsets: 401..409
)
stop_broadway(pid)
end
test "batches with the same topic/partition are processed in the same batch consumer" do
{:ok, message_server} = MessageServer.start_link()
{:ok, pid} =
start_broadway(message_server,
producers_concurrency: 2,
processors_concurrency: 4,
batchers_concurrency: 4
)
producer_1 = get_producer(pid, 0)
producer_2 = get_producer(pid, 1)
put_assignments(producer_1, [
[topic: "topic_1", partition: 0, begin_offset: 101],
[topic: "topic_2", partition: 1, begin_offset: 401]
])
put_assignments(producer_2, [
[topic: "topic_1", partition: 1, begin_offset: 201],
[topic: "topic_2", partition: 0, begin_offset: 301]
])
MessageServer.push_messages(message_server, 1..50, topic: "topic_1", partition: 0, offset: 110)
MessageServer.push_messages(message_server, 1..50, topic: "topic_1", partition: 1, offset: 210)
MessageServer.push_messages(message_server, 1..50, topic: "topic_2", partition: 0, offset: 310)
MessageServer.push_messages(message_server, 1..50, topic: "topic_2", partition: 1, offset: 410)
assert_receive {:batch_handled, %{topic: "topic_1", partition: 0, pid: consumer_1}}
assert_receive {:batch_handled, %{topic: "topic_1", partition: 1, pid: consumer_2}}
assert_receive {:batch_handled, %{topic: "topic_2", partition: 0, pid: consumer_3}}
assert_receive {:batch_handled, %{topic: "topic_2", partition: 1, pid: consumer_4}}
consumers = Enum.uniq([consumer_1, consumer_2, consumer_3, consumer_4])
assert length(consumers) == 4
assert_receive_in_order(
{:batch_handled, %{topic: "topic_1", partition: 0, pid: ^consumer_1}},
offsets: [120, 130, 140, 150]
)
assert_receive_in_order(
{:batch_handled, %{topic: "topic_1", partition: 1, pid: ^consumer_2}},
offsets: [220, 230, 240, 250]
)
assert_receive_in_order(
{:batch_handled, %{topic: "topic_2", partition: 0, pid: ^consumer_3}},
offsets: [320, 330, 340, 350]
)
assert_receive_in_order(
{:batch_handled, %{topic: "topic_2", partition: 1, pid: ^consumer_4}},
offsets: [420, 430, 440, 450]
)
stop_broadway(pid)
end
test "messages from the same topic/partition are acknowledged in order" do
{:ok, message_server} = MessageServer.start_link()
{:ok, pid} =
start_broadway(message_server,
producers_concurrency: 2,
processors_concurrency: 4
)
producer_1 = get_producer(pid, 0)
producer_2 = get_producer(pid, 1)
put_assignments(producer_1, [
[topic: "topic_1", partition: 0, begin_offset: 101],
[topic: "topic_2", partition: 1, begin_offset: 401]
])
put_assignments(producer_2, [
[topic: "topic_1", partition: 1, begin_offset: 201],
[topic: "topic_2", partition: 0, begin_offset: 301]
])
MessageServer.push_messages(message_server, 1..20, topic: "topic_1", partition: 0)
MessageServer.push_messages(message_server, 1..20, topic: "topic_1", partition: 1)
MessageServer.push_messages(message_server, 1..20, topic: "topic_2", partition: 0)
MessageServer.push_messages(message_server, 1..20, topic: "topic_2", partition: 1)
assert_receive_in_order(
{:ack, %{topic: "topic_1", partition: 0}},
offsets: [105, 110, 115, 120]
)
assert_receive_in_order(
{:ack, %{topic: "topic_1", partition: 1}},
offsets: [205, 210, 215, 220]
)
assert_receive_in_order(
{:ack, %{topic: "topic_2", partition: 0}},
offsets: [305, 310, 315, 320]
)
assert_receive_in_order(
{:ack, %{topic: "topic_2", partition: 1}},
offsets: [405, 410, 415, 420]
)
stop_broadway(pid)
end
test "batches from the same topic/partition are acknowledged in order" do
{:ok, message_server} = MessageServer.start_link()
{:ok, pid} =
start_broadway(message_server,
producers_concurrency: 2,
processors_concurrency: 4,
batchers_concurrency: 4
)
producer_1 = get_producer(pid, 0)
producer_2 = get_producer(pid, 1)
put_assignments(producer_1, [
[topic: "topic_1", partition: 0, begin_offset: 101],
[topic: "topic_2", partition: 1, begin_offset: 401]
])
put_assignments(producer_2, [
[topic: "topic_1", partition: 1, begin_offset: 201],
[topic: "topic_2", partition: 0, begin_offset: 301]
])
MessageServer.push_messages(message_server, 1..40, topic: "topic_1", partition: 0)
MessageServer.push_messages(message_server, 1..40, topic: "topic_1", partition: 1)
MessageServer.push_messages(message_server, 1..40, topic: "topic_2", partition: 0)
MessageServer.push_messages(message_server, 1..40, topic: "topic_2", partition: 1)
assert_receive_in_order(
{:ack, %{topic: "topic_1", partition: 0}},
offsets: [110, 120, 130, 140]
)
assert_receive_in_order(
{:ack, %{topic: "topic_1", partition: 1}},
offsets: [210, 220, 230, 240]
)
assert_receive_in_order(
{:ack, %{topic: "topic_2", partition: 0}},
offsets: [310, 320, 330, 340]
)
assert_receive_in_order(
{:ack, %{topic: "topic_2", partition: 1}},
offsets: [410, 420, 430, 440]
)
stop_broadway(pid)
end
test "continue fetching messages after rebalancing" do
{:ok, message_server} = MessageServer.start_link()
{:ok, pid} = start_broadway(message_server)
producer = get_producer(pid)
put_assignments(producer, [[topic: "topic", partition: 0]])
assert_receive {:messages_fetched, 0}
BroadwayKafka.Producer.assignments_revoked(producer)
put_assignments(producer, [[topic: "topic", partition: 0]])
assert_receive {:messages_fetched, 0}
assert_receive {:messages_fetched, 0}
stop_broadway(pid)
end
test "stop trying to receive new messages after start draining" do
{:ok, message_server} = MessageServer.start_link()
{:ok, pid} = start_broadway(message_server)
producer = get_producer(pid)
put_assignments(producer, [[topic: "topic", partition: 0]])
assert_receive {:messages_fetched, 0}
:sys.suspend(producer)
flush_messages_received()
task = Task.async(fn -> Broadway.Topology.ProducerStage.drain(producer) end)
:sys.resume(producer)
Task.await(task)
refute_receive {:messages_fetched, 0}, 10
stop_broadway(pid)
end
test "if connection is lost, reconnect when :brod client is ready again" do
{:ok, message_server} = MessageServer.start_link()
{:ok, pid} = start_broadway(message_server)
assert_receive {:setup, client_id}
Process.exit(Process.whereis(client_id), :kill)
refute_receive {:setup, _}
{:ok, _} = Agent.start(fn -> false end, name: client_id)
refute_receive {:setup, _}
Agent.update(client_id, fn _ -> true end)
assert_receive {:setup, ^client_id}
stop_broadway(pid)
end
test "keep the producer alive on ack errors and log the exception" do
{:ok, message_server} = MessageServer.start_link()
{:ok, pid} = start_broadway(message_server, ack_raises_on_offset: 4)
producer = get_producer(pid)
producer_pid = Process.whereis(producer)
put_assignments(producer, [[topic: "topic", partition: 0]])
MessageServer.push_messages(message_server, 1..2, topic: "topic", partition: 0)
assert_receive {:ack, %{topic: "topic", partition: 0, pid: ^producer_pid}}
assert capture_log(fn ->
MessageServer.push_messages(message_server, 3..4, topic: "topic", partition: 0)
refute_receive {:ack, %{topic: "topic", partition: 0, pid: ^producer_pid}}
end) =~ "(RuntimeError) Ack failed on offset"
MessageServer.push_messages(message_server, 5..6, topic: "topic", partition: 0)
assert_receive {:ack, %{topic: "topic", partition: 0, pid: ^producer_pid}}
stop_broadway(pid)
end
defp start_broadway(message_server, opts \\ []) do
producers_concurrency = Keyword.get(opts, :producers_concurrency, 1)
processors_concurrency = Keyword.get(opts, :processors_concurrency, 1)
batchers_concurrency = Keyword.get(opts, :batchers_concurrency)
ack_raises_on_offset = Keyword.get(opts, :ack_raises_on_offset, nil)
batchers =
if batchers_concurrency do
[default: [concurrency: batchers_concurrency, batch_size: 10, batch_timeout: 10]]
else
[]
end
{:ok, pid} =
Broadway.start_link(Forwarder,
name: new_unique_name(),
context: %{test_pid: self()},
producer: [
module:
{BroadwayKafka.Producer,
[
client: FakeKafkaClient,
hosts: [],
test_pid: self(),
message_server: message_server,
receive_interval: 0,
reconnect_timeout: 10,
max_bytes: 10,
ack_raises_on_offset: ack_raises_on_offset
]},
concurrency: producers_concurrency
],
processors: [
default: [concurrency: processors_concurrency]
],
batchers: batchers
)
{:ok, pid}
end
defp put_assignments(producer, assignments) do
group_member_id = System.unique_integer([:positive])
group_generation_id = System.unique_integer([:positive])
kafka_assignments =
for assignment <- assignments do
begin_offset = Keyword.get(assignment, :begin_offset, 1)
brod_received_assignment(
topic: assignment[:topic],
partition: assignment[:partition],
begin_offset: begin_offset
)
end
BroadwayKafka.Producer.assignments_received(
producer,
group_member_id,
group_generation_id,
kafka_assignments
)
end
defp new_unique_name() do
:"Broadway#{System.unique_integer([:positive, :monotonic])}"
end
defp get_producer(broadway, index \\ 0) do
{_, name} = Process.info(broadway, :registered_name)
:"#{name}.Broadway.Producer_#{index}"
end
defp stop_broadway(pid) do
ref = Process.monitor(pid)
Process.exit(pid, :normal)
receive do
{:DOWN, ^ref, _, _, _} -> :ok
end
end
defp flush_messages_received() do
receive do
{:messages_fetched, 0} -> flush_messages_received()
after
0 -> :ok
end
end
end
| 30.68732 | 99 | 0.649575 |
9e7475eed31eddd369f422e3c92fbabeedb1925c | 73 | exs | Elixir | test/river/frame/settings_test.exs | peburrows/river | e8968535d02a86e70a7942a690c8e461fed55913 | [
"MIT"
] | 86 | 2016-08-19T21:59:28.000Z | 2022-01-31T20:14:18.000Z | test/river/frame/settings_test.exs | peburrows/river | e8968535d02a86e70a7942a690c8e461fed55913 | [
"MIT"
] | 7 | 2016-09-27T14:44:16.000Z | 2017-08-08T14:57:45.000Z | test/river/frame/settings_test.exs | peburrows/river | e8968535d02a86e70a7942a690c8e461fed55913 | [
"MIT"
] | 4 | 2016-09-26T10:57:24.000Z | 2018-04-03T14:30:19.000Z | defmodule River.Frame.SettingsTest do
use ExUnit.Case, async: true
end
| 18.25 | 37 | 0.794521 |
9e7479a55c1871dac2373120d3710a5169dbde24 | 2,458 | ex | Elixir | lib/bitcoin/block.ex | coinscript/bitcoinsv-elixir | 2dda03c81edc5662743ed2922abb5b1910d9c09a | [
"Apache-2.0"
] | 2 | 2019-08-12T04:53:57.000Z | 2019-09-03T03:47:33.000Z | lib/bitcoin/block.ex | coinscript/bitcoinsv-elixir | 2dda03c81edc5662743ed2922abb5b1910d9c09a | [
"Apache-2.0"
] | null | null | null | lib/bitcoin/block.ex | coinscript/bitcoinsv-elixir | 2dda03c81edc5662743ed2922abb5b1910d9c09a | [
"Apache-2.0"
] | null | null | null | defmodule Bitcoin.Block do
use Bitcoin.Common
alias Bitcoin.Protocol.Messages
alias Bitcoin.Block.Validation
@type t_hash :: Bitcoin.t_hash()
@doc """
Compute hash of the provided block, which is double sha256 of the serialized block header.
"""
@spec hash(Messages.Block.t()) :: t_hash
def hash(%Messages.Block{} = block) do
block
|> Messages.Block.serialize_header()
|> Bitcoin.Util.double_sha256()
end
@doc """
Compute the root hash of the transactions merkle tree for the provided block.
"""
@spec merkle_root(Messages.Block.t()) :: Bitcoin.t_hash()
def merkle_root(%Messages.Block{} = block) do
block.transactions
|> Enum.map(&Bitcoin.Tx.hash/1)
|> Bitcoin.Util.merkle_tree_hash()
end
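  # Pairwise hashing of each level (including Bitcoin's duplicate-last-hash rule
  # for odd counts) is assumed to be handled by Bitcoin.Util.merkle_tree_hash/1.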
@doc """
Returns sum of all transaction fees in the provided block message
"""
  @spec total_fees(Messages.Block.t(), map) :: number
def total_fees(%Messages.Block{} = block, opts \\ %{}) do
[_coinbase | transactions] = block.transactions
opts = opts |> Map.put(:block, block)
transactions
|> Bitcoin.Util.pmap(fn tx -> Bitcoin.Tx.fee(tx, opts) end)
|> Enum.sum()
end
@doc """
  Validate correctness of the block. Function checks if:
* parent block exists
* merkle root hash matches with the calculated one
* block size is within the limit
* block hash below target
* TODO target matches difficulty algorithm
* has coinbase transaction
* block reward is correct
"""
  @spec validate(Messages.Block.t(), map) :: :ok | {:error, term}
def validate(block, opts \\ %{})
def validate(@genesis_block, _opts), do: :ok
def validate(%Messages.Block{} = block, opts) do
flags = validation_flags(block, opts)
opts = %{flags: flags, block: block} |> Map.merge(opts)
[
&Validation.has_parent/1,
&Validation.merkle_root/1,
&Validation.block_size/1,
&Validation.hash_below_target/1,
&Validation.transactions/2,
&Validation.coinbase_value/2
]
|> Bitcoin.Util.run_validations(block, opts)
end
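  # Illustrative use on a deserialized block (accept/1 and reject/2 are hypothetical):
  #
  #   case Bitcoin.Block.validate(block) do
  #     :ok -> accept(block)
  #     {:error, reason} -> reject(block, reason)
  #   end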
def validation_flags(%Messages.Block{} = block, _opts) do
%{
p2sh: fn -> block.timestamp >= @bip16_switch_time end
# TODO some sane way to get block height here, perhaps opts[:height] || Block.hegiht(block)?
# dersig:
# fn -> block.height >= @bip66_height end,
}
|> Enum.reduce(%{}, fn {flag, fun}, map ->
if fun.(), do: Map.put(map, flag, true), else: map
end)
end
end
| 28.917647 | 98 | 0.659479 |
9e74a30c3f294fbde40efb2f870b160bc8398a4f | 1,226 | exs | Elixir | app_ui/config/config.exs | samuelventura/nerves_sample_bbb_emmc | 13b2d376b002c420f71f62cfe48fc3eea4c83a1d | [
"Apache-2.0"
] | null | null | null | app_ui/config/config.exs | samuelventura/nerves_sample_bbb_emmc | 13b2d376b002c420f71f62cfe48fc3eea4c83a1d | [
"Apache-2.0"
] | null | null | null | app_ui/config/config.exs | samuelventura/nerves_sample_bbb_emmc | 13b2d376b002c420f71f62cfe48fc3eea4c83a1d | [
"Apache-2.0"
] | null | null | null | # This file is responsible for configuring your application
# and its dependencies with the aid of the Config module.
#
# This configuration file is loaded before any dependency and
# is restricted to this project.
# General application configuration
import Config
# Configures the endpoint
config :app_ui, AppUiWeb.Endpoint,
url: [host: "nerves.local"],
render_errors: [view: AppUiWeb.ErrorView, accepts: ~w(html json), layout: false],
pubsub_server: AppUi.PubSub,
live_view: [signing_salt: "ybuy7fF7"]
# Configure esbuild (the version is required)
config :esbuild,
version: "0.12.18",
default: [
args:
~w(js/app.js --bundle --target=es2016 --outdir=../priv/static/assets --external:/fonts/* --external:/images/*),
cd: Path.expand("../assets", __DIR__),
env: %{"NODE_PATH" => Path.expand("../deps", __DIR__)}
]
# Configures Elixir's Logger
config :logger, :console,
format: "$time $metadata[$level] $message\n",
metadata: [:request_id]
# Use Jason for JSON parsing in Phoenix
config :phoenix, :json_library, Jason
# Import environment specific config. This must remain at the bottom
# of this file so it overrides the configuration defined above.
import_config "#{config_env()}.exs"
| 32.263158 | 117 | 0.72186 |
9e74b5106464d6c322eec75914b3d9331b4094bb | 70 | ex | Elixir | lib/elixir_app/repo.ex | mogetutu/didactic-invention | be25678ba30ca5b9efc1b27e9f7c79350f53ab95 | [
"MIT"
] | null | null | null | lib/elixir_app/repo.ex | mogetutu/didactic-invention | be25678ba30ca5b9efc1b27e9f7c79350f53ab95 | [
"MIT"
] | null | null | null | lib/elixir_app/repo.ex | mogetutu/didactic-invention | be25678ba30ca5b9efc1b27e9f7c79350f53ab95 | [
"MIT"
] | null | null | null | defmodule ElixirApp.Repo do
use Ecto.Repo, otp_app: :elixir_app
end
| 17.5 | 37 | 0.785714 |
9e74e24ec7dc17af8017da01c8ec2a4a003f6d59 | 3,637 | exs | Elixir | machine_translation/MorpHIN/Learned/Resources/Set5/TrainingInstances/41.exs | AdityaPrasadMishra/NLP--Project-Group-16 | fb62cc6a1db4a494058171f11c14a2be3933a9a1 | [
"MIT"
] | null | null | null | machine_translation/MorpHIN/Learned/Resources/Set5/TrainingInstances/41.exs | AdityaPrasadMishra/NLP--Project-Group-16 | fb62cc6a1db4a494058171f11c14a2be3933a9a1 | [
"MIT"
] | null | null | null | machine_translation/MorpHIN/Learned/Resources/Set5/TrainingInstances/41.exs | AdityaPrasadMishra/NLP--Project-Group-16 | fb62cc6a1db4a494058171f11c14a2be3933a9a1 | [
"MIT"
] | null | null | null | **EXAMPLE FILE**
verb conj noun noun adjective;
verb_aux SYM noun noun adjective;
noun conj noun pnoun adjective;
pnoun cm noun cm pnoun;
noun cm noun pnoun adjective;
verb_aux SYM noun noun adjective;
pnoun cm noun noun adjective;
pnoun pnoun noun noun adjective;
verb_aux SYM noun noun adjective;
pnoun conj noun cm adjective;
adjective verb noun cm adjective;
cm nst noun noun adjective;
verb_aux SYM noun noun adjective;
conj quantifier noun cm pnoun;
verb_aux SYM noun pnoun adjective;
verb pnoun pnoun pnoun pnoun;
pnoun cm pnoun cm adjective;
SYM SYM noun pnoun adjective;
noun cm pnoun noun pnoun;
verb_aux SYM noun noun adjective;
SYM SYM noun noun adjective;
verb conj noun cm adjective;
quantifier adjective noun cm adjective;
SYM nst noun cm adjective;
verb_aux SYM noun pnoun adjective;
verb_aux SYM noun cm adjective;
cardinal adjective noun cm pnoun;
nst adjective adjective noun pnoun;
pnoun cm noun cm pnoun;
nst particle noun cm pnoun;
noun noun noun cm pnoun;
noun cm noun cm pnoun;
verb SYM noun noun adjective;
verb_aux SYM noun noun adjective;
verb SYM noun noun adjective;
noun cm noun cardinal adjective;
verb SYM noun pnoun adjective;
pnoun cm noun pnoun adjective;
verb_aux SYM noun noun adjective;
SYM nst noun cm adjective;
pnoun cm noun noun adjective;
verb_aux SYM pnoun pnoun pnoun;
SYM pnoun pnoun cm pnoun;
pnoun cm pnoun pnoun pnoun;
cm pnoun pnoun noun pnoun;
SYM pnoun cm cardinal pnoun;
verb_aux SYM noun noun adjective;
verb_aux conj pnoun cm pnoun;
pnoun cm noun cm adjective;
verb conj pnoun cm pnoun;
pnoun cm pnoun cm pnoun;
pnoun cm noun cm adjective;
pn noun pnoun cm pnoun;
SYM pn pnoun cm pnoun;
conj pn pnoun cm pnoun;
verb SYM noun noun adjective;
pnoun cm pnoun cm pnoun;
pnoun pnoun cm noun pnoun;
pnoun pnoun cm adjective pnoun;
verb SYM noun cm adjective;
verb_aux SYM noun pnoun adjective;
verb_aux conj noun noun adjective;
pnoun cm noun noun adjective;
noun cm noun pnoun adjective;
pnoun cm pnoun conj pnoun;
cm pnoun adjective noun pnoun;
SYM pn pnoun conj pnoun;
pn pnoun conj pnoun pnoun;
noun cm adjective noun adjective;
pnoun SYM noun pnoun adjective;
cm cm pnoun noun pnoun;
pnoun cm pnoun verb pnoun;
noun cm pnoun cm pnoun;
pnoun cm noun cm adjective;
verb_aux SYM noun pnoun adjective;
SYM SYM noun noun adjective;
SYM SYM noun cm adjective;
cm cm noun verb adjective;
cm nst noun cm adjective;
cm cm cm verb pnoun;
verb_aux pn pnoun noun pnoun;
verb_aux pn pnoun cm pnoun;
verb conj noun cm adjective;
verb_aux SYM noun conj adjective;
cm quantifier noun cm adjective;
verb pn conj pnoun pnoun;
pnoun conj noun cm pnoun;
noun cm cardinal adjective pnoun;
nst pnoun conj adjective adjective;
adjective conj noun cm adjective;
cm nst noun cm adjective;
demonstrative noun noun cm pnoun;
pnoun cm noun cm adjective;
noun cm noun cm adjective;
noun pnoun cm noun pnoun;
nst particle noun cm adjective;
verb SYM noun verb adjective;
verb_aux verb_aux noun pnoun adjective;
cm cardinal noun cm pnoun;
conj pn pnoun pnoun pnoun;
pn pnoun pnoun cm pnoun;
pnoun cm pnoun noun pnoun;
noun cm noun cm adjective;
cm cardinal pnoun cm pnoun;
noun verb noun cm adjective;
noun cm noun pnoun adjective;
verb verb_aux noun cm adjective;
verb SYM noun cm adjective;
pnoun cm noun cm adjective;
cm nst noun cm adjective;
SYM nst noun pnoun adjective;
pnoun cm noun cm adjective;
cm nst noun cm adjective;
verb_aux pn noun pnoun adjective;
cm cm noun pnoun adjective;
SYM adjective noun pnoun adjective;
verb_aux conj noun pnoun adjective;
verb SYM noun noun adjective;
| 30.057851 | 40 | 0.770965 |
9e74eb499216c9dcffa55e09201feb97f2ff0884 | 1,294 | exs | Elixir | mix.exs | codabrink/topo | f1ca4b7fe337a67285ee4c65a34fb521b119342c | [
"MIT"
] | null | null | null | mix.exs | codabrink/topo | f1ca4b7fe337a67285ee4c65a34fb521b119342c | [
"MIT"
] | null | null | null | mix.exs | codabrink/topo | f1ca4b7fe337a67285ee4c65a34fb521b119342c | [
"MIT"
] | null | null | null | defmodule Topo.Mixfile do
use Mix.Project
def project do
[
app: :topo,
version: "0.4.0",
elixir: "~> 1.6",
description: description(),
package: package(),
build_embedded: Mix.env() == :prod,
start_permanent: Mix.env() == :prod,
test_coverage: [tool: ExCoveralls],
dialyzer: [plt_add_apps: [:poison, :mix]],
deps: deps()
]
end
def application do
[applications: [:logger, :geo, :seg_seg, :vector]]
end
defp deps do
[
{:geo, "~> 3.1"},
{:math, "~> 0.5.0"},
{:seg_seg, "~> 0.1"},
{:vector, "~> 1.0"},
{:poison, "~> 3.0", only: [:test, :dev]},
{:benchfella, "~> 0.3.0", only: :dev},
{:excoveralls, "~> 0.8", only: :test},
{:envelope, "~> 1.0", only: :dev},
{:earmark, "~> 1.2", only: :dev},
{:ex_doc, "~> 0.19", only: :dev},
{:dialyxir, "~> 0.5", only: [:dev], runtime: false}
]
end
defp description do
"""
Geometry library for determining spatial relationships between geometries
"""
end
defp package do
[
files: ["lib/topo.ex", "lib/topo", "mix.exs", "README*"],
maintainers: ["Powell Kinney"],
licenses: ["MIT"],
links: %{"GitHub" => "https://github.com/pkinney/topo"}
]
end
end
| 23.962963 | 77 | 0.514683 |
9e74f57aa1774eedfcc87d0b8fa26314dc715684 | 444 | ex | Elixir | lib/siwapp_web/controllers/iframe_controller.ex | jakon89/siwapp | b5f8fd43458deae72c76e434ed0c63b620cb97a4 | [
"MIT"
] | 4 | 2015-02-12T09:23:47.000Z | 2022-03-09T18:11:06.000Z | lib/siwapp_web/controllers/iframe_controller.ex | jakon89/siwapp | b5f8fd43458deae72c76e434ed0c63b620cb97a4 | [
"MIT"
] | 254 | 2021-12-09T14:40:41.000Z | 2022-03-31T08:09:37.000Z | lib/siwapp_web/controllers/iframe_controller.ex | jakon89/siwapp | b5f8fd43458deae72c76e434ed0c63b620cb97a4 | [
"MIT"
] | 1 | 2022-03-07T10:25:49.000Z | 2022-03-07T10:25:49.000Z | defmodule SiwappWeb.IframeController do
use SiwappWeb, :controller
alias Siwapp.Invoices
alias Siwapp.Templates
plug :put_root_layout, false
plug :put_layout, false
@spec iframe(Plug.Conn.t(), map) :: Plug.Conn.t()
def iframe(conn, %{"id" => id}) do
invoice = Invoices.get!(id, preload: [{:items, :taxes}, :payments, :series])
str_template = Templates.print_str_template(invoice)
html(conn, str_template)
end
end
| 26.117647 | 80 | 0.704955 |
9e7510570de6e5669ff1d02f802b17eadcdd48d0 | 460 | exs | Elixir | back/config/test.exs | giovanecosta/zebra-xantis | 92e5937d51c44e75544bb539f5d95f6b0cc61b94 | [
"MIT"
] | null | null | null | back/config/test.exs | giovanecosta/zebra-xantis | 92e5937d51c44e75544bb539f5d95f6b0cc61b94 | [
"MIT"
] | null | null | null | back/config/test.exs | giovanecosta/zebra-xantis | 92e5937d51c44e75544bb539f5d95f6b0cc61b94 | [
"MIT"
] | 1 | 2019-08-29T17:37:51.000Z | 2019-08-29T17:37:51.000Z | use Mix.Config
# Configure your database
config :zx, Zx.Repo,
username: "postgres",
password: "postgres",
database: "zx_test",
hostname: "postgres",
pool: Ecto.Adapters.SQL.Sandbox,
types: Zx.PostgrexTypes
# We don't run a server during test. If one is required,
# you can enable the server option below.
config :zx, ZxWeb.Endpoint,
http: [port: 4002],
server: false
# Print only warnings and errors during test
config :logger, level: :warn
| 23 | 56 | 0.717391 |
9e751d1d6b24ee9ba16da2a1db2cd769e416d373 | 164 | ex | Elixir | lib/app.ex | ryanwinchester/irc | acb16b470144dcd664fb2026afa0b9a1727f2900 | [
"MIT"
] | null | null | null | lib/app.ex | ryanwinchester/irc | acb16b470144dcd664fb2026afa0b9a1727f2900 | [
"MIT"
] | null | null | null | lib/app.ex | ryanwinchester/irc | acb16b470144dcd664fb2026afa0b9a1727f2900 | [
"MIT"
] | null | null | null | defmodule ExIRC.App do
@moduledoc """
Entry point for the ExIRC application.
"""
use Application
def start(_type, _args) do
ExIRC.start!()
end
end
| 14.909091 | 40 | 0.670732 |
9e7520a8cd9d36819d5b3a6b5c37e30b4602260a | 1,355 | exs | Elixir | elixir/test/test_year_2015/day_02_test.exs | fdm1/advent_of_code | a1e91d847fd8fd9f6b2f48333203729b9d64fd80 | [
"MIT"
] | null | null | null | elixir/test/test_year_2015/day_02_test.exs | fdm1/advent_of_code | a1e91d847fd8fd9f6b2f48333203729b9d64fd80 | [
"MIT"
] | null | null | null | elixir/test/test_year_2015/day_02_test.exs | fdm1/advent_of_code | a1e91d847fd8fd9f6b2f48333203729b9d64fd80 | [
"MIT"
] | 1 | 2018-12-02T20:30:23.000Z | 2018-12-02T20:30:23.000Z | defmodule ElixirAdvent.Year2015.Day02Test do
use ExUnit.Case
doctest ElixirAdvent.Year2015.Day02
def test_cases do
# [dimension string, paper, ribbon]
[
["2x3x4", 58, 34],
["1x1x10", 43, 14]
]
end
def input_string do
    Enum.join(Enum.map(test_cases(), fn(test_case) -> Enum.at(test_case, 0) end), "\n")
end
test "input_to_dim_list" do
assert ElixirAdvent.Year2015.Day02.input_to_dim_list(input_string()) == [[2, 3, 4], [1, 1, 10]]
end
test "paper_for_box" do
Enum.map(test_cases(), fn(n) ->
dim = Enum.map(String.split(Enum.at(n, 0), "x"), fn(i) -> String.to_integer(i) end)
assert ElixirAdvent.Year2015.Day02.paper_for_box(dim) == Enum.at(n, 1)
end)
end
test "part1" do
total_sum = Enum.sum(Enum.map(test_cases(), fn(test_case) -> Enum.at(test_case, 1) end))
assert ElixirAdvent.Year2015.Day02.part1(input_string()) == total_sum
end
test "ribbon_for_box" do
Enum.map(test_cases(), fn(n) ->
dim = Enum.map(String.split(Enum.at(n, 0), "x"), fn(i) -> String.to_integer(i) end)
assert ElixirAdvent.Year2015.Day02.ribbon_for_box(dim) == Enum.at(n, 2)
end)
end
test "part2" do
total_sum = Enum.sum(Enum.map(test_cases(), fn(test_case) -> Enum.at(test_case, 2) end))
assert ElixirAdvent.Year2015.Day02.part2(input_string()) == total_sum
end
end
| 29.456522 | 99 | 0.653875 |
9e754b9b6e5baa5a59226487f3435e79a1986dde | 2,030 | exs | Elixir | mix.exs | nickdichev/unicode_guards | d411c17690cbe7969174d61623dbc2d1537ad241 | [
"Apache-2.0"
] | null | null | null | mix.exs | nickdichev/unicode_guards | d411c17690cbe7969174d61623dbc2d1537ad241 | [
"Apache-2.0"
] | null | null | null | mix.exs | nickdichev/unicode_guards | d411c17690cbe7969174d61623dbc2d1537ad241 | [
"Apache-2.0"
] | null | null | null | defmodule Unicode.Guards.MixProject do
use Mix.Project
@version "0.3.1"
def project do
[
app: :unicode_guards,
version: @version,
elixir: "~> 1.8",
start_permanent: Mix.env() == :prod,
build_embedded: Mix.env() == :prod,
deps: deps(),
docs: docs(),
name: "Unicode function guards for Elixir",
source_url: "https://github.com/elixir-unicode/unicode_guards",
description: description(),
package: package(),
elixirc_paths: elixirc_paths(Mix.env())
]
end
defp description do
"""
Implementation of Unicode Set-based guards for Elixir. Supports matching
unicode sets to codepoints that can be used in function guards.
"""
end
defp package do
[
maintainers: ["Kip Cole"],
licenses: ["Apache 2.0"],
links: links(),
files: [
"lib",
"logo.png",
"mix.exs",
"README*",
"CHANGELOG*",
"LICENSE*"
]
]
end
def application do
[
extra_applications: [:logger]
]
end
defp deps do
[
{:unicode_set, "~> 0.5"},
{:nimble_parsec, "~> 0.5", runtime: false},
{:benchee, "~> 1.0", only: :dev},
{:ex_doc, "~> 0.19", only: [:dev, :test, :release], runtime: false}
]
end
def links do
%{
"GitHub" => "https://github.com/elixir-unicode/unicode_guards",
"Readme" => "https://github.com/elixir-unicode/unicode_guards/blob/v#{@version}/README.md",
"Changelog" => "https://github.com/elixir-unicode/unicode_guards/blob/v#{@version}/CHANGELOG.md"
}
end
def docs do
[
source_ref: "v#{@version}",
main: "readme",
logo: "logo.png",
extras: [
"README.md",
"LICENSE.md",
"CHANGELOG.md"
],
skip_undefined_reference_warnings_on: ["changelog"]
]
end
defp elixirc_paths(:test), do: ["lib", "test"]
defp elixirc_paths(:dev), do: ["lib", "bench"]
defp elixirc_paths(_), do: ["lib"]
end
| 23.068182 | 102 | 0.563547 |
9e75570b62fa09b152d34830356dde684e8691c2 | 2,538 | exs | Elixir | apps/api_web/test/api_web/router_test.exs | fjlanasa/api | c39bc393aea572bfb81754b2ea1adf9dda9ce24a | [
"MIT"
] | 62 | 2019-01-17T12:34:39.000Z | 2022-03-20T21:49:47.000Z | apps/api_web/test/api_web/router_test.exs | fjlanasa/api | c39bc393aea572bfb81754b2ea1adf9dda9ce24a | [
"MIT"
] | 375 | 2019-02-13T15:30:50.000Z | 2022-03-30T18:50:41.000Z | apps/api_web/test/api_web/router_test.exs | fjlanasa/api | c39bc393aea572bfb81754b2ea1adf9dda9ce24a | [
"MIT"
] | 14 | 2019-01-16T19:35:57.000Z | 2022-02-26T18:55:54.000Z | defmodule ApiWeb.RouterTest do
@moduledoc false
use ApiWeb.ConnCase
import ApiWeb.Router
import Plug.Conn
describe "authenticated_accepts/2" do
test "denies anonymous users when the type is in authenticated_accepts" do
conn =
build_conn()
|> Map.put(:req_headers, [{"accept", "text/event-stream"}])
|> accepts_runtime([])
|> ApiWeb.Plugs.Authenticate.call(ApiWeb.Plugs.Authenticate.init([]))
assert_raise Phoenix.NotAcceptableError, fn ->
authenticated_accepts(conn, ["event-stream"])
end
end
test "allows registered users when the type is in authenticated_accepts" do
assert %Plug.Conn{} =
build_conn()
|> ApiWeb.ConnCase.conn_with_api_key()
|> Map.put(:req_headers, [{"accept", "text/event-stream"}])
|> accepts_runtime([])
|> ApiWeb.Plugs.Authenticate.call(ApiWeb.Plugs.Authenticate.init([]))
|> authenticated_accepts(["event-stream"])
end
test "allows anonymous users when the type isn't in authenticated_accepts" do
assert %Plug.Conn{} =
build_conn()
|> Map.put(:req_headers, [{"accept", "text/event-stream"}])
|> accepts_runtime([])
|> ApiWeb.Plugs.Authenticate.call(ApiWeb.Plugs.Authenticate.init([]))
|> authenticated_accepts([])
assert %Plug.Conn{} =
build_conn()
|> Map.put(:req_headers, [{"accept", "application/json"}])
|> accepts_runtime([])
|> ApiWeb.Plugs.Authenticate.call(ApiWeb.Plugs.Authenticate.init([]))
|> authenticated_accepts(["event-stream"])
end
end
describe "CORS" do
@endpoint ApiWeb.Endpoint
test "returns the appropriate headers when an OPTIONS request is made", %{conn: conn} do
conn =
conn
|> put_req_header("x-api-key", conn.assigns.api_key)
|> put_req_header("access-control-request-headers", "x-api-key")
|> put_req_header("access-control-request-method", "GET")
|> put_req_header("origin", "http://localhost/")
response = options(conn, "/_health")
assert response.status == 200
assert ["*"] = get_resp_header(response, "access-control-allow-origin")
assert ["GET"] = get_resp_header(response, "access-control-allow-methods")
assert [headers] = get_resp_header(response, "access-control-allow-headers")
assert headers =~ "x-api-key"
end
end
end
| 37.880597 | 92 | 0.612687 |
9e757f944d52abb409aa9d285093a6768c295b6d | 1,615 | ex | Elixir | clients/analytics_admin/lib/google_api/analytics_admin/v1alpha/model/google_analytics_admin_v1alpha_delete_user_link_request.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | 1 | 2021-12-20T03:40:53.000Z | 2021-12-20T03:40:53.000Z | clients/analytics_admin/lib/google_api/analytics_admin/v1alpha/model/google_analytics_admin_v1alpha_delete_user_link_request.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | 1 | 2020-08-18T00:11:23.000Z | 2020-08-18T00:44:16.000Z | clients/analytics_admin/lib/google_api/analytics_admin/v1alpha/model/google_analytics_admin_v1alpha_delete_user_link_request.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.AnalyticsAdmin.V1alpha.Model.GoogleAnalyticsAdminV1alphaDeleteUserLinkRequest do
@moduledoc """
Request message for DeleteUserLink RPC.
## Attributes
* `name` (*type:* `String.t`, *default:* `nil`) - Required. Example format: accounts/1234/userLinks/5678
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:name => String.t() | nil
}
field(:name)
end
defimpl Poison.Decoder,
for: GoogleApi.AnalyticsAdmin.V1alpha.Model.GoogleAnalyticsAdminV1alphaDeleteUserLinkRequest do
def decode(value, options) do
GoogleApi.AnalyticsAdmin.V1alpha.Model.GoogleAnalyticsAdminV1alphaDeleteUserLinkRequest.decode(
value,
options
)
end
end
defimpl Poison.Encoder,
for: GoogleApi.AnalyticsAdmin.V1alpha.Model.GoogleAnalyticsAdminV1alphaDeleteUserLinkRequest do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
| 31.057692 | 108 | 0.75418 |
9e7584768b0ee878eeda8cb10f8319416d384fee | 2,268 | exs | Elixir | mix.exs | locaweb/elixir-cassette | 36069304427883128ad5f24d273f9300e88608ff | [
"MIT"
] | 15 | 2016-03-03T11:22:00.000Z | 2021-03-16T15:03:56.000Z | mix.exs | locaweb/elixir-cassette | 36069304427883128ad5f24d273f9300e88608ff | [
"MIT"
] | 13 | 2016-06-16T11:43:02.000Z | 2021-09-27T18:40:19.000Z | mix.exs | locaweb/elixir-cassette | 36069304427883128ad5f24d273f9300e88608ff | [
"MIT"
] | 6 | 2016-06-16T19:26:37.000Z | 2021-04-15T19:32:04.000Z | defmodule Cassette.Mixfile do
use Mix.Project
@elixir_version Version.parse!(System.version())
@min_version_for_credo Version.parse!("1.7.0")
def version, do: "1.5.3"
def project do
[
app: :cassette,
version: version(),
elixir: "~> 1.2",
description: "A CAS client and validation library",
elixirc_paths: elixirc_paths(Mix.env()),
package: package(),
docs: [
extras: ["README.md", "CONTRIBUTING.md", "LICENSE.md"]
],
test_coverage: [tool: ExCoveralls],
preferred_cli_env: [coveralls: :test, "coveralls.detail": :test, "coveralls.html": :test],
deps: deps()
]
end
def elixirc_paths(:prod), do: ["lib"]
def elixirc_paths(_), do: ["lib", "test/support"]
# Configuration for the OTP application
#
# Type "mix help compile.app" for more information
def application do
[
extra_applications: [:logger, :sweet_xml]
]
end
def package do
[
files: ["lib", "mix.exs", "README.md", "LICENSE.md", "CONTRIBUTING.md"],
maintainers: ["Ricardo Hermida Ruiz"],
licenses: ["MIT"],
links: %{
"GitHub" => "https://github.com/locaweb/elixir-cassette",
"Docs" => "https://hexdocs.pm/cassette/#{version()}"
}
]
end
# Dependencies can be Hex packages:
#
# {:mydep, "~> 0.3.0"}
#
# Or git/path repositories:
#
# {:mydep, git: "https://github.com/elixir-lang/mydep.git", tag: "0.1.0"}
#
# Type "mix help deps" for more examples and options
defp deps do
[
{:httpoison, "~> 0.8 or ~> 1.0"},
{:sweet_xml, "~> 0.6.0"},
{:ex_doc, "~> 0.11", only: :dev},
{:earmark, "~> 1.0", only: :dev},
{:bypass, "~> 1.0", only: [:dev, :test]},
{:fake_cas, "~> 1.1", only: [:dev, :test]},
{:excoveralls, "~> 0.7", only: :test}
]
|> (fn deps ->
# credo requires Elixir 1.7+, having it on older versions (CI) breaks compilation
# this can be removed when all versions on test matrix support credo
if Version.compare(@elixir_version, @min_version_for_credo) in [:gt, :eq] do
[{:credo, "~> 1.0", only: [:dev, :test], runtime: false} | deps]
else
deps
end
end).()
end
end
| 28.35 | 96 | 0.563492 |
9e75a00a8b061c9c81559323810a359a95afdbef | 1,474 | ex | Elixir | lib/surgex_web/views/error_helpers.ex | fiqus/surgex | af3ec37459abd3f17c7e9a826ca1abef1dd5fb44 | [
"MIT"
] | 8 | 2019-05-11T19:41:06.000Z | 2020-01-20T07:01:53.000Z | lib/surgex_web/views/error_helpers.ex | fiqus/surgex | af3ec37459abd3f17c7e9a826ca1abef1dd5fb44 | [
"MIT"
] | 12 | 2019-05-10T22:00:40.000Z | 2019-07-05T19:20:56.000Z | lib/surgex_web/views/error_helpers.ex | fiqus/surgex | af3ec37459abd3f17c7e9a826ca1abef1dd5fb44 | [
"MIT"
] | 1 | 2019-07-18T15:58:41.000Z | 2019-07-18T15:58:41.000Z | defmodule SurgexWeb.ErrorHelpers do
@moduledoc """
Conveniences for translating and building error messages.
"""
use Phoenix.HTML
@doc """
Generates tag for inlined form input errors.
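
  For example (sketch), rendering `<%= error_tag f, :name %>` in a
  template emits one `<span class="help-block">` per error on `:name`.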
"""
def error_tag(form, field) do
Enum.map(Keyword.get_values(form.errors, field), fn error ->
content_tag(:span, translate_error(error), class: "help-block")
end)
end
@doc """
Translates an error message using gettext.
"""
def translate_error({msg, opts}) do
# When using gettext, we typically pass the strings we want
# to translate as a static argument:
#
# # Translate "is invalid" in the "errors" domain
# dgettext("errors", "is invalid")
#
# # Translate the number of files with plural rules
# dngettext("errors", "1 file", "%{count} files", count)
#
# Because the error messages we show in our forms and APIs
# are defined inside Ecto, we need to translate them dynamically.
# This requires us to call the Gettext module passing our gettext
# backend as first argument.
#
# Note we use the "errors" domain, which means translations
# should be written to the errors.po file. The :count option is
# set by Ecto and indicates we should also apply plural rules.
if count = opts[:count] do
Gettext.dngettext(SurgexWeb.Gettext, "errors", msg, msg, count, opts)
else
Gettext.dgettext(SurgexWeb.Gettext, "errors", msg, opts)
end
end
end
| 32.755556 | 75 | 0.669607 |
9e7648fe6cc797813ddab01055b1062ef85eb991 | 272 | ex | Elixir | lib/mailman/testing_adapter.ex | uni-halle/mailman | c1a9b6d9d7f0727372f19638fef74b180d6691a6 | [
"MIT"
] | 154 | 2015-02-23T17:36:05.000Z | 2017-09-09T05:50:21.000Z | lib/mailman/testing_adapter.ex | uni-halle/mailman | c1a9b6d9d7f0727372f19638fef74b180d6691a6 | [
"MIT"
] | 67 | 2015-03-04T04:01:22.000Z | 2017-09-14T04:23:04.000Z | lib/mailman/testing_adapter.ex | uni-halle/mailman | c1a9b6d9d7f0727372f19638fef74b180d6691a6 | [
"MIT"
] | 62 | 2015-03-04T03:53:07.000Z | 2017-09-07T23:34:15.000Z | defmodule Mailman.TestingAdapter do
@moduledoc """
Implementation of the testing SMTP adapter.
"""
def deliver(config, _email, message) do
if config.store_deliveries do
Mailman.TestServer.register_delivery(message)
end
{:ok, message}
end
end
| 19.428571 | 51 | 0.713235 |
9e764a551f47791e67b51f2c55f4bf6d85eb42d7 | 29,031 | ex | Elixir | lib/elixir/lib/protocol.ex | spencerdcarlson/elixir | 23d75ecdf58df80969e12f4420282238e19219a1 | [
"Apache-2.0"
] | null | null | null | lib/elixir/lib/protocol.ex | spencerdcarlson/elixir | 23d75ecdf58df80969e12f4420282238e19219a1 | [
"Apache-2.0"
] | null | null | null | lib/elixir/lib/protocol.ex | spencerdcarlson/elixir | 23d75ecdf58df80969e12f4420282238e19219a1 | [
"Apache-2.0"
] | 1 | 2020-11-25T02:22:55.000Z | 2020-11-25T02:22:55.000Z | defmodule Protocol do
@moduledoc ~S"""
Reference and functions for working with protocols.
A protocol specifies an API that should be defined by its
implementations. A protocol is defined with `Kernel.defprotocol/2`
and its implementations with `Kernel.defimpl/2`.
## Examples
In Elixir, we have two verbs for checking how many items there
are in a data structure: `length` and `size`. `length` means the
information must be computed. For example, `length(list)` needs to
traverse the whole list to calculate its length. On the other hand,
`tuple_size(tuple)` and `byte_size(binary)` do not depend on the
tuple and binary size as the size information is precomputed in
the data structure.
  Although Elixir includes specific functions such as `tuple_size`,
  `byte_size` and `map_size`, sometimes we want to be able to
retrieve the size of a data structure regardless of its type.
In Elixir we can write polymorphic code, i.e. code that works
with different shapes/types, by using protocols. A size protocol
could be implemented as follows:
defprotocol Size do
@doc "Calculates the size (and not the length!) of a data structure"
def size(data)
end
Now that the protocol can be implemented for every data structure
the protocol may have a compliant implementation for:
defimpl Size, for: BitString do
def size(binary), do: byte_size(binary)
end
defimpl Size, for: Map do
def size(map), do: map_size(map)
end
defimpl Size, for: Tuple do
def size(tuple), do: tuple_size(tuple)
end
  Note that we didn't implement it for lists as we don't have the
  `size` information on lists; rather, its value needs to be
  computed with `length`.
The data structure you are implementing the protocol for
must be the first argument to all functions defined in the
protocol.
It is possible to implement protocols for all Elixir types:
* Structs (see below)
* `Tuple`
* `Atom`
* `List`
* `BitString`
* `Integer`
* `Float`
* `Function`
* `PID`
* `Map`
* `Port`
* `Reference`
* `Any` (see below)
## Protocols and Structs
The real benefit of protocols comes when mixed with structs.
For instance, Elixir ships with many data types implemented as
structs, like `MapSet`. We can implement the `Size` protocol
for those types as well:
defimpl Size, for: MapSet do
def size(map_set), do: MapSet.size(map_set)
end
When implementing a protocol for a struct, the `:for` option can
be omitted if the `defimpl` call is inside the module that defines
the struct:
defmodule User do
defstruct [:email, :name]
defimpl Size do
# two fields
def size(%User{}), do: 2
end
end
If a protocol implementation is not found for a given type,
invoking the protocol will raise unless it is configured to
fall back to `Any`. Conveniences for building implementations
  on top of existing ones are also available; look at `defstruct/1`
  for more information about deriving protocols.
## Fallback to `Any`
In some cases, it may be convenient to provide a default
implementation for all types. This can be achieved by setting
the `@fallback_to_any` attribute to `true` in the protocol
definition:
defprotocol Size do
@fallback_to_any true
def size(data)
end
The `Size` protocol can now be implemented for `Any`:
defimpl Size, for: Any do
def size(_), do: 0
end
  The implementation above is arguably not a reasonable one,
  though. For example, it makes no sense to say a PID or an integer
have a size of `0`. That's one of the reasons why `@fallback_to_any`
is an opt-in behaviour. For the majority of protocols, raising
an error when a protocol is not implemented is the proper behaviour.
## Multiple implementations
Protocols can also be implemented for multiple types at once:
defprotocol Reversible do
def reverse(term)
end
defimpl Reversible, for: [Map, List] do
def reverse(term), do: Enum.reverse(term)
end
Inside `defimpl/2`, you can use `@protocol` to access the protocol
being implemented and `@for` to access the module it is being
defined for.
## Types
Defining a protocol automatically defines a type named `t`, which
can be used as follows:
@spec print_size(Size.t()) :: :ok
def print_size(data) do
result =
case Size.size(data) do
0 -> "data has no items"
1 -> "data has one item"
n -> "data has #{n} items"
end
IO.puts(result)
end
The `@spec` above expresses that all types allowed to implement the
given protocol are valid argument types for the given function.
## Reflection
Any protocol module contains three extra functions:
* `__protocol__/1` - returns the protocol information. The function takes
one of the following atoms:
* `:consolidated?` - returns whether the protocol is consolidated
* `:functions` - returns a keyword list of protocol functions and their arities
* `:impls` - if consolidated, returns `{:consolidated, modules}` with the list of modules
implementing the protocol, otherwise `:not_consolidated`
* `:module` - the protocol module atom name
* `impl_for/1` - returns the module that implements the protocol for the given argument,
`nil` otherwise
* `impl_for!/1` - same as above but raises an error if an implementation is
not found
For example, for the `Enumerable` protocol we have:
iex> Enumerable.__protocol__(:functions)
[count: 1, member?: 2, reduce: 3, slice: 1]
iex> Enumerable.impl_for([])
Enumerable.List
iex> Enumerable.impl_for(42)
nil
In addition, every protocol implementation module contains the `__impl__/1`
function. The function takes one of the following atoms:
* `:for` - returns the module responsible for the data structure of the
protocol implementation
* `:protocol` - returns the protocol module for which this implementation
is provided
For example, the module implementing the `Enumerable` protocol for lists is
`Enumerable.List`. Therefore, we can invoke `__impl__/1` on this module:
iex(1)> Enumerable.List.__impl__(:for)
List
iex(2)> Enumerable.List.__impl__(:protocol)
Enumerable
## Consolidation
In order to speed up protocol dispatching, whenever all protocol implementations
are known up-front, typically after all Elixir code in a project is compiled,
Elixir provides a feature called *protocol consolidation*. Consolidation directly
links protocols to their implementations in a way that invoking a function from a
consolidated protocol is equivalent to invoking two remote functions.
Protocol consolidation is applied by default to all Mix projects during compilation.
This may be an issue during test. For instance, if you want to implement a protocol
during test, the implementation will have no effect, as the protocol has already been
consolidated. One possible solution is to include compilation directories that are
specific to your test environment in your mix.exs:
def project do
...
elixirc_paths: elixirc_paths(Mix.env())
...
end
defp elixirc_paths(:test), do: ["lib", "test/support"]
defp elixirc_paths(_), do: ["lib"]
And then you can define the implementations specific to the test environment
inside `test/support/some_file.ex`.
Another approach is to disable protocol consolidation during tests in your
mix.exs:
def project do
...
consolidate_protocols: Mix.env() != :test
...
end
Although doing so is not recommended as it may affect your test suite
performance.
Finally, note all protocols are compiled with `debug_info` set to `true`,
regardless of the option set by the `elixirc` compiler. The debug info is
used for consolidation and it is removed after consolidation unless
globally set.
"""
@doc false
defmacro def(signature)
defmacro def({_, _, args}) when args == [] or is_atom(args) do
raise ArgumentError, "protocol functions expect at least one argument"
end
defmacro def({name, _, args}) when is_atom(name) and is_list(args) do
arity = length(args)
type_args = :lists.map(fn _ -> quote(do: term) end, :lists.seq(2, arity))
type_args = [quote(do: t) | type_args]
varify = fn pos -> Macro.var(String.to_atom("arg" <> Integer.to_string(pos)), __MODULE__) end
call_args = :lists.map(varify, :lists.seq(2, arity))
call_args = [quote(do: term) | call_args]
quote do
name = unquote(name)
arity = unquote(arity)
@functions [{name, arity} | @functions]
# Generate a fake definition with the user
# signature that will be used by docs
Kernel.def(unquote(name)(unquote_splicing(args)))
# Generate the actual implementation
Kernel.def unquote(name)(unquote_splicing(call_args)) do
impl_for!(term).unquote(name)(unquote_splicing(call_args))
end
# Copy spec as callback if possible,
# otherwise generate a dummy callback
Module.spec_to_callback(__MODULE__, {name, arity}) ||
@callback unquote(name)(unquote_splicing(type_args)) :: term
end
end
defmacro def(_) do
raise ArgumentError, "invalid arguments for def inside defprotocol"
end
@doc """
  Checks if the given module is loaded and is a protocol.
Returns `:ok` if so, otherwise raises `ArgumentError`.
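
  For example, `assert_protocol!(Enumerable)` returns `:ok`, while
  passing a regular module such as `String` raises `ArgumentError`.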
"""
@spec assert_protocol!(module) :: :ok
def assert_protocol!(module) do
assert_protocol!(module, "")
end
defp assert_protocol!(module, extra) do
case Code.ensure_compiled(module) do
{:module, ^module} -> :ok
_ -> raise ArgumentError, "#{inspect(module)} is not available" <> extra
end
try do
module.__protocol__(:module)
rescue
UndefinedFunctionError ->
raise ArgumentError, "#{inspect(module)} is not a protocol" <> extra
end
:ok
end
@doc """
Checks if the given module is loaded and is an implementation
of the given protocol.
Returns `:ok` if so, otherwise raises `ArgumentError`.
"""
@spec assert_impl!(module, module) :: :ok
def assert_impl!(protocol, base) do
assert_impl!(protocol, base, "")
end
defp assert_impl!(protocol, base, extra) do
impl = Module.concat(protocol, base)
case Code.ensure_compiled(impl) do
{:module, ^impl} -> :ok
_ -> raise ArgumentError, "#{inspect(impl)} is not available" <> extra
end
try do
impl.__impl__(:protocol)
rescue
UndefinedFunctionError ->
raise ArgumentError, "#{inspect(impl)} is not an implementation of a protocol" <> extra
else
^protocol ->
:ok
other ->
raise ArgumentError,
"expected #{inspect(impl)} to be an implementation of #{inspect(protocol)}" <>
", got: #{inspect(other)}" <> extra
end
end
@doc """
Derives the `protocol` for `module` with the given options.
If your implementation passes options or if you are generating
custom code based on the struct, you will also need to implement
a macro defined as `__deriving__(module, struct, options)`
to get the options that were passed.
## Examples
defprotocol Derivable do
def ok(arg)
end
defimpl Derivable, for: Any do
defmacro __deriving__(module, struct, options) do
quote do
defimpl Derivable, for: unquote(module) do
def ok(arg) do
{:ok, arg, unquote(Macro.escape(struct)), unquote(options)}
end
end
end
end
def ok(arg) do
{:ok, arg}
end
end
defmodule ImplStruct do
@derive [Derivable]
defstruct a: 0, b: 0
end
Derivable.ok(%ImplStruct{})
{:ok, %ImplStruct{a: 0, b: 0}, %ImplStruct{a: 0, b: 0}, []}
Explicit derivations can now be called via `__deriving__`:
# Explicitly derived via `__deriving__`
Derivable.ok(%ImplStruct{a: 1, b: 1})
# Explicitly derived by API via `__deriving__`
require Protocol
Protocol.derive(Derivable, ImplStruct, :oops)
Derivable.ok(%ImplStruct{a: 1, b: 1})
"""
defmacro derive(protocol, module, options \\ []) do
quote do
Protocol.__derive__([{unquote(protocol), unquote(options)}], unquote(module), __ENV__)
end
end
## Consolidation
@doc """
Extracts all protocols from the given paths.
The paths can be either a charlist or a string. Internally
they are worked on as charlists, so passing them as lists
  avoids extra conversion.
Does not load any of the protocols.
## Examples
# Get Elixir's ebin directory path and retrieve all protocols
iex> path = :code.lib_dir(:elixir, :ebin)
iex> mods = Protocol.extract_protocols([path])
iex> Enumerable in mods
true
"""
@spec extract_protocols([charlist | String.t()]) :: [atom]
def extract_protocols(paths) do
extract_matching_by_attribute(paths, 'Elixir.', fn module, attributes ->
case attributes[:__protocol__] do
[fallback_to_any: _] -> module
_ -> nil
end
end)
end
@doc """
Extracts all types implemented for the given protocol from
the given paths.
The paths can be either a charlist or a string. Internally
they are worked on as charlists, so passing them as lists
  avoids extra conversion.
Does not load any of the implementations.
## Examples
# Get Elixir's ebin directory path and retrieve all protocols
iex> path = :code.lib_dir(:elixir, :ebin)
iex> mods = Protocol.extract_impls(Enumerable, [path])
iex> List in mods
true
"""
@spec extract_impls(module, [charlist | String.t()]) :: [atom]
def extract_impls(protocol, paths) when is_atom(protocol) do
prefix = Atom.to_charlist(protocol) ++ '.'
extract_matching_by_attribute(paths, prefix, fn _mod, attributes ->
case attributes[:__impl__] do
[protocol: ^protocol, for: for] -> for
_ -> nil
end
end)
end
defp extract_matching_by_attribute(paths, prefix, callback) do
for path <- paths,
path = to_charlist(path),
file <- list_dir(path),
mod = extract_from_file(path, file, prefix, callback),
do: mod
end
defp list_dir(path) when is_list(path) do
case :file.list_dir(path) do
{:ok, files} -> files
_ -> []
end
end
defp extract_from_file(path, file, prefix, callback) do
if :lists.prefix(prefix, file) and :filename.extension(file) == '.beam' do
extract_from_beam(:filename.join(path, file), callback)
end
end
defp extract_from_beam(file, callback) do
case :beam_lib.chunks(file, [:attributes]) do
{:ok, {module, [attributes: attributes]}} ->
callback.(module, attributes)
_ ->
nil
end
end
@doc """
Returns `true` if the protocol was consolidated.
"""
@spec consolidated?(module) :: boolean
def consolidated?(protocol) do
protocol.__protocol__(:consolidated?)
end
@doc """
Receives a protocol and a list of implementations and
consolidates the given protocol.
Consolidation happens by changing the protocol `impl_for`
in the abstract format to have fast lookup rules. Usually
the list of implementations to use during consolidation
are retrieved with the help of `extract_impls/2`.
It returns the updated version of the protocol bytecode.
If the first element of the tuple is `:ok`, it means
the protocol was consolidated.
A given bytecode or protocol implementation can be checked
to be consolidated or not by analyzing the protocol
attribute:
Protocol.consolidated?(Enumerable)
  This function does not load the protocol at any point,
  nor does it load the new bytecode for the compiled module.
  However, each implementation must be available and
  it will be loaded.
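
  A minimal sketch of a full consolidation pass (the `ebin` path below
  is only an example source of compiled implementations):

      path = :code.lib_dir(:elixir, :ebin)
      impls = Protocol.extract_impls(Enumerable, [path])
      {:ok, binary} = Protocol.consolidate(Enumerable, impls)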
"""
@spec consolidate(module, [module]) ::
{:ok, binary}
| {:error, :not_a_protocol}
| {:error, :no_beam_info}
def consolidate(protocol, types) when is_atom(protocol) do
with {:ok, ast_info, specs, compile_info} <- beam_protocol(protocol),
{:ok, definitions} <- change_debug_info(protocol, ast_info, types),
do: compile(definitions, specs, compile_info)
end
defp beam_protocol(protocol) do
chunk_ids = [:debug_info, 'Docs', 'ExCk']
opts = [:allow_missing_chunks]
case :beam_lib.chunks(beam_file(protocol), chunk_ids, opts) do
{:ok, {^protocol, [{:debug_info, debug_info} | chunks]}} ->
{:debug_info_v1, _backend, {:elixir_v1, info, specs}} = debug_info
%{attributes: attributes, definitions: definitions} = info
chunks = :lists.filter(fn {_name, value} -> value != :missing_chunk end, chunks)
chunks = :lists.map(fn {name, value} -> {List.to_string(name), value} end, chunks)
case attributes[:__protocol__] do
[fallback_to_any: any] ->
{:ok, {any, definitions}, specs, {info, chunks}}
_ ->
{:error, :not_a_protocol}
end
_ ->
{:error, :no_beam_info}
end
end
defp beam_file(module) when is_atom(module) do
case :code.which(module) do
[_ | _] = file -> file
_ -> module
end
end
# Change the debug information to the optimized
# impl_for/1 dispatch version.
defp change_debug_info(protocol, {any, definitions}, types) do
types = if any, do: types, else: List.delete(types, Any)
all = [Any] ++ for {_guard, mod} <- __built_in__(), do: mod
structs = types -- all
case List.keytake(definitions, {:__protocol__, 1}, 0) do
{protocol_def, definitions} ->
{impl_for, definitions} = List.keytake(definitions, {:impl_for, 1}, 0)
{struct_impl_for, definitions} = List.keytake(definitions, {:struct_impl_for, 1}, 0)
protocol_def = change_protocol(protocol_def, types)
impl_for = change_impl_for(impl_for, protocol, types)
struct_impl_for = change_struct_impl_for(struct_impl_for, protocol, types, structs)
{:ok, [protocol_def, impl_for, struct_impl_for] ++ definitions}
nil ->
{:error, :not_a_protocol}
end
end
defp change_protocol({_name, _kind, meta, clauses}, types) do
clauses =
Enum.map(clauses, fn
{meta, [:consolidated?], [], _} -> {meta, [:consolidated?], [], true}
{meta, [:impls], [], _} -> {meta, [:impls], [], {:consolidated, types}}
clause -> clause
end)
{{:__protocol__, 1}, :def, meta, clauses}
end
defp change_impl_for({_name, _kind, meta, _clauses}, protocol, types) do
fallback = if Any in types, do: load_impl(protocol, Any)
line = meta[:line]
clauses =
for {guard, mod} <- __built_in__(),
mod in types,
do: built_in_clause_for(mod, guard, protocol, meta, line)
struct_clause = struct_clause_for(meta, line)
fallback_clause = fallback_clause_for(fallback, protocol, meta)
clauses = [struct_clause] ++ clauses ++ [fallback_clause]
{{:impl_for, 1}, :def, meta, clauses}
end
defp change_struct_impl_for({_name, _kind, meta, _clauses}, protocol, types, structs) do
fallback = if Any in types, do: load_impl(protocol, Any)
clauses = for struct <- structs, do: each_struct_clause_for(struct, protocol, meta)
clauses = clauses ++ [fallback_clause_for(fallback, protocol, meta)]
{{:struct_impl_for, 1}, :defp, meta, clauses}
end
defp built_in_clause_for(mod, guard, protocol, meta, line) do
x = {:x, [line: line, version: -1], __MODULE__}
guard = quote(line: line, do: :erlang.unquote(guard)(unquote(x)))
body = load_impl(protocol, mod)
{meta, [x], [guard], body}
end
defp struct_clause_for(meta, line) do
x = {:x, [line: line, version: -1], __MODULE__}
head = quote(line: line, do: %{__struct__: unquote(x)})
guard = quote(line: line, do: :erlang.is_atom(unquote(x)))
body = quote(line: line, do: struct_impl_for(unquote(x)))
{meta, [head], [guard], body}
end
defp each_struct_clause_for(struct, protocol, meta) do
{meta, [struct], [], load_impl(protocol, struct)}
end
defp fallback_clause_for(value, _protocol, meta) do
{meta, [quote(do: _)], [], value}
end
defp load_impl(protocol, for) do
Module.concat(protocol, for).__impl__(:target)
end
# Finally compile the module and emit its bytecode.
defp compile(definitions, specs, {info, chunks}) do
info = %{info | definitions: definitions}
{:ok, :elixir_erl.consolidate(info, specs, chunks)}
end
## Definition callbacks
@doc false
def __protocol__(name, do: block) do
quote do
defmodule unquote(name) do
# We don't allow function definition inside protocols
import Kernel,
except: [
defmacrop: 1,
defmacrop: 2,
defmacro: 1,
defmacro: 2,
defp: 1,
defp: 2,
def: 1,
def: 2
]
# Import the new dsl that holds the new def
import Protocol, only: [def: 1]
# Compile with debug info for consolidation
@compile :debug_info
# Set up a clear slate to store defined functions
@functions []
@fallback_to_any false
# Invoke the user given block
_ = unquote(block)
# Finalize expansion
unquote(after_defprotocol())
end
end
end
defp after_defprotocol do
quote bind_quoted: [built_in: __built_in__()] do
any_impl_for =
if @fallback_to_any do
quote do: unquote(__MODULE__.Any).__impl__(:target)
else
nil
end
# Disable Dialyzer checks - before and after consolidation
# the types could be more strict
@dialyzer {:nowarn_function, __protocol__: 1, impl_for: 1, impl_for!: 1}
@doc false
@spec impl_for(term) :: atom | nil
Kernel.def(impl_for(data))
# Define the implementation for structs.
#
# It simply delegates to struct_impl_for which is then
# optimized during protocol consolidation.
Kernel.def impl_for(%struct{}) do
struct_impl_for(struct)
end
# Define the implementation for built-ins
:lists.foreach(
fn {guard, mod} ->
target = Module.concat(__MODULE__, mod)
Kernel.def impl_for(data) when :erlang.unquote(guard)(data) do
try do
unquote(target).__impl__(:target)
rescue
UndefinedFunctionError ->
unquote(any_impl_for)
end
end
end,
built_in
)
# Define a catch-all impl_for/1 clause to pacify Dialyzer (since
# destructuring opaque types is illegal, Dialyzer will think none of the
# previous clauses matches opaque types, and without this clause, will
# conclude that impl_for can't handle an opaque argument). This is a hack
# since it relies on Dialyzer not being smart enough to conclude that all
# opaque types will get the any_impl_for/0 implementation.
Kernel.def impl_for(_) do
unquote(any_impl_for)
end
@doc false
@spec impl_for!(term) :: atom
if any_impl_for do
Kernel.def impl_for!(data) do
impl_for(data)
end
else
Kernel.def impl_for!(data) do
impl_for(data) || raise(Protocol.UndefinedError, protocol: __MODULE__, value: data)
end
end
# Internal handler for Structs
Kernel.defp struct_impl_for(struct) do
target = Module.concat(__MODULE__, struct)
try do
target.__impl__(:target)
rescue
UndefinedFunctionError ->
unquote(any_impl_for)
end
end
# Inline struct implementation for performance
@compile {:inline, struct_impl_for: 1}
unless Module.defines_type?(__MODULE__, {:t, 0}) do
@type t :: term
end
# Store information as an attribute so it
# can be read without loading the module.
Module.register_attribute(__MODULE__, :__protocol__, persist: true)
@__protocol__ [fallback_to_any: !!@fallback_to_any]
@doc false
@spec __protocol__(:module) :: __MODULE__
@spec __protocol__(:functions) :: unquote(Protocol.__functions_spec__(@functions))
@spec __protocol__(:consolidated?) :: boolean
@spec __protocol__(:impls) :: :not_consolidated | {:consolidated, [module]}
Kernel.def(__protocol__(:module), do: __MODULE__)
Kernel.def(__protocol__(:functions), do: unquote(:lists.sort(@functions)))
Kernel.def(__protocol__(:consolidated?), do: false)
Kernel.def(__protocol__(:impls), do: :not_consolidated)
end
end
@doc false
def __functions_spec__([]), do: []
def __functions_spec__([head | tail]),
do: [:lists.foldl(&{:|, [], [&1, &2]}, head, tail), quote(do: ...)]
@doc false
def __impl__(protocol, opts) do
do_defimpl(protocol, :lists.keysort(1, opts))
end
defp do_defimpl(protocol, do: block, for: for) when is_list(for) do
for f <- for, do: do_defimpl(protocol, do: block, for: f)
end
defp do_defimpl(protocol, do: block, for: for) do
# Unquote the implementation just later
# when all variables will already be injected
# into the module body.
impl =
quote unquote: false do
@doc false
@spec __impl__(:for) :: unquote(for)
@spec __impl__(:target) :: __MODULE__
@spec __impl__(:protocol) :: unquote(protocol)
def __impl__(:for), do: unquote(for)
def __impl__(:target), do: __MODULE__
def __impl__(:protocol), do: unquote(protocol)
end
quote do
protocol = unquote(protocol)
for = unquote(for)
name = Module.concat(protocol, for)
Protocol.assert_protocol!(protocol)
Protocol.__ensure_defimpl__(protocol, for, __ENV__)
defmodule name do
@behaviour protocol
@protocol protocol
@for for
unquote(block)
Module.register_attribute(__MODULE__, :__impl__, persist: true)
@__impl__ [protocol: @protocol, for: @for]
unquote(impl)
end
end
end
@doc false
def __derive__(derives, for, %Macro.Env{} = env) when is_atom(for) do
struct = Macro.struct!(for, env)
foreach = fn
proto when is_atom(proto) ->
derive(proto, for, struct, [], env)
{proto, opts} when is_atom(proto) ->
derive(proto, for, struct, opts, env)
end
:lists.foreach(foreach, :lists.flatten(derives))
:ok
end
defp derive(protocol, for, struct, opts, env) do
extra = ", cannot derive #{inspect(protocol)} for #{inspect(for)}"
assert_protocol!(protocol, extra)
__ensure_defimpl__(protocol, for, env)
assert_impl!(protocol, Any, extra)
# Clean up variables from eval context
env = :elixir_env.reset_vars(env)
args = [for, struct, opts]
impl = Module.concat(protocol, Any)
:elixir_module.expand_callback(env.line, impl, :__deriving__, args, env, fn mod, fun, args ->
if function_exported?(mod, fun, length(args)) do
apply(mod, fun, args)
else
quoted =
quote do
Module.register_attribute(__MODULE__, :__impl__, persist: true)
@__impl__ [protocol: unquote(protocol), for: unquote(for)]
@doc false
@spec __impl__(:target) :: unquote(impl)
@spec __impl__(:protocol) :: unquote(protocol)
@spec __impl__(:for) :: unquote(for)
def __impl__(:target), do: unquote(impl)
def __impl__(:protocol), do: unquote(protocol)
def __impl__(:for), do: unquote(for)
end
Module.create(Module.concat(protocol, for), quoted, Macro.Env.location(env))
end
end)
end
@doc false
def __ensure_defimpl__(protocol, for, env) do
if Protocol.consolidated?(protocol) do
message =
"the #{inspect(protocol)} protocol has already been consolidated, an " <>
"implementation for #{inspect(for)} has no effect. If you want to " <>
"implement protocols after compilation or during tests, check the " <>
"\"Consolidation\" section in the Protocol module documentation"
IO.warn(message, Macro.Env.stacktrace(env))
end
:ok
end
## Helpers
@doc false
def __built_in__ do
[
is_tuple: Tuple,
is_atom: Atom,
is_list: List,
is_map: Map,
is_bitstring: BitString,
is_integer: Integer,
is_float: Float,
is_function: Function,
is_pid: PID,
is_port: Port,
is_reference: Reference
]
end
end
| 30.623418 | 97 | 0.651752 |
9e764eb50667ec1db4189479df0a2069db1136c3 | 637 | ex | Elixir | lib/randex/generator/base.ex | ananthakumaran/randex | 3e3783e00cba625c5354638248f82ae84e8c2941 | [
"MIT"
] | 10 | 2018-09-23T07:44:26.000Z | 2021-11-04T05:50:21.000Z | lib/randex/generator/base.ex | ananthakumaran/randex | 3e3783e00cba625c5354638248f82ae84e8c2941 | [
"MIT"
] | null | null | null | lib/randex/generator/base.ex | ananthakumaran/randex | 3e3783e00cba625c5354638248f82ae84e8c2941 | [
"MIT"
] | null | null | null | defmodule Randex.Generator.Base do
@moduledoc false
defmacro __using__(_opts) do
quote do
def repeat(amb, n, fun) do
if n == 0 do
amb
else
repeat(fun.(amb), n - 1, fun)
end
end
def string do
member_of(?\s..?~)
|> map(&List.to_string([&1]))
end
def integer(range) do
member_of(range)
end
def bind(amb, fun) do
bind_filter(amb, fn x ->
{:cont, fun.(x)}
end)
end
def map(amb, fun) do
bind(amb, fn x ->
constant(fun.(x))
end)
end
end
end
end
| 17.216216 | 39 | 0.472527 |
9e766c42eef9584bfdb724f4371badd71cfd000e | 1,595 | exs | Elixir | config/dev.exs | selfup/exdaas | 60b522397e0c1e3ea5b3520573c9aedb99dd76b9 | [
"MIT"
] | 8 | 2018-03-21T17:44:35.000Z | 2020-01-12T03:24:18.000Z | config/dev.exs | selfup/exdaas | 60b522397e0c1e3ea5b3520573c9aedb99dd76b9 | [
"MIT"
] | 5 | 2018-03-22T02:03:10.000Z | 2018-04-03T15:19:03.000Z | config/dev.exs | selfup/exdaas | 60b522397e0c1e3ea5b3520573c9aedb99dd76b9 | [
"MIT"
] | 1 | 2018-03-30T11:09:40.000Z | 2018-03-30T11:09:40.000Z | use Mix.Config
# For development, we disable any cache and enable
# debugging and code reloading.
#
# The watchers configuration can be used to run external
# watchers to your application. For example, we use it
# with brunch.io to recompile .js and .css sources.
config :exdaas, ExDaasWeb.Endpoint,
http: [port: 4000],
debug_errors: true,
code_reloader: true,
check_origin: false,
watchers: []
# ## SSL Support
#
# In order to use HTTPS in development, a self-signed
# certificate can be generated by running the following
# command from your terminal:
#
# openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 -subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=www.example.com" -keyout priv/server.key -out priv/server.pem
#
# The `http:` config above can be replaced with:
#
# https: [port: 4000, keyfile: "priv/server.key", certfile: "priv/server.pem"],
#
# If desired, both `http:` and `https:` keys can be
# configured to run both http and https servers on
# different ports.
# Watch static and templates for browser reloading.
config :exdaas, ExDaasWeb.Endpoint,
live_reload: [
patterns: [
~r{priv/static/.*(js|css|png|jpeg|jpg|gif|svg)$},
~r{priv/gettext/.*(po)$},
~r{lib/exdaas_web/views/.*(ex)$},
~r{lib/exdaas_web/templates/.*(eex)$}
]
]
# Do not include metadata nor timestamps in development logs
config :logger, :console, format: "[$level] $message\n"
# Set a higher stacktrace during development. Avoid configuring such
# in production as building large stacktraces may be expensive.
config :phoenix, :stacktrace_depth, 20
| 32.55102 | 170 | 0.710345 |
9e767c7ffc246aa709e397b0bf2be890118344e6 | 4,745 | ex | Elixir | deps/phoenix_html/lib/phoenix_html/engine.ex | bruguedes/nlw5-trilha-elixir | 4de5ecdce766c4371da80cd69ea08283838c66cd | [
"MIT"
] | 2 | 2020-05-06T14:51:56.000Z | 2020-05-06T14:52:21.000Z | deps/phoenix_html/lib/phoenix_html/engine.ex | rwtrecs/rocketseat-nlw5-inmana | 8ce8bc32e0bdd005c423394bb163945747b557e2 | [
"MIT"
] | 3 | 2021-06-20T14:51:14.000Z | 2021-06-25T00:56:11.000Z | deps/phoenix_html/lib/phoenix_html/engine.ex | rwtrecs/rocketseat-nlw5-inmana | 8ce8bc32e0bdd005c423394bb163945747b557e2 | [
"MIT"
] | 1 | 2020-05-16T22:44:14.000Z | 2020-05-16T22:44:14.000Z | defmodule Phoenix.HTML.Engine do
@moduledoc """
This is an implementation of EEx.Engine that guarantees
templates are HTML Safe.
The `encode_to_iodata!/1` function converts the rendered
template result into iodata.
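
  An illustrative sketch of the conversion (the escaped output assumes
  `Plug.HTML`'s standard escaping rules):

      encode_to_iodata!({:safe, "<b>ok</b>"})
      #=> "<b>ok</b>"

      encode_to_iodata!("<b>ok</b>")
      #=> "&lt;b&gt;ok&lt;/b&gt;"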
"""
@behaviour EEx.Engine
@anno (if :erlang.system_info(:otp_release) >= '19' do
[generated: true]
else
[line: -1]
end)
@doc """
Encodes the HTML templates to iodata.
"""
def encode_to_iodata!({:safe, body}), do: body
def encode_to_iodata!(body) when is_binary(body), do: Plug.HTML.html_escape(body)
def encode_to_iodata!(other), do: Phoenix.HTML.Safe.to_iodata(other)
@doc false
def init(_opts) do
%{
iodata: [],
dynamic: [],
vars_count: 0
}
end
@doc false
def handle_begin(state) do
%{state | iodata: [], dynamic: []}
end
@doc false
def handle_end(quoted) do
handle_body(quoted)
end
@doc false
def handle_body(state) do
%{iodata: iodata, dynamic: dynamic} = state
safe = {:safe, Enum.reverse(iodata)}
{:__block__, [], Enum.reverse([safe | dynamic])}
end
@doc false
def handle_text(state, text) do
handle_text(state, [], text)
end
@doc false
def handle_text(state, _meta, text) do
%{iodata: iodata} = state
%{state | iodata: [text | iodata]}
end
@doc false
def handle_expr(state, "=", ast) do
ast = traverse(ast)
%{iodata: iodata, dynamic: dynamic, vars_count: vars_count} = state
var = Macro.var(:"arg#{vars_count}", __MODULE__)
ast = quote do: unquote(var) = unquote(to_safe(ast))
%{state | dynamic: [ast | dynamic], iodata: [var | iodata], vars_count: vars_count + 1}
end
def handle_expr(state, "", ast) do
ast = traverse(ast)
%{dynamic: dynamic} = state
%{state | dynamic: [ast | dynamic]}
end
def handle_expr(state, marker, ast) do
EEx.Engine.handle_expr(state, marker, ast)
end
## Safe conversion
defp to_safe(ast) do
to_safe(ast, line_from_expr(ast))
end
defp line_from_expr({_, meta, _}) when is_list(meta), do: Keyword.get(meta, :line, 0)
defp line_from_expr(_), do: 0
# We can do the work at compile time
defp to_safe(literal, _line) when is_binary(literal) or is_atom(literal) or is_number(literal) do
Phoenix.HTML.Safe.to_iodata(literal)
end
# We can do the work at runtime
defp to_safe(literal, line) when is_list(literal) do
quote line: line, do: Phoenix.HTML.Safe.List.to_iodata(unquote(literal))
end
# We need to check at runtime and we do so by optimizing common cases.
defp to_safe(expr, line) do
# Keep stacktraces for protocol dispatch and coverage
safe_return = quote line: line, do: data
bin_return = quote line: line, do: Plug.HTML.html_escape_to_iodata(bin)
other_return = quote line: line, do: Phoenix.HTML.Safe.to_iodata(other)
# However ignore them for the generated clauses to avoid warnings
quote @anno do
case unquote(expr) do
{:safe, data} -> unquote(safe_return)
bin when is_binary(bin) -> unquote(bin_return)
other -> unquote(other_return)
end
end
end
## Traversal
defp traverse(expr) do
Macro.prewalk(expr, &handle_assign/1)
end
defp handle_assign({:@, meta, [{name, _, atom}]}) when is_atom(name) and is_atom(atom) do
quote line: meta[:line] || 0 do
Phoenix.HTML.Engine.fetch_assign!(var!(assigns), unquote(name))
end
end
defp handle_assign(arg), do: arg
@doc false
def fetch_assign!(assigns, key) do
case Access.fetch(assigns, key) do
{:ok, val} ->
val
:error ->
deprecated_fallback(key, assigns) ||
raise ArgumentError, """
assign @#{key} not available in eex template.
Please make sure all proper assigns have been set. If this
is a child template, ensure assigns are given explicitly by
the parent template as they are not automatically forwarded.
Available assigns: #{inspect(Enum.map(assigns, &elem(&1, 0)))}
"""
end
end
defp deprecated_fallback(:view_module, %{conn: %{private: %{phoenix_view: view_module}}}) do
IO.warn """
using @view_module is deprecated, please use view_module(conn) instead.
If using render(@view_module, @view_template, assigns), replace it by @inner_content.
"""
view_module
end
defp deprecated_fallback(:view_template, %{conn: %{private: %{phoenix_template: view_template}}}) do
IO.warn """
using @view_template is deprecated, please use view_template(conn) instead.
If using render(@view_module, @view_template, assigns), replace it by @inner_content.
"""
view_template
end
defp deprecated_fallback(_, _), do: nil
end
| 27.427746 | 102 | 0.658799 |
9e76ba688b09c4ce640180a3c37bdcfa9785147e | 291 | ex | Elixir | lib/arrow_web/controllers/health_controller.ex | paulswartz/arrow | c1ba1ce52107c0ed94ce9bca2fef2bfeb606b8f9 | [
"MIT"
] | null | null | null | lib/arrow_web/controllers/health_controller.ex | paulswartz/arrow | c1ba1ce52107c0ed94ce9bca2fef2bfeb606b8f9 | [
"MIT"
] | 775 | 2019-11-18T16:23:57.000Z | 2022-03-28T18:20:04.000Z | lib/arrow_web/controllers/health_controller.ex | paulswartz/arrow | c1ba1ce52107c0ed94ce9bca2fef2bfeb606b8f9 | [
"MIT"
] | 1 | 2019-12-23T13:52:25.000Z | 2019-12-23T13:52:25.000Z | defmodule ArrowWeb.HealthController do
@moduledoc """
Simple controller to return 200 OK when website is running. This
is used by the AWS ALB to determine the health of the target.
"""
use ArrowWeb, :controller
def index(conn, _params) do
send_resp(conn, 200, "")
end
end
| 24.25 | 66 | 0.718213 |
9e76d2a0c06a371780f2d02d05ea45a375084552 | 238 | exs | Elixir | priv/repo/migrations/20210921020101_relationship_post_comments.exs | AkioCode/elxpro-blog | 236984915851b91058e091414deb70c5e8fed72a | [
"MIT"
] | null | null | null | priv/repo/migrations/20210921020101_relationship_post_comments.exs | AkioCode/elxpro-blog | 236984915851b91058e091414deb70c5e8fed72a | [
"MIT"
] | 4 | 2021-08-11T03:19:33.000Z | 2021-09-26T01:29:58.000Z | priv/repo/migrations/20210921020101_relationship_post_comments.exs | AkioCode/elxpro-blog | 236984915851b91058e091414deb70c5e8fed72a | [
"MIT"
] | null | null | null | defmodule ElxproBlog.Repo.Migrations.RelationshipPostComments do
use Ecto.Migration
def change do
alter table(:comments) do
add :post_id, references(:posts, on_delete: :delete_all, on_update: :update_all)
end
end
end
| 23.8 | 86 | 0.747899 |
9e76d57191b1c5321e85075ef69569d3e6f2c809 | 2,052 | exs | Elixir | test/hedwig_flowdock/streaming_connection_test.exs | massive/hedwig_flowdock | 36b0e880ae182b6335cc01ddcf39d0c906b05000 | [
"MIT"
] | null | null | null | test/hedwig_flowdock/streaming_connection_test.exs | massive/hedwig_flowdock | 36b0e880ae182b6335cc01ddcf39d0c906b05000 | [
"MIT"
] | null | null | null | test/hedwig_flowdock/streaming_connection_test.exs | massive/hedwig_flowdock | 36b0e880ae182b6335cc01ddcf39d0c906b05000 | [
"MIT"
] | null | null | null | defmodule Hedwig.Adapters.Flowdock.StreamingConnectionTest do
import Hedwig.Adapters.Flowdock.StreamingConnection
use ExUnit.Case
doctest Hedwig.Adapters.Flowdock.StreamingConnection
@sample_flow_response %{
"access_mode" => "invitation",
"api_token" => "1f3f5aad4e9bb66319ab0554ee660dc0",
"description" => "Discussion amongst distributed nutjobs",
"email" => "[email protected]",
"flow_admin" => true,
"id" => "3d14e900-977d-49af-a7ec-64aaf142bba6",
"joined" => true,
"last_message_at" => "2016-06-01T05:05:21.471Z",
"last_message_id" => 494281,
"name" => "pwd/whoami",
"open" => true,
"organization" => %{
"active" => true,
"flow_admins" => false,
"id" => 44373,
"name" => "pwd && whoami",
"parameterized_name" => "pwd-whoami",
"url" => "https://api.flowdock.com/organizations/pwd-whoami",
"user_count" => 5,
"user_limit" => 5
},
"parameterized_name" => "pwd-whoami",
"team_notifications" => true,
"url" => "https://api.flowdock.com/flows/pwd-whoami/pwd-whoami",
"web_url" => "https://www.flowdock.com/app/pwd-whoami/pwd-whoami"
}
test "paramaterize_flows" do
assert parameterize_flows([@sample_flow_response]) == "pwd-whoami/pwd-whoami"
assert parameterize_flows([@sample_flow_response, @sample_flow_response]) == "pwd-whoami/pwd-whoami,pwd-whoami/pwd-whoami"
end
end
| 51.3 | 126 | 0.460039 |
9e76d643792de326852ff8ab12fc88134aa2fcd5 | 6,944 | exs | Elixir | test/tesla/multipart_test.exs | zacck/tesla | 3c669cfd28296bbc7b168c174eccc13087d8f9a4 | [
"MIT"
] | 1 | 2020-12-21T03:45:23.000Z | 2020-12-21T03:45:23.000Z | test/tesla/multipart_test.exs | zacck/tesla | 3c669cfd28296bbc7b168c174eccc13087d8f9a4 | [
"MIT"
] | null | null | null | test/tesla/multipart_test.exs | zacck/tesla | 3c669cfd28296bbc7b168c174eccc13087d8f9a4 | [
"MIT"
] | null | null | null | defmodule Tesla.MultipartTest do
use ExUnit.Case
alias Tesla.Multipart
test "headers" do
mp = Multipart.new()
headers = Multipart.headers(mp)
assert headers == [{"content-type", "multipart/form-data; boundary=#{mp.boundary}"}]
end
test "add content-type param" do
mp =
Multipart.new()
|> Multipart.add_content_type_param("charset=utf-8")
headers = Multipart.headers(mp)
assert headers == [
{"content-type", "multipart/form-data; boundary=#{mp.boundary}; charset=utf-8"}
]
end
test "add content-type params" do
mp =
Multipart.new()
|> Multipart.add_content_type_param("charset=utf-8")
|> Multipart.add_content_type_param("foo=bar")
headers = Multipart.headers(mp)
assert headers == [
{"content-type",
"multipart/form-data; boundary=#{mp.boundary}; charset=utf-8; foo=bar"}
]
end
test "add_field" do
mp =
Multipart.new()
|> Multipart.add_field("foo", "bar")
body = Multipart.body(mp) |> Enum.join()
assert body == """
--#{mp.boundary}\r
content-disposition: form-data; name="foo"\r
\r
bar\r
--#{mp.boundary}--\r
"""
end
test "add_field with extra headers" do
mp =
Multipart.new()
|> Multipart.add_field(
"foo",
"bar",
headers: [{"content-id", "1"}, {"content-type", "text/plain"}]
)
body = Multipart.body(mp) |> Enum.join()
assert body == """
--#{mp.boundary}\r
content-id: 1\r
content-type: text/plain\r
content-disposition: form-data; name="foo"\r
\r
bar\r
--#{mp.boundary}--\r
"""
end
test "add_file (filename only)" do
mp =
Multipart.new()
|> Multipart.add_file("test/tesla/multipart_test_file.sh")
body = Multipart.body(mp) |> Enum.join()
assert body == """
--#{mp.boundary}\r
content-disposition: form-data; name="file"; filename="multipart_test_file.sh"\r
\r
#!/usr/bin/env bash
echo "test multipart file"
\r
--#{mp.boundary}--\r
"""
end
test "add_file (filename with name)" do
mp =
Multipart.new()
|> Multipart.add_file("test/tesla/multipart_test_file.sh", name: "foobar")
body = Multipart.body(mp) |> Enum.join()
assert body == """
--#{mp.boundary}\r
content-disposition: form-data; name="foobar"; filename="multipart_test_file.sh"\r
\r
#!/usr/bin/env bash
echo "test multipart file"
\r
--#{mp.boundary}--\r
"""
end
test "add_file (custom filename)" do
mp =
Multipart.new()
|> Multipart.add_file("test/tesla/multipart_test_file.sh", filename: "custom.png")
body = Multipart.body(mp) |> Enum.join()
assert body == """
--#{mp.boundary}\r
content-disposition: form-data; name="file"; filename="custom.png"\r
\r
#!/usr/bin/env bash
echo "test multipart file"
\r
--#{mp.boundary}--\r
"""
end
test "add_file (filename with name, extra headers)" do
mp =
Multipart.new()
|> Multipart.add_file(
"test/tesla/multipart_test_file.sh",
name: "foobar",
headers: [{"content-id", "1"}, {"content-type", "text/plain"}]
)
body = Multipart.body(mp) |> Enum.join()
assert body == """
--#{mp.boundary}\r
content-id: 1\r
content-type: text/plain\r
content-disposition: form-data; name="foobar"; filename="multipart_test_file.sh"\r
\r
#!/usr/bin/env bash
echo "test multipart file"
\r
--#{mp.boundary}--\r
"""
end
test "add_file (detect content type)" do
mp =
Multipart.new()
|> Multipart.add_file("test/tesla/multipart_test_file.sh", detect_content_type: true)
body = Multipart.body(mp) |> Enum.join()
assert body == """
--#{mp.boundary}\r
content-type: application/x-sh\r
content-disposition: form-data; name="file"; filename="multipart_test_file.sh"\r
\r
#!/usr/bin/env bash
echo "test multipart file"
\r
--#{mp.boundary}--\r
"""
end
test "add_file (detect content type overrides given header)" do
mp =
Multipart.new()
|> Multipart.add_file(
"test/tesla/multipart_test_file.sh",
detect_content_type: true,
headers: [{"content-type", "foo/bar"}]
)
body = Multipart.body(mp) |> Enum.join()
assert body == """
--#{mp.boundary}\r
content-type: application/x-sh\r
content-disposition: form-data; name="file"; filename="multipart_test_file.sh"\r
\r
#!/usr/bin/env bash
echo "test multipart file"
\r
--#{mp.boundary}--\r
"""
end
test "add_file_content" do
mp =
Multipart.new()
|> Multipart.add_file_content("file-data", "data.gif")
body = Multipart.body(mp) |> Enum.join()
assert body == """
--#{mp.boundary}\r
content-disposition: form-data; name="file"; filename="data.gif"\r
\r
file-data\r
--#{mp.boundary}--\r
"""
end
test "add non-existing file" do
mp =
Multipart.new()
|> Multipart.add_file("i-do-not-exists.txt")
assert_raise File.Error, fn ->
mp |> Multipart.body() |> Enum.to_list()
end
end
describe "add_field" do
test "numbers raise argument error" do
assert_raise ArgumentError, fn ->
Multipart.new()
|> Multipart.add_field("foo", 123)
end
assert_raise ArgumentError, fn ->
Multipart.new()
|> Multipart.add_field("bar", 123.00)
end
end
test "maps raise argument error" do
assert_raise ArgumentError, fn ->
Multipart.new()
|> Multipart.add_field("foo", %{hello: :world})
end
end
test "Iodata" do
mp =
Multipart.new()
|> Multipart.add_field("foo", ["bar", "baz"])
body = Multipart.body(mp) |> Enum.join()
assert body == """
--#{mp.boundary}\r
content-disposition: form-data; name="foo"\r
\r
barbaz\r
--#{mp.boundary}--\r
"""
end
test "IO.Stream" do
mp =
Multipart.new()
|> Multipart.add_field("foo", %IO.Stream{})
assert is_function(Multipart.body(mp))
end
test "File.Stream" do
mp =
Multipart.new()
|> Multipart.add_field("foo", %File.Stream{})
assert is_function(Multipart.body(mp))
end
end
end
| 25.068592 | 93 | 0.532978 |
9e76d7cff822ba78a7733f384095c007d7af5389 | 6,158 | ex | Elixir | lib/bolt_sips/internals/pack_stream/decoder_v1.ex | cheerfulstoic/bolt_sips | e86d6443f69d59f6cc41ecae5d0718ed05ea4904 | [
"Apache-2.0"
] | null | null | null | lib/bolt_sips/internals/pack_stream/decoder_v1.ex | cheerfulstoic/bolt_sips | e86d6443f69d59f6cc41ecae5d0718ed05ea4904 | [
"Apache-2.0"
] | null | null | null | lib/bolt_sips/internals/pack_stream/decoder_v1.ex | cheerfulstoic/bolt_sips | e86d6443f69d59f6cc41ecae5d0718ed05ea4904 | [
"Apache-2.0"
] | null | null | null | defmodule Bolt.Sips.Internals.PackStream.DecoderV1 do
@moduledoc false
_moduledoc = """
Bolt V1 can decode:
- Null
- Boolean
- Integer
- Float
- String
- List
- Map
- Struct
Functions from this module are not meant to be used directly.
Use `Decoder.decode(data, bolt_version)` for all decoding purposes.
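
  A sketch of decoding a tiny string via the dispatcher, with the byte
  layout assumed from the PackStream markers (`0x82` marks a tiny
  string of length 2):

      Decoder.decode(<<0x82, ?a, ?b>>, 1)
      #=> ["ab"]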
"""
use Bolt.Sips.Internals.PackStream.Markers
alias Bolt.Sips.Internals.PackStream.Decoder
alias Bolt.Sips.Types
@spec decode(binary() | {integer(), binary(), integer()}, integer()) ::
list() | {:error, :not_implemented}
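  # Null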
def decode(<<@null_marker, rest::binary>>, bolt_version) do
[nil | Decoder.decode(rest, bolt_version)]
end
# Boolean
def decode(<<@true_marker, rest::binary>>, bolt_version) do
[true | Decoder.decode(rest, bolt_version)]
end
def decode(<<@false_marker, rest::binary>>, bolt_version) do
[false | Decoder.decode(rest, bolt_version)]
end
# Float
def decode(<<@float_marker, number::float, rest::binary>>, bolt_version) do
[number | Decoder.decode(rest, bolt_version)]
end
# Strings
def decode(<<@tiny_bitstring_marker::4, str_length::4, rest::bytes>>, bolt_version) do
decode_string(rest, str_length, bolt_version)
end
def decode(<<@bitstring8_marker, str_length, rest::bytes>>, bolt_version) do
decode_string(rest, str_length, bolt_version)
end
def decode(<<@bitstring16_marker, str_length::16, rest::bytes>>, bolt_version) do
decode_string(rest, str_length, bolt_version)
end
def decode(<<@bitstring32_marker, str_length::32, rest::binary>>, bolt_version) do
decode_string(rest, str_length, bolt_version)
end
# Lists
def decode(<<@tiny_list_marker::4, list_size::4>> <> bin, bolt_version) do
decode_list(bin, list_size, bolt_version)
end
def decode(<<@list8_marker, list_size::8>> <> bin, bolt_version) do
decode_list(bin, list_size, bolt_version)
end
def decode(<<@list16_marker, list_size::16>> <> bin, bolt_version) do
decode_list(bin, list_size, bolt_version)
end
def decode(<<@list32_marker, list_size::32>> <> bin, bolt_version) do
decode_list(bin, list_size, bolt_version)
end
# Maps
def decode(<<@tiny_map_marker::4, entries::4>> <> bin, bolt_version) do
decode_map(bin, entries, bolt_version)
end
def decode(<<@map8_marker, entries::8>> <> bin, bolt_version) do
decode_map(bin, entries, bolt_version)
end
def decode(<<@map16_marker, entries::16>> <> bin, bolt_version) do
decode_map(bin, entries, bolt_version)
end
def decode(<<@map32_marker, entries::32>> <> bin, bolt_version) do
decode_map(bin, entries, bolt_version)
end
# Struct
def decode(<<@tiny_struct_marker::4, struct_size::4, sig::8>> <> struct, bolt_version) do
Decoder.decode({sig, struct, struct_size}, bolt_version)
end
def decode(<<@struct8_marker, struct_size::8, sig::8>> <> struct, bolt_version) do
Decoder.decode({sig, struct, struct_size}, bolt_version)
end
def decode(<<@struct16_marker, struct_size::16, sig::8>> <> struct, bolt_version) do
Decoder.decode({sig, struct, struct_size}, bolt_version)
end
######### SPECIAL STRUCTS
# Node
def decode({@node_marker, struct, struct_size}, bolt_version) do
{[id, labels, props], rest} = Decoder.decode_struct(struct, struct_size, bolt_version)
node = %Types.Node{id: id, labels: labels, properties: props}
[node | rest]
end
# Relationship
def decode({@relationship_marker, struct, struct_size}, bolt_version) do
{[id, start_node, end_node, type, props], rest} =
Decoder.decode_struct(struct, struct_size, bolt_version)
relationship = %Types.Relationship{
id: id,
start: start_node,
end: end_node,
type: type,
properties: props
}
[relationship | rest]
end
# UnboundedRelationship
def decode({@unbounded_relationship_marker, struct, struct_size}, bolt_version) do
{[id, type, props], rest} = Decoder.decode_struct(struct, struct_size, bolt_version)
unbounded_relationship = %Types.UnboundRelationship{
id: id,
type: type,
properties: props
}
[unbounded_relationship | rest]
end
# Path
def decode({@path_marker, struct, struct_size}, bolt_version) do
{[nodes, relationships, sequence], rest} =
Decoder.decode_struct(struct, struct_size, bolt_version)
path = %Types.Path{
nodes: nodes,
relationships: relationships,
sequence: sequence
}
[path | rest]
end
# Manage the end of data
def decode("", _), do: []
# Integers
def decode(<<@int8_marker, int::signed-integer, rest::binary>>, bolt_version) do
[int | Decoder.decode(rest, bolt_version)]
end
def decode(<<@int16_marker, int::signed-integer-16, rest::binary>>, bolt_version) do
[int | Decoder.decode(rest, bolt_version)]
end
def decode(<<@int32_marker, int::signed-integer-32, rest::binary>>, bolt_version) do
[int | Decoder.decode(rest, bolt_version)]
end
def decode(<<@int64_marker, int::signed-integer-64, rest::binary>>, bolt_version) do
[int | Decoder.decode(rest, bolt_version)]
end
def decode(<<int::signed-integer, rest::binary>>, bolt_version) do
[int | Decoder.decode(rest, bolt_version)]
end
def decode(_, _) do
{:error, :not_implemented}
end
@spec decode_string(binary(), integer(), integer()) :: list()
defp decode_string(bytes, str_length, bolt_version) do
<<string::binary-size(str_length), rest::binary>> = bytes
[string | Decoder.decode(rest, bolt_version)]
end
@spec decode_list(binary(), integer(), integer()) :: list()
defp decode_list(list, list_size, bolt_version) do
{list, rest} = list |> Decoder.decode(bolt_version) |> Enum.split(list_size)
[list | rest]
end
@spec decode_map(binary(), integer(), integer()) :: list()
defp decode_map(map, entries, bolt_version) do
{map, rest} = map |> Decoder.decode(bolt_version) |> Enum.split(entries * 2)
[to_map(map) | rest]
end
@spec to_map(list()) :: map()
defp to_map(map) do
map
|> Enum.chunk_every(2)
|> Enum.map(&List.to_tuple/1)
|> Map.new()
end
end
| 28.509259 | 91 | 0.679766 |
9e76dafd78ad0485b5afec272effda3cc0055b21 | 1,632 | ex | Elixir | apps/local_ledger/lib/local_ledger/entry.ex | vanmil/ewallet | 6c1aca95a83e0a9d93007670a40d8c45764a8122 | [
"Apache-2.0"
] | null | null | null | apps/local_ledger/lib/local_ledger/entry.ex | vanmil/ewallet | 6c1aca95a83e0a9d93007670a40d8c45764a8122 | [
"Apache-2.0"
] | null | null | null | apps/local_ledger/lib/local_ledger/entry.ex | vanmil/ewallet | 6c1aca95a83e0a9d93007670a40d8c45764a8122 | [
"Apache-2.0"
] | null | null | null | defmodule LocalLedger.Entry do
@moduledoc """
This module is responsible for preparing and formatting the entries
before they are passed to a transaction to be inserted in the database.
"""
alias LocalLedgerDB.{Wallet, Token, Entry}
@doc """
Get or insert the given token and all the given addresses before
building a map representation usable by the LocalLedgerDB schemas.
"""
def build_all(entries) do
Enum.map(entries, fn attrs ->
{:ok, token} = Token.get_or_insert(attrs["token"])
{:ok, wallet} = Wallet.get_or_insert(attrs)
%{
type: attrs["type"],
amount: attrs["amount"],
token_id: token.id,
wallet_address: wallet.address
}
end)
end
@doc """
Extract the list of DEBIT addresses.
"""
def get_addresses(entries) do
entries
|> Enum.filter(fn entry ->
entry[:type] == Entry.debit_type()
end)
|> Enum.map(fn entry -> entry[:wallet_address] end)
end
@doc """
  Matches when genesis is set to true and does... nothing.
"""
def check_balance(_, %{genesis: true}) do
:ok
end
@doc """
  Matches when genesis is false and runs the wallet check.
"""
def check_balance(entries, %{genesis: _}) do
check_balance(entries)
end
@doc """
Check the current wallet amount for each DEBIT entry.
"""
def check_balance(entries) do
Enum.each(entries, fn entry ->
if entry[:type] == Entry.debit_type() do
Entry.check_balance(%{
amount: entry[:amount],
token_id: entry[:token_id],
address: entry[:wallet_address]
})
end
end)
end
end
| 24.727273 | 73 | 0.63174 |
9e76f530c13e82be12feab349455d09929aa109c | 1,099 | ex | Elixir | test/support/channel_case.ex | itsemilano/erlixir | 39fdcb86a9ccd55058682b3263d40efb9cbad11f | [
"MIT"
] | null | null | null | test/support/channel_case.ex | itsemilano/erlixir | 39fdcb86a9ccd55058682b3263d40efb9cbad11f | [
"MIT"
] | null | null | null | test/support/channel_case.ex | itsemilano/erlixir | 39fdcb86a9ccd55058682b3263d40efb9cbad11f | [
"MIT"
] | null | null | null | defmodule ErlixirWeb.ChannelCase do
@moduledoc """
This module defines the test case to be used by
channel tests.
Such tests rely on `Phoenix.ChannelTest` and also
import other functionality to make it easier
to build common data structures and query the data layer.
Finally, if the test case interacts with the database,
we enable the SQL sandbox, so changes done to the database
are reverted at the end of every test. If you are using
PostgreSQL, you can even run database tests asynchronously
by setting `use ErlixirWeb.ChannelCase, async: true`, although
this option is not recommended for other databases.
"""
use ExUnit.CaseTemplate
using do
quote do
# Import conveniences for testing with channels
import Phoenix.ChannelTest
import ErlixirWeb.ChannelCase
# The default endpoint for testing
@endpoint ErlixirWeb.Endpoint
end
end
setup tags do
pid = Ecto.Adapters.SQL.Sandbox.start_owner!(Erlixir.Repo, shared: not tags[:async])
on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(pid) end)
:ok
end
end
| 29.702703 | 88 | 0.737034 |
9e770d532797f135a0ff03cc19d6715d13812464 | 124 | ex | Elixir | lib/google_fit/activity_type/skating.ex | tsubery/google_fit | 7578b832c560b3b4a78059ac86af6e111812712e | [
"Apache-2.0"
] | 2 | 2017-02-01T13:51:26.000Z | 2019-04-12T11:37:25.000Z | lib/google_fit/activity_type/skating.ex | tsubery/google_fit | 7578b832c560b3b4a78059ac86af6e111812712e | [
"Apache-2.0"
] | null | null | null | lib/google_fit/activity_type/skating.ex | tsubery/google_fit | 7578b832c560b3b4a78059ac86af6e111812712e | [
"Apache-2.0"
] | null | null | null | defmodule GoogleFit.ActivityType.Skating do
@moduledoc false
def code, do: GoogleFit.ActivityType.code(__MODULE__)
end
| 20.666667 | 55 | 0.806452 |
9e7715db385f6e93ce0039bd662420c8f7ff880e | 5,550 | ex | Elixir | lib/plug_best.ex | remiprev/plug_best | f3d5ee080128b88f5c2051c11e9ad6d6683206a4 | [
"MIT"
] | 9 | 2016-05-25T13:44:35.000Z | 2019-05-04T11:12:10.000Z | lib/plug_best.ex | remi/plug_best | f3d5ee080128b88f5c2051c11e9ad6d6683206a4 | [
"MIT"
] | 5 | 2016-05-26T13:32:14.000Z | 2016-06-06T12:15:03.000Z | lib/plug_best.ex | remiprev/plug_best | f3d5ee080128b88f5c2051c11e9ad6d6683206a4 | [
"MIT"
] | null | null | null | defmodule PlugBest do
@moduledoc """
A library that parses HTTP `Accept-*` headers and returns the best match based
on a list of values.
## Examples
```elixir
iex> conn = %Plug.Conn{req_headers: [{"accept-language", "fr-CA,fr;q=0.8,en;q=0.6,en-US;q=0.4"}]}
iex> conn |> PlugBest.best_language(["en", "fr"])
{"fr-CA", "fr", 1.0}
iex> conn = %Plug.Conn{req_headers: [{"accept-language", "es"}]}
iex> conn |> PlugBest.best_language(["fr", "ru"])
nil
iex> conn = %Plug.Conn{req_headers: [{"accept-language", "es"}]}
iex> conn |> PlugBest.best_language_or_first(["ru", "fr"])
{"ru", "ru", 0.0}
```
"""
# Aliases
alias Plug.Conn
# Types
@type value :: {String.t(), String.t(), float}
@doc """
  Returns the best supported language based on the connection `Accept-Language`
HTTP header. Returns `nil` if none is found.
"""
@spec best_language(%Conn{}, [String.t()]) :: value | nil
def best_language(conn = %Conn{}, supported_values), do: best_value(conn, "accept-language", supported_values)
@doc """
  Returns the best supported language based on the connection `Accept-Language`
HTTP header. Returns the first supported language if none is found.
"""
@spec best_language_or_first(%Conn{}, [String.t()]) :: value | nil
def best_language_or_first(conn = %Conn{}, supported_values), do: best_value_or_first(conn, "accept-language", supported_values)
@doc """
Returns the best supported charset based on the connection `Accept-Charset`
HTTP header. Returns `nil` if none is found.
"""
@spec best_charset(%Conn{}, [String.t()]) :: value | nil
def best_charset(conn = %Conn{}, supported_values), do: best_value(conn, "accept-charset", supported_values)
@doc """
Returns the best supported charset based on the connection `Accept-Charset`
HTTP header. Returns the first supported charset if none is found.
"""
@spec best_charset_or_first(%Conn{}, [String.t()]) :: value | nil
def best_charset_or_first(conn = %Conn{}, supported_values), do: best_value_or_first(conn, "accept-charset", supported_values)
@doc """
Returns the best supported encoding based on the connection `Accept-Encoding`
HTTP header. Returns `nil` if none is found.
"""
@spec best_encoding(%Conn{}, [String.t()]) :: value | nil
def best_encoding(conn = %Conn{}, supported_values), do: best_value(conn, "accept-encoding", supported_values)
@doc """
Returns the best supported encoding based on the connection `Accept-Encoding`
HTTP header. Returns the first supported encoding if none is found.
"""
@spec best_encoding_or_first(%Conn{}, [String.t()]) :: value | nil
def best_encoding_or_first(conn = %Conn{}, supported_values), do: best_value_or_first(conn, "accept-encoding", supported_values)
@doc """
Returns the best supported type based on the connection `Accept`
HTTP header. Returns `nil` if none is found.
"""
@spec best_type(%Conn{}, [String.t()]) :: value | nil
def best_type(conn = %Conn{}, supported_values), do: best_value(conn, "accept", supported_values)
@doc """
Returns the best supported type based on the connection `Accept`
HTTP header. Returns the first supported type if none is found.
"""
@spec best_type_or_first(%Conn{}, [String.t()]) :: value | nil
def best_type_or_first(conn = %Conn{}, supported_values), do: best_value_or_first(conn, "accept", supported_values)
@spec best_value(%Conn{}, String.t(), [String.t()]) :: value | nil
defp best_value(conn = %Conn{}, header, supported_values) do
# Fetch the raw header content
conn
|> fetch_header_value(header)
# Convert it to a list
|> String.split(",")
|> Enum.map(&parse_header_item/1)
# Only keep values that we support
|> Enum.filter(&filter_header_value_item(&1, supported_values))
# Sort the parsed header with each score
|> Enum.sort(&sort_header_value_items/2)
# Return the first (best!) item
|> List.first()
end
@spec best_value_or_first(%Conn{}, String.t(), [String.t()]) :: value
defp best_value_or_first(conn = %Conn{}, header, supported_values) do
conn |> best_value(header, supported_values) || default_supported_value(supported_values)
end
@spec default_supported_value([String.t()]) :: value
defp default_supported_value(supported_values) do
[default_value | _] = supported_values
{default_value, default_value, 0.0}
end
@spec fetch_header_value(%Conn{}, String.t()) :: String.t()
defp fetch_header_value(conn, header_name) do
header_value =
conn
|> Conn.get_req_header(header_name)
|> List.first()
header_value || ""
end
@spec parse_header_item(String.t()) :: value
defp parse_header_item(item) do
[value, score] =
case String.split(item, ";") do
[value] -> [value, 1.0]
[value, "q=" <> score] -> [value, parse_score(score)]
end
# Extract base value by removing its suffix
base_value = value |> String.replace(~r/-.+$/, "")
{value, base_value, score}
end
@spec sort_header_value_items(value, value) :: boolean
defp sort_header_value_items({_, _, first_score}, {_, _, second_score}) do
first_score > second_score
end
@spec filter_header_value_item(value, [String.t()]) :: boolean
defp filter_header_value_item({_, base_value, _}, supported_values) do
Enum.member?(supported_values, base_value)
end
@spec parse_score(String.t()) :: float
defp parse_score(score) do
case Float.parse(score) do
{score, _} -> score
:error -> 0.0
end
end
end
| 34.90566 | 130 | 0.68036 |
9e771694f0f03cd528411b222a8fbf13b0b041b1 | 119,394 | ex | Elixir | lib/ecto/changeset.ex | matteing/ecto | beabc36931222ac574bb0047fef90d84cb138459 | [
"Apache-2.0"
] | null | null | null | lib/ecto/changeset.ex | matteing/ecto | beabc36931222ac574bb0047fef90d84cb138459 | [
"Apache-2.0"
] | null | null | null | lib/ecto/changeset.ex | matteing/ecto | beabc36931222ac574bb0047fef90d84cb138459 | [
"Apache-2.0"
] | null | null | null | defmodule Ecto.Changeset do
@moduledoc ~S"""
Changesets allow filtering, casting, validation and
definition of constraints when manipulating structs.
There is an example of working with changesets in the introductory
documentation in the `Ecto` module. The functions `cast/4` and
`change/2` are the usual entry points for creating changesets.
The first one is used to cast and validate external parameters,
such as parameters sent through a form, API, command line, etc.
The second one is used to change data directly from your application.
The remaining functions in this module, such as validations,
constraints, association handling, are about manipulating
changesets. Let's discuss some of this extra functionality.
## External vs internal data
Changesets allow working with both kinds of data:
* internal to the application - for example programmatically generated,
or coming from other subsystems. This use case is primarily covered
by the `change/2` and `put_change/3` functions.
* external to the application - for example data provided by the user in
a form that needs to be type-converted and properly validated. This
use case is primarily covered by the `cast/4` function.
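  For example, contrasting the two (a minimal sketch; `user` is assumed to be
  a `%User{}` struct with a `:name` field):

      # Internal data: trusted values are applied directly
      Ecto.Changeset.change(user, %{name: "Jane"})

      # External data: untrusted params are cast, filtered and type-checked
      Ecto.Changeset.cast(user, %{"name" => "Jane"}, [:name])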
## Validations and constraints
Ecto changesets provide both validations and constraints which
are ultimately turned into errors in case something goes wrong.
The difference between them is that most validations can be
executed without a need to interact with the database and, therefore,
are always executed before attempting to insert or update the entry
in the database. Some validations may happen against the database but
they are inherently unsafe. Those validations start with a `unsafe_`
prefix, such as `unsafe_validate_unique/3`.
On the other hand, constraints rely on the database and are always safe.
As a consequence, validations are always checked before constraints.
Constraints won't even be checked in case validations failed.
Let's see an example:
defmodule User do
use Ecto.Schema
import Ecto.Changeset
schema "users" do
field :name
field :email
field :age, :integer
end
def changeset(user, params \\ %{}) do
user
|> cast(params, [:name, :email, :age])
|> validate_required([:name, :email])
|> validate_format(:email, ~r/@/)
|> validate_inclusion(:age, 18..100)
|> unique_constraint(:email)
end
end
In the `changeset/2` function above, we define three validations.
They check that `name` and `email` fields are present in the
changeset, the e-mail is of the specified format, and the age is
between 18 and 100 - as well as a unique constraint in the email
field.
Let's suppose the e-mail is given but the age is invalid. The
changeset would have the following errors:
changeset = User.changeset(%User{}, %{age: 0, email: "[email protected]"})
{:error, changeset} = Repo.insert(changeset)
changeset.errors #=> [age: {"is invalid", []}, name: {"can't be blank", []}]
In this case, we haven't checked the unique constraint in the
e-mail field because the data did not validate. Let's fix the
age and the name, and assume that the e-mail already exists in the
database:
changeset = User.changeset(%User{}, %{age: 42, name: "Mary", email: "[email protected]"})
{:error, changeset} = Repo.insert(changeset)
changeset.errors #=> [email: {"has already been taken", []}]
Validations and constraints define an explicit boundary when the check
happens. By moving constraints to the database, we also provide a safe,
correct and data-race free means of checking the user input.
### Deferred constraints
Some databases support deferred constraints, i.e., constraints which are
checked at the end of the transaction rather than at the end of each statement.
Changesets do not support this type of constraints. When working with deferred
constraints, a violation while invoking `c:Ecto.Repo.insert/2` or `c:Ecto.Repo.update/2` won't
return `{:error, changeset}`, but rather raise an error at the end of the
transaction.
## Empty values
  Many times, the data given on cast needs to be further pruned, especially
regarding empty values. For example, if you are gathering data to be
cast from the command line or through an HTML form or any other text-based
format, it is likely those means cannot express nil values. For
those reasons, changesets include the concept of empty values, which are
values that will be automatically converted to the field's default value
on `cast/4`. Those values are stored in the changeset `empty_values` field
and default to `[""]`. You can also pass the `:empty_values` option to
  `cast/4` in case you want to change how a particular `cast/4` call behaves.
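  For example, a minimal sketch that also treats `"N/A"` as an empty value
  (the `user` struct and `:name` field are illustrative):

      # "N/A" is replaced by the default value of :name before casting
      cast(user, %{"name" => "N/A"}, [:name], empty_values: ["", "N/A"])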
## Associations, embeds and on replace
Using changesets you can work with associations as well as with embedded
structs. There are two primary APIs:
* `cast_assoc/3` and `cast_embed/3` - those functions are used when
working with external data. In particular, they allow you to change
associations and embeds alongside the parent struct, all at once.
* `put_assoc/4` and `put_embed/4` - it allows you to replace the
association or embed as a whole. This can be used to move associated
data from one entry to another, to completely remove or replace
existing entries.
See the documentation for those functions for more information.
### The `:on_replace` option
When using any of those APIs, you may run into situations where Ecto sees
data is being replaced. For example, imagine a Post has many Comments where
the comments have IDs 1, 2 and 3. If you call `cast_assoc/3` passing only
the IDs 1 and 2, Ecto will consider 3 is being "replaced" and it will raise
  by default. Such behaviour can be changed by setting the `:on_replace`
  option when defining your association/embed, according to the values below:
* `:raise` (default) - do not allow removing association or embedded
data via parent changesets
* `:mark_as_invalid` - if attempting to remove the association or
embedded data via parent changeset - an error will be added to the parent
changeset, and it will be marked as invalid
* `:nilify` - sets owner reference column to `nil` (available only for
associations). Use this on a `belongs_to` column to allow the association
to be cleared out so that it can be set to a new value. Will set `action`
on associated changesets to `:replace`
* `:update` - updates the association, available only for `has_one`, `belongs_to`
and `embeds_one`. This option will update all the fields given to the changeset
including the id for the association
* `:delete` - removes the association or related data from the database.
This option has to be used carefully (see below). Will set `action` on associated
changesets to `:replace`
* `:delete_if_exists` - like `:delete` except that it ignores any stale entry
error. For instance, if you set `on_replace: :delete` but the replaced
resource was already deleted by a separate request, it will raise a
`Ecto.StaleEntryError`. `:delete_if_exists` makes it so it will only delete
if the entry still exists
The `:delete` and `:delete_if_exists` options must be used carefully as they allow
users to delete any associated data by simply not sending the associated data.
If you need deletion, it is often preferred to add a separate boolean virtual field
in the schema and manually mark the changeset for deletion if the `:delete` field is
set in the params, as in the example below. Note that we don't call `cast/4` in this
case because we don't want to prevent deletion if a change is invalid (changes are
irrelevant if the entity needs to be deleted).
defmodule Comment do
use Ecto.Schema
import Ecto.Changeset
schema "comments" do
field :body, :string
field :delete, :boolean, virtual: true
end
def changeset(comment, %{"delete" => "true"}) do
%{Ecto.Changeset.change(comment, delete: true) | action: :delete}
end
def changeset(comment, params) do
cast(comment, params, [:body])
end
end
## Schemaless changesets
In the changeset examples so far, we have always used changesets to validate
and cast data contained in a struct defined by an Ecto schema, such as the `%User{}`
struct defined by the `User` module.
  However, changesets can also be used with "regular" structs by passing a tuple
with the data and its types:
user = %User{}
types = %{first_name: :string, last_name: :string, email: :string}
changeset =
{user, types}
|> Ecto.Changeset.cast(params, Map.keys(types))
|> Ecto.Changeset.validate_required(...)
|> Ecto.Changeset.validate_length(...)
where the user struct refers to the definition in the following module:
defmodule User do
  defstruct [:first_name, :last_name, :email]
end
Changesets can also be used with data in a plain map, by following the same API:
data = %{}
types = %{name: :string}
params = %{name: "Callum"}
changeset =
{data, types}
|> Ecto.Changeset.cast(params, Map.keys(types))
|> Ecto.Changeset.validate_required(...)
|> Ecto.Changeset.validate_length(...)
Such functionality makes Ecto extremely useful to cast, validate and prune data even
if it is not meant to be persisted to the database.
### Changeset actions
Changesets have an action field which is usually set by `Ecto.Repo`
whenever one of the operations such as `insert` or `update` is called:
changeset = User.changeset(%User{}, %{age: 42, email: "[email protected]"})
{:error, changeset} = Repo.insert(changeset)
changeset.action
#=> :insert
This means that when working with changesets that are not meant to be
persisted to the database, such as schemaless changesets, you may need
to explicitly set the action to one specific value. Frameworks such as
Phoenix use the action value to define how HTML forms should act.
  Instead of setting the action manually, you may use `apply_action/2`, which
  emulates operations such as `c:Ecto.Repo.insert`. `apply_action/2` will return
  `{:ok, data}` with the changes applied if the changeset is valid, or `{:error, changeset}` with
the given `action` set in the changeset in case of errors.
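  For example, a minimal sketch using a schemaless changeset (`params` is
  assumed to be a map of user input):

      types = %{name: :string}

      {%{}, types}
      |> Ecto.Changeset.cast(params, [:name])
      |> Ecto.Changeset.validate_required([:name])
      |> Ecto.Changeset.apply_action(:insert)
      |> case do
        {:ok, data} ->
          # the changes applied to the data
          data

        {:error, changeset} ->
          # changeset.action is now :insert
          changeset
      end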
## The Ecto.Changeset struct
The public fields are:
* `valid?` - Stores if the changeset is valid
* `data` - The changeset source data, for example, a struct
* `params` - The parameters as given on changeset creation
* `changes` - The `changes` from parameters that were approved in casting
* `errors` - All errors from validations
* `required` - All required fields as a list of atoms
* `action` - The action to be performed with the changeset
* `types` - Cache of the data's field types
* `empty_values` - A list of values to be considered empty
* `repo` - The repository applying the changeset (only set after a Repo function is called)
* `repo_opts` - A keyword list of options given to the underlying repository operation
The following fields are private and must not be accessed directly.
* `validations`
* `constraints`
* `filters`
* `prepare`
### Redacting fields in inspect
To hide a field's value from the inspect protocol of `Ecto.Changeset`, mark
the field as `redact: true` in the schema, and it will display with the
value `**redacted**`.
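  For example, a minimal sketch:

      schema "users" do
        field :name, :string
        field :password, :string, redact: true
      end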
"""
require Ecto.Query
alias __MODULE__
alias Ecto.Changeset.Relation
@empty_values [""]
# If a new field is added here, def merge must be adapted
defstruct valid?: false, data: nil, params: nil, changes: %{},
errors: [], validations: [], required: [], prepare: [],
constraints: [], filters: %{}, action: nil, types: nil,
empty_values: @empty_values, repo: nil, repo_opts: []
@type t(data_type) :: %Changeset{
valid?: boolean(),
repo: atom | nil,
repo_opts: Keyword.t(),
data: data_type,
params: %{optional(String.t()) => term} | nil,
changes: %{optional(atom) => term},
required: [atom],
prepare: [(t -> t)],
errors: [{atom, error}],
constraints: [constraint],
validations: [{atom, term}],
filters: %{optional(atom) => term},
action: action,
types: nil | %{atom => Ecto.Type.t() | {:assoc, term()} | {:embed, term()}}
}
@type t :: t(Ecto.Schema.t | map | nil)
@type error :: {String.t, Keyword.t}
@type action :: nil | :insert | :update | :delete | :replace | :ignore | atom
@type constraint :: %{type: :check | :exclusion | :foreign_key | :unique,
constraint: String.t, match: :exact | :suffix | :prefix,
field: atom, error_message: String.t, error_type: atom}
@type data :: map()
@type types :: map()
@number_validators %{
less_than: {&</2, "must be less than %{number}"},
greater_than: {&>/2, "must be greater than %{number}"},
less_than_or_equal_to: {&<=/2, "must be less than or equal to %{number}"},
greater_than_or_equal_to: {&>=/2, "must be greater than or equal to %{number}"},
equal_to: {&==/2, "must be equal to %{number}"},
not_equal_to: {&!=/2, "must be not equal to %{number}"},
}
@relations [:embed, :assoc]
@match_types [:exact, :suffix, :prefix]
@doc """
Wraps the given data in a changeset or adds changes to a changeset.
`changes` is a map or keyword where the key is an atom representing a
field, association or embed and the value is a term. Note the `value` is
directly stored in the changeset with no validation whatsoever. For this
reason, this function is meant for working with data internal to the
application.
When changing embeds and associations, see `put_assoc/4` for a complete
reference on the accepted values.
This function is useful for:
* wrapping a struct inside a changeset
* directly changing a struct without performing castings nor validations
* directly bulk-adding changes to a changeset
Changed attributes will only be added if the change does not have the
same value as the field in the data.
When a changeset is passed as the first argument, the changes passed as the
second argument are merged over the changes already in the changeset if they
differ from the values in the struct.
When a `{data, types}` is passed as the first argument, a changeset is
created with the given data and types and marked as valid.
See `cast/4` if you'd prefer to cast and validate external parameters.
## Examples
iex> changeset = change(%Post{})
%Ecto.Changeset{...}
iex> changeset.valid?
true
iex> changeset.changes
%{}
iex> changeset = change(%Post{author: "bar"}, title: "title")
iex> changeset.changes
%{title: "title"}
iex> changeset = change(%Post{title: "title"}, title: "title")
iex> changeset.changes
%{}
iex> changeset = change(changeset, %{title: "new title", body: "body"})
iex> changeset.changes.title
"new title"
iex> changeset.changes.body
"body"
"""
@spec change(Ecto.Schema.t | t | {data, types}, %{atom => term} | Keyword.t) :: t
def change(data, changes \\ %{})
def change({data, types}, changes) when is_map(data) do
change(%Changeset{data: data, types: Enum.into(types, %{}), valid?: true}, changes)
end
def change(%Changeset{types: nil}, _changes) do
raise ArgumentError, "changeset does not have types information"
end
def change(%Changeset{changes: changes, types: types} = changeset, new_changes)
when is_map(new_changes) or is_list(new_changes) do
{changes, errors, valid?} =
get_changed(changeset.data, types, changes, new_changes,
changeset.errors, changeset.valid?)
%{changeset | changes: changes, errors: errors, valid?: valid?}
end
def change(%{__struct__: struct} = data, changes) when is_map(changes) or is_list(changes) do
types = struct.__changeset__()
{changes, errors, valid?} = get_changed(data, types, %{}, changes, [], true)
%Changeset{valid?: valid?, data: data, changes: changes,
errors: errors, types: types}
end
defp get_changed(data, types, old_changes, new_changes, errors, valid?) do
Enum.reduce(new_changes, {old_changes, errors, valid?}, fn
{key, value}, {changes, errors, valid?} ->
put_change(data, changes, errors, valid?, key, value, Map.get(types, key))
end)
end
@doc """
Applies the given `params` as changes on the `data` according to
the set of `permitted` keys. Returns a changeset.
`data` may be either a changeset, a schema struct or a `{data, types}`
tuple. The second argument is a map of `params` that are cast according
to the type information from `data`. `params` is a map with string keys
or a map with atom keys, containing potentially invalid data. Mixed keys
are not allowed.
During casting, all `permitted` parameters whose values match the specified
type information will have their key name converted to an atom and stored
together with the value as a change in the `:changes` field of the changeset.
All parameters that are not explicitly permitted are ignored.
If casting of all fields is successful, the changeset is returned as valid.
Note that `cast/4` validates the types in the `params`, but not in the given
`data`.
## Options
* `:empty_values` - a list of values to be considered as empty when casting.
Empty values are always replaced by the default value of the respective key. Defaults to `[""]`
## Examples
iex> changeset = cast(post, params, [:title])
iex> if changeset.valid? do
...> Repo.update!(changeset)
...> end
Passing a changeset as the first argument:
iex> changeset = cast(post, %{title: "Hello"}, [:title])
iex> new_changeset = cast(changeset, %{title: "Foo", body: "World"}, [:body])
iex> new_changeset.params
%{"title" => "Hello", "body" => "World"}
Or creating a changeset from a simple map with types:
iex> data = %{title: "hello"}
iex> types = %{title: :string}
iex> changeset = cast({data, types}, %{title: "world"}, [:title])
iex> apply_changes(changeset)
%{title: "world"}
## Composing casts
`cast/4` also accepts a changeset as its first argument. In such cases, all
the effects caused by the call to `cast/4` (additional errors and changes)
are simply added to the ones already present in the argument changeset.
Parameters are merged (**not deep-merged**) and the ones passed to `cast/4`
take precedence over the ones already in the changeset.
"""
@spec cast(Ecto.Schema.t | t | {data, types},
%{binary => term} | %{atom => term} | :invalid,
[atom],
Keyword.t) :: t
def cast(data, params, permitted, opts \\ [])
def cast(_data, %{__struct__: _} = params, _permitted, _opts) do
raise Ecto.CastError, type: :map, value: params,
message: "expected params to be a :map, got: `#{inspect(params)}`"
end
def cast({data, types}, params, permitted, opts) when is_map(data) do
cast(data, types, %{}, params, permitted, opts)
end
def cast(%Changeset{types: nil}, _params, _permitted, _opts) do
raise ArgumentError, "changeset does not have types information"
end
def cast(%Changeset{changes: changes, data: data, types: types, empty_values: empty_values} = changeset,
params, permitted, opts) do
opts = Keyword.put_new(opts, :empty_values, empty_values)
new_changeset = cast(data, types, changes, params, permitted, opts)
cast_merge(changeset, new_changeset)
end
def cast(%{__struct__: module} = data, params, permitted, opts) do
cast(data, module.__changeset__(), %{}, params, permitted, opts)
end
defp cast(%{} = data, %{} = types, %{} = changes, :invalid, permitted, _opts) when is_list(permitted) do
_ = Enum.each(permitted, &cast_key/1)
%Changeset{params: nil, data: data, valid?: false, errors: [],
changes: changes, types: types}
end
defp cast(%{} = data, %{} = types, %{} = changes, %{} = params, permitted, opts) when is_list(permitted) do
empty_values = Keyword.get(opts, :empty_values, @empty_values)
params = convert_params(params)
defaults = case data do
%{__struct__: struct} -> struct.__struct__()
%{} -> %{}
end
{changes, errors, valid?} =
Enum.reduce(permitted, {changes, [], true},
&process_param(&1, params, types, data, empty_values, defaults, &2))
%Changeset{params: params, data: data, valid?: valid?,
errors: Enum.reverse(errors), changes: changes, types: types}
end
defp cast(%{}, %{}, %{}, params, permitted, _opts) when is_list(permitted) do
raise Ecto.CastError, type: :map, value: params,
message: "expected params to be a :map, got: `#{inspect params}`"
end
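  # Processes one permitted key: casts its parameter (when present) and
  # accumulates the resulting changes, errors and validity.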
defp process_param(key, params, types, data, empty_values, defaults, {changes, errors, valid?}) do
{key, param_key} = cast_key(key)
type = cast_type!(types, key)
current =
case changes do
%{^key => value} -> value
_ -> Map.get(data, key)
end
case cast_field(key, param_key, type, params, current, empty_values, defaults, valid?) do
{:ok, value, valid?} ->
{Map.put(changes, key, value), errors, valid?}
:missing ->
{changes, errors, valid?}
{:invalid, custom_errors} ->
{message, new_errors} =
custom_errors
|> Keyword.put_new(:validation, :cast)
|> Keyword.put(:type, type)
|> Keyword.pop(:message, "is invalid")
{changes, [{key, {message, new_errors}} | errors], false}
end
end
defp cast_type!(types, key) do
case types do
%{^key => {tag, _}} when tag in @relations ->
raise "casting #{tag}s with cast/4 for #{inspect key} field is not supported, use cast_#{tag}/3 instead"
%{^key => type} ->
type
_ ->
known_fields = types |> Map.keys() |> Enum.map_join(", ", &inspect/1)
raise ArgumentError,
"unknown field `#{inspect(key)}` given to cast. Either the field does not exist or it is a " <>
  ":through association (which is read-only). The known fields are: #{known_fields}"
end
end
defp cast_key(key) when is_atom(key),
do: {key, Atom.to_string(key)}
defp cast_key(key),
do: raise ArgumentError, "cast/3 expects a list of atom keys, got: `#{inspect key}`"
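  # Casts a single field. Values listed in `empty_values` are replaced
  # by the field's default before casting.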
defp cast_field(key, param_key, type, params, current, empty_values, defaults, valid?) do
case params do
%{^param_key => value} ->
value = if value in empty_values, do: Map.get(defaults, key), else: value
case Ecto.Type.cast(type, value) do
{:ok, value} ->
if Ecto.Type.equal?(type, current, value) do
:missing
else
{:ok, value, valid?}
end
:error ->
{:invalid, []}
{:error, custom_errors} when is_list(custom_errors) ->
{:invalid, custom_errors}
end
_ ->
:missing
end
end
# TODO: Remove branch when we require Elixir v1.10+.
if Code.ensure_loaded?(:maps) and function_exported?(:maps, :iterator, 1) do
# We only look at the first element because traversing the whole map
# can be expensive and it was showing up during profiling. This means
# we won't always raise, but the check only exists for user convenience
# anyway, and it is not a guarantee.
defp convert_params(params) do
case :maps.next(:maps.iterator(params)) do
{key, _, _} when is_atom(key) ->
for {key, value} <- params, into: %{} do
if is_atom(key) do
{Atom.to_string(key), value}
else
raise Ecto.CastError, type: :map, value: params,
message: "expected params to be a map with atoms or string keys, " <>
"got a map with mixed keys: #{inspect params}"
end
end
_ ->
params
end
end
else
defp convert_params(params) do
params
|> Enum.reduce(nil, fn
{key, _value}, nil when is_binary(key) ->
nil
{key, _value}, _ when is_binary(key) ->
raise Ecto.CastError, type: :map, value: params,
message: "expected params to be a map with atoms or string keys, " <>
"got a map with mixed keys: #{inspect params}"
{key, value}, nil when is_atom(key) ->
[{Atom.to_string(key), value}]
{key, value}, acc when is_atom(key) ->
[{Atom.to_string(key), value} | acc]
end)
|> case do
nil -> params
list -> :maps.from_list(list)
end
end
end
## Casting related
@doc """
Casts the given association with the changeset parameters.
This function should be used when working with the entire association at
once (and not a single element of a many-style association) and receiving
data external to the application.
  `cast_assoc/3` works by matching the records extracted from the database
  against the parameters received from an external source.
Therefore, it is expected that the data in the changeset has explicitly
preloaded the association being cast and that all of the IDs exist and
are unique.
For example, imagine a user has many addresses relationship where
post data is sent as follows
%{"name" => "john doe", "addresses" => [
%{"street" => "somewhere", "country" => "brazil", "id" => 1},
%{"street" => "elsewhere", "country" => "poland"},
]}
and then
User
|> Repo.get!(id)
|> Repo.preload(:addresses) # Only required when updating data
|> Ecto.Changeset.cast(params, [])
|> Ecto.Changeset.cast_assoc(:addresses, with: &MyApp.Address.changeset/2)
The parameters for the given association will be retrieved
from `changeset.params`. Those parameters are expected to be
a map with attributes, similar to the ones passed to `cast/4`.
Once parameters are retrieved, `cast_assoc/3` will match those
parameters with the associations already in the changeset record.
Once `cast_assoc/3` is called, Ecto will compare each parameter
with the user's already preloaded addresses and act as follows:
* If the parameter does not contain an ID, the parameter data
will be passed to `MyApp.Address.changeset/2` with a new struct
and become an insert operation
* If the parameter contains an ID and there is no associated child
with such ID, the parameter data will be passed to
`MyApp.Address.changeset/2` with a new struct and become an insert
operation
* If the parameter contains an ID and there is an associated child
with such ID, the parameter data will be passed to
`MyApp.Address.changeset/2` with the existing struct and become an
update operation
* If there is an associated child with an ID and its ID is not given
as parameter, the `:on_replace` callback for that association will
be invoked (see the "On replace" section on the module documentation)
Every time the `MyApp.Address.changeset/2` function is invoked, it must
return a changeset. Once the parent changeset is given to an `Ecto.Repo`
function, all entries will be inserted/updated/deleted within the same
transaction.
Note developers are allowed to explicitly set the `:action` field of a
changeset to instruct Ecto how to act in certain situations. Let's suppose
that, if one of the associations has only empty fields, you want to ignore
the entry altogether instead of showing an error. The changeset function could
be written like this:
def changeset(struct, params) do
struct
|> cast(params, [:title, :body])
|> validate_required([:title, :body])
|> case do
%{valid?: false, changes: changes} = changeset when changes == %{} ->
# If the changeset is invalid and has no changes, it is
# because all required fields are missing, so we ignore it.
%{changeset | action: :ignore}
changeset ->
changeset
end
end
## Partial changes for many-style associations
By preloading an association using a custom query you can confine the behavior
of `cast_assoc/3`. This opens up the possibility to work on a subset of the data,
instead of all associations in the database.
Taking the initial example of users having addresses imagine those addresses
are set up to belong to a country. If you want to allow users to bulk edit all
addresses that belong to a single country, you can do so by changing the preload
query:
query = from MyApp.Address, where: [country: ^edit_country]
User
|> Repo.get!(id)
|> Repo.preload(addresses: query)
|> Ecto.Changeset.cast(params, [])
|> Ecto.Changeset.cast_assoc(:addresses)
This will allow you to cast and update only the association for the given country.
  The important point for partial changes is that any addresses which were not
preloaded won't be changed.
## Options
* `:required` - if the association is a required field
* `:required_message` - the message on failure, defaults to "can't be blank"
* `:invalid_message` - the message on failure, defaults to "is invalid"
* `:force_update_on_change` - force the parent record to be updated in the
repository if there is a change, defaults to `true`
* `:with` - the function to build the changeset from params. Defaults to the
`changeset/2` function of the associated module. It can be changed by passing
an anonymous function or an MFA tuple. If using an MFA, the default changeset
and parameters arguments will be prepended to the given args. For example,
using `with: {Author, :special_changeset, ["hello"]}` will be invoked as
`Author.special_changeset(changeset, params, "hello")`
"""
def cast_assoc(changeset, name, opts \\ []) when is_atom(name) do
cast_relation(:assoc, changeset, name, opts)
end
@doc """
Casts the given embed with the changeset parameters.
The parameters for the given embed will be retrieved
from `changeset.params`. Those parameters are expected to be
a map with attributes, similar to the ones passed to `cast/4`.
Once parameters are retrieved, `cast_embed/3` will match those
parameters with the embeds already in the changeset record.
See `cast_assoc/3` for an example of working with casts and
associations which would also apply for embeds.
The changeset must have been previously `cast` using
`cast/4` before this function is invoked.
## Options
* `:required` - if the embed is a required field
* `:required_message` - the message on failure, defaults to "can't be blank"
* `:invalid_message` - the message on failure, defaults to "is invalid"
* `:force_update_on_change` - force the parent record to be updated in the
repository if there is a change, defaults to `true`
* `:with` - the function to build the changeset from params. Defaults to the
`changeset/2` function of the embedded module. It can be changed by passing
an anonymous function or an MFA tuple. If using an MFA, the default changeset
and parameters arguments will be prepended to the given args. For example,
using `with: {Author, :special_changeset, ["hello"]}` will be invoked as
`Author.special_changeset(changeset, params, "hello")`
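  ## Examples

  A minimal sketch, assuming a schema with `embeds_many :addresses` and an
  `Address` module defining `changeset/2`:

      user
      |> cast(params, [:name])
      |> cast_embed(:addresses, required: true)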
"""
def cast_embed(changeset, name, opts \\ []) when is_atom(name) do
cast_relation(:embed, changeset, name, opts)
end
defp cast_relation(type, %Changeset{data: data, types: types}, _name, _opts)
when data == nil or types == nil do
raise ArgumentError, "cast_#{type}/3 expects the changeset to be cast. " <>
"Please call cast/4 before calling cast_#{type}/3"
end
defp cast_relation(type, %Changeset{} = changeset, key, opts) do
{key, param_key} = cast_key(key)
%{data: data, types: types, params: params, changes: changes} = changeset
%{related: related} = relation = relation!(:cast, type, key, Map.get(types, key))
params = params || %{}
{changeset, required?} =
if opts[:required] do
{update_in(changeset.required, &[key|&1]), true}
else
{changeset, false}
end
on_cast = Keyword.get_lazy(opts, :with, fn -> on_cast_default(type, related) end)
original = Map.get(data, key)
changeset =
case Map.fetch(params, param_key) do
{:ok, value} ->
current = Relation.load!(data, original)
case Relation.cast(relation, data, value, current, on_cast) do
{:ok, change, relation_valid?} when change != original ->
valid? = changeset.valid? and relation_valid?
changes = Map.put(changes, key, change)
changeset = %{force_update(changeset, opts) | changes: changes, valid?: valid?}
missing_relation(changeset, key, current, required?, relation, opts)
{:error, {message, meta}} ->
meta = [validation: type] ++ meta
error = {key, {message(opts, :invalid_message, message), meta}}
%{changeset | errors: [error | changeset.errors], valid?: false}
# ignore or ok with change == original
_ ->
missing_relation(changeset, key, current, required?, relation, opts)
end
:error ->
missing_relation(changeset, key, original, required?, relation, opts)
end
update_in changeset.types[key], fn {type, relation} ->
{type, %{relation | on_cast: on_cast}}
end
end
defp on_cast_default(type, module) do
fn struct, params ->
try do
module.changeset(struct, params)
rescue
e in UndefinedFunctionError ->
case __STACKTRACE__ do
[{^module, :changeset, args_or_arity, _}] when args_or_arity == 2
when length(args_or_arity) == 2 ->
raise ArgumentError, """
the module #{inspect module} does not define a changeset/2 function,
which is used by cast_#{type}/3. You need to either:
1. implement the #{type}.changeset/2 function
2. pass the :with option to cast_#{type}/3 with an anonymous
function that expects 2 args or an MFA tuple
When using an inline embed, the :with option must be given
"""
stacktrace ->
reraise e, stacktrace
end
end
end
end
defp missing_relation(%{changes: changes, errors: errors} = changeset,
name, current, required?, relation, opts) do
current_changes = Map.get(changes, name, current)
if required? and Relation.empty?(relation, current_changes) do
errors = [{name, {message(opts, :required_message, "can't be blank"), [validation: :required]}} | errors]
%{changeset | errors: errors, valid?: false}
else
changeset
end
end
defp relation!(_op, type, _name, {type, relation}),
do: relation
defp relation!(op, :assoc, name, nil),
do: raise(ArgumentError, "cannot #{op} assoc `#{name}`, assoc `#{name}` not found. Make sure it is spelled correctly and that the association type is not read-only")
defp relation!(op, type, name, nil),
do: raise(ArgumentError, "cannot #{op} #{type} `#{name}`, #{type} `#{name}` not found. Make sure that it exists and is spelled correctly")
defp relation!(op, type, name, {other, _}) when other in @relations,
do: raise(ArgumentError, "expected `#{name}` to be an #{type} in `#{op}_#{type}`, got: `#{other}`")
defp relation!(op, type, name, schema_type),
do: raise(ArgumentError, "expected `#{name}` to be an #{type} in `#{op}_#{type}`, got: `#{inspect schema_type}`")
defp force_update(changeset, opts) do
if Keyword.get(opts, :force_update_on_change, true) do
put_in(changeset.repo_opts[:force], true)
else
changeset
end
end
## Working with changesets
@doc """
Merges two changesets.
This function merges two changesets provided they have been applied to the
same data (their `:data` field is equal); if the data differs, an
`ArgumentError` exception is raised. If one of the changesets has a `:repo`
field which is not `nil`, then the value of that field is used as the `:repo`
field of the resulting changeset; if both changesets have a non-`nil` and
different `:repo` field, an `ArgumentError` exception is raised.
The other fields are merged with the following criteria:
* `params` - params are merged (not deep-merged) giving precedence to the
params of `changeset2` in case of a conflict. If both changesets have their
`:params` fields set to `nil`, the resulting changeset will have its params
set to `nil` too.
* `changes` - changes are merged giving precedence to the `changeset2`
changes.
* `errors` and `validations` - they are simply concatenated.
* `required` - required fields are merged; all the fields that appear
in the required list of both changesets are moved to the required
list of the resulting changeset.
## Examples
iex> changeset1 = cast(%Post{}, %{title: "Title"}, [:title])
iex> changeset2 = cast(%Post{}, %{title: "New title", body: "Body"}, [:title, :body])
iex> changeset = merge(changeset1, changeset2)
iex> changeset.changes
%{body: "Body", title: "New title"}
iex> changeset1 = cast(%Post{body: "Body"}, %{title: "Title"}, [:title])
iex> changeset2 = cast(%Post{}, %{title: "New title"}, [:title])
iex> merge(changeset1, changeset2)
** (ArgumentError) different :data when merging changesets
"""
@spec merge(t, t) :: t
def merge(changeset1, changeset2)
def merge(%Changeset{data: data} = cs1, %Changeset{data: data} = cs2) do
new_repo = merge_identical(cs1.repo, cs2.repo, "repos")
new_repo_opts = Keyword.merge(cs1.repo_opts, cs2.repo_opts)
new_action = merge_identical(cs1.action, cs2.action, "actions")
new_filters = Map.merge(cs1.filters, cs2.filters)
new_validations = cs1.validations ++ cs2.validations
new_constraints = cs1.constraints ++ cs2.constraints
cast_merge %{cs1 | repo: new_repo, repo_opts: new_repo_opts, filters: new_filters,
action: new_action, validations: new_validations,
constraints: new_constraints}, cs2
end
def merge(%Changeset{}, %Changeset{}) do
raise ArgumentError, message: "different :data when merging changesets"
end
defp cast_merge(cs1, cs2) do
new_params = (cs1.params || cs2.params) && Map.merge(cs1.params || %{}, cs2.params || %{})
new_changes = Map.merge(cs1.changes, cs2.changes)
new_errors = Enum.uniq(cs1.errors ++ cs2.errors)
new_required = Enum.uniq(cs1.required ++ cs2.required)
new_types = cs1.types || cs2.types
new_valid? = cs1.valid? and cs2.valid?
%{cs1 | params: new_params, valid?: new_valid?, errors: new_errors, types: new_types,
changes: new_changes, required: new_required}
end
defp merge_identical(object, nil, _thing), do: object
defp merge_identical(nil, object, _thing), do: object
defp merge_identical(object, object, _thing), do: object
defp merge_identical(lhs, rhs, thing) do
raise ArgumentError, "different #{thing} (`#{inspect lhs}` and " <>
"`#{inspect rhs}`) when merging changesets"
end
@doc """
Fetches the given field from changes or from the data.
While `fetch_change/2` only looks at the current `changes`
to retrieve a value, this function looks at the changes and
then falls back on the data, finally returning `:error` if
no value is available.
  For relations, these functions will return the changeset's
original data with changes applied. To retrieve raw changesets,
please use `fetch_change/2`.
## Examples
iex> post = %Post{title: "Foo", body: "Bar baz bong"}
iex> changeset = change(post, %{title: "New title"})
iex> fetch_field(changeset, :title)
{:changes, "New title"}
iex> fetch_field(changeset, :body)
{:data, "Bar baz bong"}
iex> fetch_field(changeset, :not_a_field)
:error
"""
@spec fetch_field(t, atom) :: {:changes, term} | {:data, term} | :error
def fetch_field(%Changeset{changes: changes, data: data, types: types}, key) when is_atom(key) do
case Map.fetch(changes, key) do
{:ok, value} ->
{:changes, change_as_field(types, key, value)}
:error ->
case Map.fetch(data, key) do
{:ok, value} -> {:data, data_as_field(data, types, key, value)}
:error -> :error
end
end
end
@doc """
Same as `fetch_field/2` but returns the value or raises if the given key was not found.
## Examples
iex> post = %Post{title: "Foo", body: "Bar baz bong"}
iex> changeset = change(post, %{title: "New title"})
iex> fetch_field!(changeset, :title)
"New title"
iex> fetch_field!(changeset, :other)
** (KeyError) key :other not found in: %Post{...}
"""
@spec fetch_field!(t, atom) :: term
def fetch_field!(changeset, key) do
case fetch_field(changeset, key) do
{_, value} ->
value
:error ->
raise KeyError, key: key, term: changeset.data
end
end
@doc """
Gets a field from changes or from the data.
While `get_change/3` only looks at the current `changes`
to retrieve a value, this function looks at the changes and
then falls back on the data, finally returning `default` if
no value is available.
For relations, these functions will return the changeset data
  with changes applied. To retrieve raw changesets, please use `get_change/3`.

  ## Examples
iex> post = %Post{title: "A title", body: "My body is a cage"}
iex> changeset = change(post, %{title: "A new title"})
iex> get_field(changeset, :title)
"A new title"
iex> get_field(changeset, :not_a_field, "Told you, not a field!")
"Told you, not a field!"
"""
@spec get_field(t, atom, term) :: term
def get_field(%Changeset{changes: changes, data: data, types: types}, key, default \\ nil) do
case Map.fetch(changes, key) do
{:ok, value} ->
change_as_field(types, key, value)
:error ->
case Map.fetch(data, key) do
{:ok, value} -> data_as_field(data, types, key, value)
:error -> default
end
end
end
defp change_as_field(types, key, value) do
case Map.get(types, key) do
{tag, relation} when tag in @relations ->
Relation.apply_changes(relation, value)
_other ->
value
end
end
defp data_as_field(data, types, key, value) do
case Map.get(types, key) do
{tag, _relation} when tag in @relations ->
Relation.load!(data, value)
_other ->
value
end
end
@doc """
Fetches a change from the given changeset.
This function only looks at the `:changes` field of the given `changeset` and
returns `{:ok, value}` if the change is present or `:error` if it's not.
## Examples
iex> changeset = change(%Post{body: "foo"}, %{title: "bar"})
iex> fetch_change(changeset, :title)
{:ok, "bar"}
iex> fetch_change(changeset, :body)
:error
"""
@spec fetch_change(t, atom) :: {:ok, term} | :error
def fetch_change(%Changeset{changes: changes} = _changeset, key) when is_atom(key) do
Map.fetch(changes, key)
end
@doc """
Same as `fetch_change/2` but returns the value or raises if the given key was not found.
## Examples
iex> changeset = change(%Post{body: "foo"}, %{title: "bar"})
iex> fetch_change!(changeset, :title)
"bar"
iex> fetch_change!(changeset, :body)
** (KeyError) key :body not found in: %{title: "bar"}
"""
@spec fetch_change!(t, atom) :: term
def fetch_change!(changeset, key) do
case fetch_change(changeset, key) do
{:ok, value} ->
value
:error ->
raise KeyError, key: key, term: changeset.changes
end
end
@doc """
Gets a change or returns a default value.
## Examples
iex> changeset = change(%Post{body: "foo"}, %{title: "bar"})
iex> get_change(changeset, :title)
"bar"
iex> get_change(changeset, :body)
nil
"""
@spec get_change(t, atom, term) :: term
def get_change(%Changeset{changes: changes} = _changeset, key, default \\ nil) when is_atom(key) do
Map.get(changes, key, default)
end
@doc """
Updates a change.
The given `function` is invoked with the change value only if there
is a change for `key`. Note that the value of the change
can still be `nil` (unless the field was marked as required on `validate_required/3`).
## Examples
iex> changeset = change(%Post{}, %{impressions: 1})
iex> changeset = update_change(changeset, :impressions, &(&1 + 1))
iex> changeset.changes.impressions
2
"""
@spec update_change(t, atom, (term -> term)) :: t
def update_change(%Changeset{changes: changes} = changeset, key, function) when is_atom(key) do
case Map.fetch(changes, key) do
{:ok, value} ->
put_change(changeset, key, function.(value))
:error ->
changeset
end
end
@doc """
Puts a change on the given `key` with `value`.
`key` is an atom that represents any field, embed or
association in the changeset. Note the `value` is directly
stored in the changeset with no validation whatsoever.
For this reason, this function is meant for working with
data internal to the application.
If the change is already present, it is overridden with
the new value. If the change has the same value as in the
changeset data, it is not added to the list of changes.
When changing embeds and associations, see `put_assoc/4`
for a complete reference on the accepted values.
## Examples
iex> changeset = change(%Post{}, %{title: "foo"})
iex> changeset = put_change(changeset, :title, "bar")
iex> changeset.changes
%{title: "bar"}
iex> changeset = change(%Post{title: "foo"})
iex> changeset = put_change(changeset, :title, "foo")
iex> changeset.changes
%{}
"""
@spec put_change(t, atom, term) :: t
def put_change(%Changeset{types: nil}, _key, _value) do
raise ArgumentError, "changeset does not have types information"
end
def put_change(%Changeset{data: data, types: types} = changeset, key, value) do
type = Map.get(types, key)
{changes, errors, valid?} =
put_change(data, changeset.changes, changeset.errors, changeset.valid?, key, value, type)
%{changeset | changes: changes, errors: errors, valid?: valid?}
end
defp put_change(data, changes, errors, valid?, key, value, {tag, relation})
when tag in @relations do
original = Map.get(data, key)
current = Relation.load!(data, original)
case Relation.change(relation, value, current) do
{:ok, change, relation_valid?} when change != original ->
{Map.put(changes, key, change), errors, valid? and relation_valid?}
{:error, error} ->
{changes, [{key, error} | errors], false}
# ignore or ok with change == original
_ ->
{Map.delete(changes, key), errors, valid?}
end
end
defp put_change(data, _changes, _errors, _valid?, key, _value, nil) when is_atom(key) do
raise ArgumentError, "unknown field `#{inspect(key)}` in #{inspect(data)}"
end
defp put_change(_data, _changes, _errors, _valid?, key, _value, nil) when not is_atom(key) do
raise ArgumentError, "field names given to change/put_change must be atoms, got: `#{inspect(key)}`"
end
defp put_change(data, changes, errors, valid?, key, value, type) do
if not Ecto.Type.equal?(type, Map.get(data, key), value) do
{Map.put(changes, key, value), errors, valid?}
else
{Map.delete(changes, key), errors, valid?}
end
end
@doc """
Puts the given association entry or entries as a change in the changeset.
This function is used to work with associations as a whole. For example,
if a Post has many Comments, it allows you to add, remove or change all
comments at once. If your goal is to simply add a new comment to a post,
then it is preferred to do so manually, as we will describe later in the
"Example: Adding a comment to a post" section.
This function requires the associated data to have been preloaded, except
when the parent changeset has been newly built and not yet persisted.
Missing data will invoke the `:on_replace` behaviour defined on the
association.
For associations with cardinality one, `nil` can be used to remove the existing
entry. For associations with many entries, an empty list may be given instead.
If the association has no changes, it will be skipped. If the association is
invalid, the changeset will be marked as invalid. If the given value is not any
  of the values below, it will raise.
The associated data may be given in different formats:
* a map or a keyword list representing changes to be applied to the
associated data. A map or keyword list can be given to update the
associated data as long as they have matching primary keys.
For example, `put_assoc(changeset, :comments, [%{id: 1, title: "changed"}])`
will locate the comment with `:id` of 1 and update its title.
If no comment with such id exists, one is created on the fly.
Since only a single comment was given, any other associated comment
  will be replaced. In all cases, the keys are expected to be atoms.
  Unlike `cast_assoc/3` and `cast_embed/3`, the given map (or struct)
is not validated in any way and will be inserted as is.
  This API is mostly used in scripts and tests, to make it
  straightforward to create schemas with associations at once, such as:
Ecto.Changeset.change(
%Post{},
title: "foo",
comments: [
%{body: "first"},
%{body: "second"}
]
)
* changesets or structs - when a changeset or struct is given, they
are treated as the canonical data and the associated data currently
stored in the association is ignored. For instance, the operation
`put_assoc(changeset, :comments, [%Comment{id: 1, title: "changed"}])`
will send the `Comment` as is to the database, ignoring any comment
currently associated, even if a matching ID is found. If the comment
is already persisted to the database, then `put_assoc/4` only takes
care of guaranteeing that the comments and the parent data are associated.
  This is extremely useful when associating existing data, as we will see
in the "Example: Adding tags to a post" section.
Once the parent changeset is given to an `Ecto.Repo` function, all entries
will be inserted/updated/deleted within the same transaction.
## Example: Adding a comment to a post
Imagine a relationship where Post has many comments and you want to add a
new comment to an existing post. While it is possible to use `put_assoc/4`
for this, it would be unnecessarily complex. Let's see an example.
First, let's fetch the post with all existing comments:
post = Post |> Repo.get!(1) |> Repo.preload(:comments)
The following approach is **wrong**:
post
|> Ecto.Changeset.change()
|> Ecto.Changeset.put_assoc(:comments, [%Comment{body: "bad example!"}])
|> Repo.update!()
The reason why the example above is wrong is because `put_assoc/4` always
works with the **full data**. So the example above will effectively **erase
all previous comments** and only keep the comment you are currently adding.
Instead, you could try:
post
|> Ecto.Changeset.change()
|> Ecto.Changeset.put_assoc(:comments, [%Comment{body: "so-so example!"} | post.comments])
|> Repo.update!()
In this example, we prepend the new comment to the list of existing comments.
Ecto will diff the list of comments currently in `post` with the list of comments
given, and correctly insert the new comment to the database. Note, however,
Ecto is doing a lot of work just to figure out something we knew since the
beginning, which is that there is only one new comment.
In cases like above, when you want to work only on a single entry, it is
much easier to simply work on the associated data directly. For example, we
could instead set the `post` association in the comment:
%Comment{body: "better example"}
|> Ecto.Changeset.change()
|> Ecto.Changeset.put_assoc(:post, post)
|> Repo.insert!()
Alternatively, we can make sure that when we create a comment, it is already
associated to the post:
Ecto.build_assoc(post, :comments)
|> Ecto.Changeset.change(body: "great example!")
|> Repo.insert!()
Or we can simply set the post_id in the comment itself:
%Comment{body: "better example", post_id: post.id}
|> Repo.insert!()
In other words, when you find yourself wanting to work only with a subset
of the data, then using `put_assoc/4` is most likely unnecessary. Instead,
you want to work on the other side of the association.
Let's see an example where using `put_assoc/4` is a good fit.
## Example: Adding tags to a post
Imagine you are receiving a set of tags you want to associate to a post.
Let's imagine that those tags exist upfront and are all persisted to the
database. Imagine we get the data in this format:
params = %{"title" => "new post", "tags" => ["learner"]}
Now, since the tags already exist, we will bring all of them from the
database and put them directly in the post:
tags = Repo.all(from t in Tag, where: t.name in ^params["tags"])
post
|> Repo.preload(:tags)
|> Ecto.Changeset.cast(params, [:title]) # No need to allow :tags as we put them directly
|> Ecto.Changeset.put_assoc(:tags, tags) # Explicitly set the tags
Since in this case we always require the user to pass all tags
directly, using `put_assoc/4` is a great fit. It will automatically
remove any tag not given and properly associate all of the given
tags with the post.
Furthermore, since the tag information is given as structs read directly
from the database, Ecto will treat the data as correct and only do the
minimum necessary to guarantee that posts and tags are associated,
without trying to update or diff any of the fields in the tag struct.
Although it accepts an `opts` argument, there are no options currently
supported by `put_assoc/4`.
"""
def put_assoc(%Changeset{} = changeset, name, value, opts \\ []) do
put_relation(:assoc, changeset, name, value, opts)
end
@doc """
Puts the given embed entry or entries as a change in the changeset.
This function is used to work with embeds as a whole. For embeds with
cardinality one, `nil` can be used to remove the existing entry. For
embeds with many entries, an empty list may be given instead.
If the embed has no changes, it will be skipped. If the embed is
invalid, the changeset will be marked as invalid.
The list of supported values and their behaviour is described in
`put_assoc/4`. If the given value is not any of values listed there,
it will raise.
Although this function accepts an `opts` argument, there are no options
currently supported by `put_embed/4`.
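## Examples
A minimal sketch, assuming a `Post` schema declaring
`embeds_one :metadata, Metadata` (all names here are hypothetical):
    post
    |> Ecto.Changeset.change()
    |> Ecto.Changeset.put_embed(:metadata, %Metadata{tags: ["news"]})
    # with `on_replace: :delete` on the embed, `nil` removes the entry
    post
    |> Ecto.Changeset.change()
    |> Ecto.Changeset.put_embed(:metadata, nil)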
"""
def put_embed(%Changeset{} = changeset, name, value, opts \\ []) do
put_relation(:embed, changeset, name, value, opts)
end
defp put_relation(_tag, %{types: nil}, _name, _value, _opts) do
raise ArgumentError, "changeset does not have types information"
end
defp put_relation(tag, changeset, name, value, _opts) do
%{data: data, types: types, changes: changes, errors: errors, valid?: valid?} = changeset
relation = relation!(:put, tag, name, Map.get(types, name))
{changes, errors, valid?} =
put_change(data, changes, errors, valid?, name, value, {tag, relation})
%{changeset | changes: changes, errors: errors, valid?: valid?}
end
@doc """
Forces a change on the given `key` with `value`.
If the change is already present, it is overridden with
the new value.
## Examples
iex> changeset = change(%Post{author: "bar"}, %{title: "foo"})
iex> changeset = force_change(changeset, :title, "bar")
iex> changeset.changes
%{title: "bar"}
iex> changeset = force_change(changeset, :author, "bar")
iex> changeset.changes
%{title: "bar", author: "bar"}
"""
@spec force_change(t, atom, term) :: t
def force_change(%Changeset{types: nil}, _key, _value) do
raise ArgumentError, "changeset does not have types information"
end
def force_change(%Changeset{types: types} = changeset, key, value) do
case Map.get(types, key) do
{tag, _} when tag in @relations ->
raise "changing #{tag}s with force_change/3 is not supported, " <>
"please use put_#{tag}/4 instead"
nil ->
raise ArgumentError, "unknown field `#{inspect(key)}` in #{inspect(changeset.data)}"
_ ->
put_in changeset.changes[key], value
end
end
@doc """
Deletes a change with the given key.
## Examples
iex> changeset = change(%Post{}, %{title: "foo"})
iex> changeset = delete_change(changeset, :title)
iex> get_change(changeset, :title)
nil
"""
@spec delete_change(t, atom) :: t
def delete_change(%Changeset{} = changeset, key) when is_atom(key) do
update_in changeset.changes, &Map.delete(&1, key)
end
@doc """
Applies the changeset changes to the changeset data.
This operation will return the underlying data with changes
regardless of whether the changeset is valid or not.
## Examples
iex> changeset = change(%Post{author: "bar"}, %{title: "foo"})
iex> apply_changes(changeset)
%Post{author: "bar", title: "foo"}
"""
@spec apply_changes(t) :: Ecto.Schema.t | data
def apply_changes(%Changeset{changes: changes, data: data}) when changes == %{} do
data
end
def apply_changes(%Changeset{changes: changes, data: data, types: types}) do
Enum.reduce(changes, data, fn {key, value}, acc ->
case Map.fetch(types, key) do
{:ok, {tag, relation}} when tag in @relations ->
apply_relation_changes(acc, key, relation, value)
{:ok, _} ->
Map.put(acc, key, value)
:error ->
acc
end
end)
end
@doc """
Applies the changeset action only if the changes are valid.
If the changes are valid, all changes are applied to the changeset data.
If the changes are invalid, no changes are applied, and an error tuple
is returned with the changeset containing the attempted action.
The action may be any atom.
## Examples
iex> {:ok, data} = apply_action(changeset, :update)
iex> {:error, changeset} = apply_action(changeset, :update)
%Ecto.Changeset{action: :update}
"""
@spec apply_action(t, atom) :: {:ok, Ecto.Schema.t() | data} | {:error, t}
def apply_action(%Changeset{} = changeset, action) when is_atom(action) do
if changeset.valid? do
{:ok, apply_changes(changeset)}
else
{:error, %Changeset{changeset | action: action}}
end
end
def apply_action(%Changeset{}, action) do
raise ArgumentError, "expected action to be an atom, got: #{inspect action}"
end
@doc """
Applies the changeset action if the changes are valid or raises an error.
## Examples
iex> changeset = change(%Post{author: "bar"}, %{title: "foo"})
iex> apply_action!(changeset, :update)
%Post{author: "bar", title: "foo"}
iex> changeset = change(%Post{author: "bar"}, %{title: :bad})
iex> apply_action!(changeset, :update)
** (Ecto.InvalidChangesetError) could not perform update because changeset is invalid.
See `apply_action/2` for more information.
"""
@spec apply_action!(t, atom) :: Ecto.Schema.t() | data
def apply_action!(%Changeset{} = changeset, action) do
case apply_action(changeset, action) do
{:ok, data} ->
data
{:error, changeset} ->
raise Ecto.InvalidChangesetError, action: action, changeset: changeset
end
end
## Validations
@doc ~S"""
Returns a keyword list of the validations for this changeset.
The keys in the list are the names of fields, and the values are a
validation associated with the field. A field may occur multiple
times in the list.
## Example
%Post{}
|> change()
|> validate_format(:title, ~r/^\w+:\s/, message: "must start with a topic")
|> validate_length(:title, max: 100)
|> validations()
#=> [
title: {:length, [ max: 100 ]},
title: {:format, ~r/^\w+:\s/}
]
The following validations may be included in the result. The list is
not necessarily exhaustive. For example, custom validations written
by the developer will also appear in our return value.
This first group contains validations that hold a keyword list of validators.
This list may also include a `:message` key.
* `{:length, [option]}`
* `min: n`
* `max: n`
* `is: n`
* `count: :graphemes | :codepoints`
* `{:number, [option]}`
* `equal_to: n`
* `greater_than: n`
* `greater_than_or_equal_to: n`
* `less_than: n`
* `less_than_or_equal_to: n`
The other validators simply take a value:
* `{:exclusion, Enum.t}`
* `{:format, ~r/pattern/}`
* `{:inclusion, Enum.t}`
* `{:subset, Enum.t}`
Note that calling `validate_required/3` does not store the validation under the
`changeset.validations` key (and so won't be included in the result of this
function). The required fields are stored under the `changeset.required` key.
"""
@spec validations(t) :: [{atom, term}]
def validations(%Changeset{validations: validations}) do
validations
end
@doc """
Adds an error to the changeset.
An additional keyword list `keys` can be passed to provide additional
contextual information for the error. This is useful when using
`traverse_errors/2` and when translating errors with `Gettext`.
## Examples
iex> changeset = change(%Post{}, %{title: ""})
iex> changeset = add_error(changeset, :title, "empty")
iex> changeset.errors
[title: {"empty", []}]
iex> changeset.valid?
false
iex> changeset = change(%Post{}, %{title: ""})
iex> changeset = add_error(changeset, :title, "empty", additional: "info")
iex> changeset.errors
[title: {"empty", [additional: "info"]}]
iex> changeset.valid?
false
iex> changeset = change(%Post{}, %{tags: ["ecto", "elixir", "x"]})
iex> changeset = add_error(changeset, :tags, "tag '%{val}' is too short", val: "x")
iex> changeset.errors
[tags: {"tag '%{val}' is too short", [val: "x"]}]
iex> changeset.valid?
false
"""
@spec add_error(t, atom, String.t, Keyword.t) :: t
def add_error(%Changeset{errors: errors} = changeset, key, message, keys \\ []) when is_binary(message) do
%{changeset | errors: [{key, {message, keys}}|errors], valid?: false}
end
@doc """
Validates the given `field` change.
It invokes the `validator` function to perform the validation
only if a change for the given `field` exists and the change
value is not `nil`. The function must return a list of errors
(with an empty list meaning no errors).
In case there's at least one error, the list of errors will be appended to the
`:errors` field of the changeset and the `:valid?` flag will be set to
`false`.
## Examples
iex> changeset = change(%Post{}, %{title: "foo"})
iex> changeset = validate_change changeset, :title, fn :title, title ->
...> # Value must not be "foo"!
...> if title == "foo" do
...> [title: "cannot be foo"]
...> else
...> []
...> end
...> end
iex> changeset.errors
[title: {"cannot be foo", []}]
"""
@spec validate_change(t, atom, (atom, term -> [{atom, String.t} | {atom, {String.t, Keyword.t}}])) :: t
def validate_change(%Changeset{} = changeset, field, validator) when is_atom(field) do
%{changes: changes, errors: errors} = changeset
ensure_field_exists!(changeset, field)
value = Map.get(changes, field)
new = if is_nil(value), do: [], else: validator.(field, value)
new =
Enum.map(new, fn
{key, val} when is_atom(key) and is_binary(val) ->
{key, {val, []}}
{key, {val, opts}} when is_atom(key) and is_binary(val) and is_list(opts) ->
{key, {val, opts}}
end)
case new do
[] -> changeset
[_|_] -> %{changeset | errors: new ++ errors, valid?: false}
end
end
@doc """
Stores the validation `metadata` and validates the given `field` change.
Similar to `validate_change/3` but stores the validation metadata
into the changeset validators. The validator metadata is often used
as a reflection mechanism, to automatically generate code based on
the available validations.
## Examples
iex> changeset = change(%Post{}, %{title: "foo"})
iex> changeset = validate_change changeset, :title, :useless_validator, fn
...> _, _ -> []
...> end
iex> changeset.validations
[title: :useless_validator]
"""
@spec validate_change(t, atom, term, (atom, term -> [{atom, String.t} | {atom, {String.t, Keyword.t}}])) :: t
def validate_change(%Changeset{validations: validations} = changeset,
field, metadata, validator) do
changeset = %{changeset | validations: [{field, metadata}|validations]}
validate_change(changeset, field, validator)
end
@doc """
Validates that one or more fields are present in the changeset.
You can pass a single field name or a list of field names that
are required.
If the value of a field is `nil` or a string made only of whitespace,
the changeset is marked as invalid, the field is removed from the
changeset's changes, and an error is added. An error won't be added if
the field already has an error.
If a field is given to `validate_required/3` but it has not been passed
as parameter during `cast/3` (i.e. it has not been changed), then
`validate_required/3` will check for its current value in the data.
If the data contains a non-empty value for the field, then no error is
added. This allows developers to use `validate_required/3` to perform
partial updates. For example, on `insert` all fields would be required,
because their default values on the data are all `nil`, but on `update`,
if you don't want to change a field that has been previously set,
you are not required to pass it as a parameter, since `validate_required/3`
won't add an error for missing changes as long as the value in the
data given to the `changeset` is not empty.
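For illustration, a minimal sketch of such a partial update, assuming a
`Post` schema with a `:title` field:
    # On insert, the data has no title, so a missing parameter adds an error:
    %Post{} |> cast(%{}, [:title]) |> validate_required(:title)
    #=> errors: [title: {"can't be blank", [validation: :required]}]
    # On update, the data already holds a title, so the same params pass:
    %Post{title: "kept"} |> cast(%{}, [:title]) |> validate_required(:title)
    #=> valid?: true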
Do not use this function to validate associations that are required,
instead pass the `:required` option to `cast_assoc/3`.
Opposite to other validations, calling this function does not store
the validation under the `changeset.validations` key. Instead, it
stores all required fields under `changeset.required`.
## Options
* `:message` - the message on failure, defaults to "can't be blank"
* `:trim` - a boolean that sets whether whitespaces are removed before
running the validation on binaries/strings, defaults to true
## Examples
validate_required(changeset, :title)
validate_required(changeset, [:title, :body])
"""
@spec validate_required(t, list | atom, Keyword.t) :: t
def validate_required(%Changeset{} = changeset, fields, opts \\ []) when not is_nil(fields) do
%{required: required, errors: errors, changes: changes} = changeset
trim = Keyword.get(opts, :trim, true)
fields = List.wrap(fields)
fields_with_errors =
for field <- fields,
missing?(changeset, field, trim),
ensure_field_exists!(changeset, field),
is_nil(errors[field]),
do: field
case fields_with_errors do
[] ->
%{changeset | required: fields ++ required}
_ ->
message = message(opts, "can't be blank")
new_errors = Enum.map(fields_with_errors, &{&1, {message, [validation: :required]}})
changes = Map.drop(changes, fields_with_errors)
%{changeset | changes: changes, required: fields ++ required, errors: new_errors ++ errors, valid?: false}
end
end
@doc """
Validates that no existing record with a different primary key
has the same values for these fields.
This function exists to provide quick feedback to users of your
application. It should not be relied on for any data guarantee as it
has race conditions and is inherently unsafe. For example, if this
check happens twice in the same time interval (because the user
submitted a form twice), both checks may pass and you may end up with
duplicate entries in the database. Therefore, a `unique_constraint/3`
should also be used to ensure your data won't get corrupted.
However, because constraints are only checked if all validations
succeed, this function can be used as an early check to provide
early feedback to users, since most conflicting data will have been
inserted prior to the current validation phase.
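A common pattern, sketched below, is to pair this early check with the
constraint itself (`MyApp.Repo` stands in for your repo module):
    user
    |> cast(params, [:email])
    |> unsafe_validate_unique(:email, MyApp.Repo)
    |> unique_constraint(:email)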
## Options
* `:message` - the message in case the constraint check fails,
defaults to "has already been taken".
* `:match` - how the changeset constraint name is matched against the
repo constraint, may be `:exact` or `:suffix`. Defaults to `:exact`.
`:suffix` matches any repo constraint which `ends_with?` `:name`
to this changeset constraint.
* `:error_key` - the key to which changeset error will be added when
check fails, defaults to the first field name of the given list of
fields.
* `:prefix` - the prefix to run the query on (such as the schema path
in Postgres or the database in MySQL). See `Ecto.Repo` documentation
for more information.
* `:repo_opts` - the options to pass to the `Ecto.Repo` call.
* `:query` - the base query to use for the check. Defaults to the schema of
the changeset. If the primary key is set, a clause will be added to exclude
the changeset row itself from the check.
## Examples
unsafe_validate_unique(changeset, :city_name, repo)
unsafe_validate_unique(changeset, [:city_name, :state_name], repo)
unsafe_validate_unique(changeset, [:city_name, :state_name], repo, message: "city must be unique within state")
unsafe_validate_unique(changeset, [:city_name, :state_name], repo, prefix: "public")
unsafe_validate_unique(changeset, [:city_name, :state_name], repo, query: from(c in City, where: is_nil(c.deleted_at)))
"""
@spec unsafe_validate_unique(t, atom | [atom, ...], Ecto.Repo.t, Keyword.t) :: t
def unsafe_validate_unique(changeset, fields, repo, opts \\ []) when is_list(opts) do
fields = List.wrap(fields)
{repo_opts, opts} = Keyword.pop(opts, :repo_opts, [])
{validations, schema} =
case changeset do
%Ecto.Changeset{validations: validations, data: %schema{}} ->
{validations, schema}
%Ecto.Changeset{} ->
raise ArgumentError, "unsafe_validate_unique/4 does not work with schemaless changesets"
end
changeset = %{changeset | validations: [{hd(fields), {:unsafe_unique, fields: fields}} | validations]}
where_clause = for field <- fields do
{field, get_field(changeset, field)}
end
# No need to query if there is a prior error for the fields
any_prior_errors_for_fields? = Enum.any?(changeset.errors, &(elem(&1, 0) in fields))
# No need to query if we haven't changed any of the fields in question
unrelated_changes? = Enum.all?(fields, ¬ Map.has_key?(changeset.changes, &1))
# If we don't have values for all fields, we can't query for uniqueness
any_nil_values_for_fields? = Enum.any?(where_clause, &(&1 |> elem(1) |> is_nil()))
if unrelated_changes? || any_nil_values_for_fields? || any_prior_errors_for_fields? do
changeset
else
query =
Keyword.get(opts, :query, schema)
|> maybe_exclude_itself(schema, changeset)
|> Ecto.Query.where(^where_clause)
|> Ecto.Query.select(true)
|> Ecto.Query.limit(1)
query =
if prefix = opts[:prefix] do
Ecto.Query.put_query_prefix(query, prefix)
else
query
end
if repo.one(query, repo_opts) do
error_key = Keyword.get(opts, :error_key, hd(fields))
add_error(changeset, error_key, message(opts, "has already been taken"),
validation: :unsafe_unique, fields: fields)
else
changeset
end
end
end
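# Excludes the changeset's own row from the uniqueness query by matching on
# its primary key(s), so an update does not conflict with itself.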
defp maybe_exclude_itself(base_query, schema, changeset) do
:primary_key
|> schema.__schema__()
|> Enum.map(&{&1, get_field(changeset, &1)})
|> case do
[{_pk_field, nil} | _remaining_pks] ->
base_query
[{pk_field, value} | remaining_pks] ->
# generate a clean query (one that does not start with 'TRUE OR ...')
first_expr = Ecto.Query.dynamic([q], field(q, ^pk_field) == ^value)
Enum.reduce_while(remaining_pks, first_expr, fn
{_pk_field, nil}, _expr ->
{:halt, nil}
{pk_field, value}, expr ->
{:cont, Ecto.Query.dynamic([q], ^expr and field(q, ^pk_field) == ^value)}
end)
|> case do
nil ->
base_query
matches_pk ->
Ecto.Query.where(base_query, ^Ecto.Query.dynamic(not (^matches_pk)))
end
[] ->
base_query
end
end
defp ensure_field_exists!(%Changeset{types: types, data: data}, field) do
unless Map.has_key?(types, field) do
raise ArgumentError, "unknown field #{inspect(field)} in #{inspect(data)}"
end
true
end
defp missing?(changeset, field, trim) when is_atom(field) do
case get_field(changeset, field) do
%{__struct__: Ecto.Association.NotLoaded} ->
raise ArgumentError, "attempting to validate association `#{field}` " <>
"that was not loaded. Please preload your associations " <>
"before calling validate_required/3 or pass the :required " <>
"option to Ecto.Changeset.cast_assoc/3"
value when is_binary(value) and trim -> String.trim_leading(value) == ""
value when is_binary(value) -> value == ""
nil -> true
_ -> false
end
end
defp missing?(_changeset, field, _trim) do
raise ArgumentError, "validate_required/3 expects field names to be atoms, got: `#{inspect field}`"
end
@doc """
Validates a change has the given format.
The format has to be expressed as a regular expression.
## Options
* `:message` - the message on failure, defaults to "has invalid format"
## Examples
validate_format(changeset, :email, ~r/@/)
"""
@spec validate_format(t, atom, Regex.t, Keyword.t) :: t
def validate_format(changeset, field, format, opts \\ []) do
validate_change changeset, field, {:format, format}, fn _, value ->
if value =~ format, do: [], else: [{field, {message(opts, "has invalid format"), [validation: :format]}}]
end
end
@doc """
Validates a change is included in the given enumerable.
## Options
* `:message` - the message on failure, defaults to "is invalid"
## Examples
validate_inclusion(changeset, :cardinal_direction, ["north", "east", "south", "west"])
validate_inclusion(changeset, :age, 0..99)
"""
@spec validate_inclusion(t, atom, Enum.t, Keyword.t) :: t
def validate_inclusion(changeset, field, data, opts \\ []) do
validate_change changeset, field, {:inclusion, data}, fn _, value ->
type =
changeset.types
|> Map.fetch!(field)
|> Ecto.Type.type()
if Ecto.Type.include?(type, value, data),
do: [],
else: [{field, {message(opts, "is invalid"), [validation: :inclusion, enum: data]}}]
end
end
@doc ~S"""
Validates a change, of array type, is a subset of the given enumerable.
This validates if a list of values belongs to the given enumerable.
If you need to validate if a single value is inside the given enumerable,
you should use `validate_inclusion/4` instead.
## Options
* `:message` - the message on failure, defaults to "has an invalid entry"
## Examples
validate_subset(changeset, :pets, ["cat", "dog", "parrot"])
validate_subset(changeset, :lottery_numbers, 0..99)
"""
@spec validate_subset(t, atom, Enum.t, Keyword.t) :: t
def validate_subset(changeset, field, data, opts \\ []) do
validate_change changeset, field, {:subset, data}, fn _, value ->
{:array, element_type} =
changeset.types
|> Map.fetch!(field)
|> Ecto.Type.type()
case Enum.any?(value, fn element -> not Ecto.Type.include?(element_type, element, data) end) do
true -> [{field, {message(opts, "has an invalid entry"), [validation: :subset, enum: data]}}]
false -> []
end
end
end
@doc """
Validates a change is not included in the given enumerable.
## Options
* `:message` - the message on failure, defaults to "is reserved"
## Examples
validate_exclusion(changeset, :name, ~w(admin superadmin))
"""
@spec validate_exclusion(t, atom, Enum.t, Keyword.t) :: t
def validate_exclusion(changeset, field, data, opts \\ []) do
validate_change changeset, field, {:exclusion, data}, fn _, value ->
type =
changeset.types
|> Map.fetch!(field)
|> Ecto.Type.type()
if Ecto.Type.include?(type, value, data), do:
[{field, {message(opts, "is reserved"), [validation: :exclusion, enum: data]}}], else: []
end
end
@doc """
Validates a change is a string or list of the given length.
Note that the length of a string is counted in graphemes by default. If using
this validation to match a character limit of a database backend,
it's likely that the limit ignores graphemes and limits the number
of Unicode characters. In that case, consider using the `:count` option to
limit the number of codepoints (`:codepoints`), or limit the number of bytes (`:bytes`).
## Options
* `:is` - the length must be exactly this value
* `:min` - the length must be greater than or equal to this value
* `:max` - the length must be less than or equal to this value
* `:count` - what length to count for string, `:graphemes` (default), `:codepoints` or `:bytes`
* `:message` - the message on failure, depending on the validation, is one of:
* for strings:
* "should be %{count} character(s)"
* "should be at least %{count} character(s)"
* "should be at most %{count} character(s)"
* for binary:
* "should be %{count} byte(s)"
* "should be at least %{count} byte(s)"
* "should be at most %{count} byte(s)"
* for lists:
* "should have %{count} item(s)"
* "should have at least %{count} item(s)"
* "should have at most %{count} item(s)"
## Examples
validate_length(changeset, :title, min: 3)
validate_length(changeset, :title, max: 100)
validate_length(changeset, :title, min: 3, max: 100)
validate_length(changeset, :code, is: 9)
validate_length(changeset, :topics, is: 2)
validate_length(changeset, :icon, count: :bytes, max: 1024 * 16)
"""
@spec validate_length(t, atom, Keyword.t) :: t
def validate_length(changeset, field, opts) when is_list(opts) do
validate_change changeset, field, {:length, opts}, fn
_, value ->
count_type = opts[:count] || :graphemes
{type, length} = case {value, count_type} do
{value, :codepoints} when is_binary(value) ->
{:string, codepoints_length(value, 0)}
{value, :graphemes} when is_binary(value) ->
{:string, String.length(value)}
{value, :bytes} when is_binary(value) ->
{:binary, byte_size(value)}
{value, _} when is_list(value) ->
{:list, list_length(changeset, field, value)}
end
error = ((is = opts[:is]) && wrong_length(type, length, is, opts)) ||
((min = opts[:min]) && too_short(type, length, min, opts)) ||
((max = opts[:max]) && too_long(type, length, max, opts))
if error, do: [{field, error}], else: []
end
end
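# Counts codepoints by hand: each valid UTF-8 codepoint counts as one, and
# any leftover byte from an invalid sequence also counts as one.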
defp codepoints_length(<<_::utf8, rest::binary>>, acc), do: codepoints_length(rest, acc + 1)
defp codepoints_length(<<_, rest::binary>>, acc), do: codepoints_length(rest, acc + 1)
defp codepoints_length(<<>>, acc), do: acc
defp list_length(%{types: types}, field, value) do
case Map.fetch(types, field) do
{:ok, {tag, _association}} when tag in [:embed, :assoc] ->
length(Relation.filter_empty(value))
_ ->
length(value)
end
end
defp wrong_length(_type, value, value, _opts), do: nil
defp wrong_length(:string, _length, value, opts), do:
{message(opts, "should be %{count} character(s)"), count: value, validation: :length, kind: :is, type: :string}
defp wrong_length(:binary, _length, value, opts), do:
{message(opts, "should be %{count} byte(s)"), count: value, validation: :length, kind: :is, type: :binary}
defp wrong_length(:list, _length, value, opts), do:
{message(opts, "should have %{count} item(s)"), count: value, validation: :length, kind: :is, type: :list}
defp too_short(_type, length, value, _opts) when length >= value, do: nil
defp too_short(:string, _length, value, opts), do:
{message(opts, "should be at least %{count} character(s)"), count: value, validation: :length, kind: :min, type: :string}
defp too_short(:binary, _length, value, opts), do:
{message(opts, "should be at least %{count} byte(s)"), count: value, validation: :length, kind: :min, type: :binary}
defp too_short(:list, _length, value, opts), do:
{message(opts, "should have at least %{count} item(s)"), count: value, validation: :length, kind: :min, type: :list}
defp too_long(_type, length, value, _opts) when length <= value, do: nil
defp too_long(:string, _length, value, opts), do:
{message(opts, "should be at most %{count} character(s)"), count: value, validation: :length, kind: :max, type: :string}
defp too_long(:binary, _length, value, opts), do:
{message(opts, "should be at most %{count} byte(s)"), count: value, validation: :length, kind: :max, type: :binary}
defp too_long(:list, _length, value, opts), do:
{message(opts, "should have at most %{count} item(s)"), count: value, validation: :length, kind: :max, type: :list}
@doc """
Validates the properties of a number.
## Options
* `:less_than`
* `:greater_than`
* `:less_than_or_equal_to`
* `:greater_than_or_equal_to`
* `:equal_to`
* `:not_equal_to`
* `:message` - the message on failure, defaults to one of:
* "must be less than %{number}"
* "must be greater than %{number}"
* "must be less than or equal to %{number}"
* "must be greater than or equal to %{number}"
* "must be equal to %{number}"
* "must be not equal to %{number}"
## Examples
validate_number(changeset, :count, less_than: 3)
validate_number(changeset, :pi, greater_than: 3, less_than: 4)
validate_number(changeset, :the_answer_to_life_the_universe_and_everything, equal_to: 42)
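When the field is of type `:decimal`, the boundary may also be given as a
`Decimal` struct (a sketch; the `:price` field is hypothetical):
    validate_number(changeset, :price, greater_than: Decimal.new("0.5"))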
"""
@spec validate_number(t, atom, Keyword.t) :: t
def validate_number(changeset, field, opts) do
validate_change changeset, field, {:number, opts}, fn
field, value ->
{message, opts} = Keyword.pop(opts, :message)
Enum.find_value opts, [], fn {spec_key, target_value} ->
case Map.fetch(@number_validators, spec_key) do
{:ok, {spec_function, default_message}} ->
validate_number(field, value, message || default_message,
spec_key, spec_function, target_value)
:error ->
supported_options = @number_validators |> Map.keys() |> Enum.map_join("\n", &" * #{inspect(&1)}")
raise ArgumentError, """
unknown option #{inspect spec_key} given to validate_number/3
The supported options are:
#{supported_options}
"""
end
end
end
end
defp validate_number(field, %Decimal{} = value, message, spec_key, _spec_function, target_value) do
result = Decimal.compare(value, decimal_new(target_value)) |> normalize_compare()
case decimal_compare(result, spec_key) do
true -> nil
false -> [{field, {message, validation: :number, kind: spec_key, number: target_value}}]
end
end
defp validate_number(field, value, message, spec_key, spec_function, target_value) when is_number(value) do
case apply(spec_function, [value, target_value]) do
true -> nil
false -> [{field, {message, validation: :number, kind: spec_key, number: target_value}}]
end
end
defp validate_number(_field, value, _message, _spec_key, _spec_function, _target_value) do
raise ArgumentError, "expected value to be of type Decimal, Integer or Float, got: #{inspect value}"
end
# TODO: Remove me once we support Decimal 2.0 only
# Support mismatch between API for Decimal.compare/2 for versions 1.6 and 2.0
defp normalize_compare(result) do
case result do
%Decimal{coef: 1, sign: -1} -> :lt
%Decimal{coef: 0} -> :eq
%Decimal{coef: 1, sign: 1} -> :gt
_ -> result
end
end
defp decimal_new(term) when is_float(term), do: Decimal.from_float(term)
defp decimal_new(term), do: Decimal.new(term)
defp decimal_compare(:lt, spec), do: spec in [:less_than, :less_than_or_equal_to, :not_equal_to]
defp decimal_compare(:gt, spec), do: spec in [:greater_than, :greater_than_or_equal_to, :not_equal_to]
defp decimal_compare(:eq, spec), do: spec in [:equal_to, :less_than_or_equal_to, :greater_than_or_equal_to]
@doc """
Validates that the given parameter matches its confirmation.
By calling `validate_confirmation(changeset, :email)`, this
validation will check if both "email" and "email_confirmation"
in the parameter map match. Note this validation only looks
at the parameters themselves, never the fields in the schema.
As such, the "email_confirmation" field does not need to be
added as a virtual field in your schema.
Note that if the confirmation field is nil or missing, this does
not add a validation error. You can specify that the confirmation
parameter is required in the options (see below).
## Options
* `:message` - the message on failure, defaults to "does not match confirmation"
* `:required` - boolean, sets whether the confirmation parameter must be
present; when `true`, a missing confirmation parameter adds a
"can't be blank" error. Defaults to `false`
## Examples
validate_confirmation(changeset, :email)
validate_confirmation(changeset, :password, message: "does not match password")
cast(data, params, [:password])
|> validate_confirmation(:password, message: "does not match password")
"""
@spec validate_confirmation(t, atom, Keyword.t) :: t
def validate_confirmation(changeset, field, opts \\ [])
def validate_confirmation(%{params: params} = changeset, field, opts) when is_map(params) do
param = Atom.to_string(field)
error_param = "#{param}_confirmation"
error_field = String.to_atom(error_param)
value = Map.get(params, param)
errors =
case Map.fetch(params, error_param) do
{:ok, ^value} ->
[]
{:ok, _} ->
[{error_field,
{message(opts, "does not match confirmation"), [validation: :confirmation]}}]
:error ->
confirmation_missing(opts, error_field)
end
%{changeset | validations: [{field, {:confirmation, opts}} | changeset.validations],
errors: errors ++ changeset.errors,
valid?: changeset.valid? and errors == []}
end
def validate_confirmation(%{params: nil} = changeset, _, _) do
changeset
end
defp confirmation_missing(opts, error_field) do
required = Keyword.get(opts, :required, false)
if required, do: [{error_field, {message(opts, "can't be blank"), [validation: :required]}}], else: []
end
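# Fetches a custom message from `opts` under `key`, falling back to `default`.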
defp message(opts, key \\ :message, default) do
Keyword.get(opts, key, default)
end
@doc """
Validates the given parameter is true.
Note this validation only checks the parameter itself is true, never
the field in the schema. That's because acceptance parameters do not need
to be persisted, as by definition they would always be stored as `true`.
## Options
* `:message` - the message on failure, defaults to "must be accepted"
## Examples
validate_acceptance(changeset, :terms_of_service)
validate_acceptance(changeset, :rules, message: "please accept rules")
"""
@spec validate_acceptance(t, atom, Keyword.t) :: t
def validate_acceptance(changeset, field, opts \\ [])
def validate_acceptance(%{params: params} = changeset, field, opts) do
errors = validate_acceptance_errors(params, field, opts)
%{changeset | validations: [{field, {:acceptance, opts}} | changeset.validations],
errors: errors ++ changeset.errors,
valid?: changeset.valid? and errors == []}
end
defp validate_acceptance_errors(nil, _field, _opts), do: []
defp validate_acceptance_errors(params, field, opts) do
param = Atom.to_string(field)
value = Map.get(params, param)
case Ecto.Type.cast(:boolean, value) do
{:ok, true} -> []
_ -> [{field, {message(opts, "must be accepted"), validation: :acceptance}}]
end
end
## Optimistic lock
@doc ~S"""
Applies optimistic locking to the changeset.
[Optimistic
locking](https://en.wikipedia.org/wiki/Optimistic_concurrency_control) (or
*optimistic concurrency control*) is a technique that allows concurrent edits
on a single record. While pessimistic locking works by locking a resource for
an entire transaction, optimistic locking only checks if the resource changed
before updating it.
This is done by regularly fetching the record from the database, then checking
whether another user has made changes to the record *only when updating the
record*. This behaviour is ideal in situations where the chances of concurrent
updates to the same record are low; if they're not, pessimistic locking or
other concurrency patterns may be more suited.
## Usage
Optimistic locking works by keeping a "version" counter for each record; this
counter gets incremented each time a modification is made to a record. Hence,
in order to use optimistic locking, a field must exist in your schema for
versioning purposes. Such a field is usually an integer, but other types are
supported.
## Examples
Assuming we have a `Post` schema (stored in the `posts` table), the first step
is to add a version column to the `posts` table:
alter table(:posts) do
add :lock_version, :integer, default: 1
end
The column name is arbitrary and doesn't need to be `:lock_version`. Now add
a field to the schema too:
defmodule Post do
use Ecto.Schema
schema "posts" do
field :title, :string
field :lock_version, :integer, default: 1
end
def changeset(:update, struct, params \\ %{}) do
struct
|> Ecto.Changeset.cast(params, [:title])
|> Ecto.Changeset.optimistic_lock(:lock_version)
end
end
Now let's take optimistic locking for a spin:
iex> post = Repo.insert!(%Post{title: "foo"})
%Post{id: 1, title: "foo", lock_version: 1}
iex> valid_change = Post.changeset(:update, post, %{title: "bar"})
iex> stale_change = Post.changeset(:update, post, %{title: "baz"})
iex> Repo.update!(valid_change)
%Post{id: 1, title: "bar", lock_version: 2}
iex> Repo.update!(stale_change)
** (Ecto.StaleEntryError) attempted to update a stale entry:
%Post{id: 1, title: "baz", lock_version: 1}
When a conflict happens (a record which has been previously fetched is
being updated, but that same record has been modified since it was
fetched), an `Ecto.StaleEntryError` exception is raised.
Optimistic locking also works with delete operations. Just call the
`optimistic_lock/3` function with the data before delete:
iex> changeset = Ecto.Changeset.optimistic_lock(post, :lock_version)
iex> Repo.delete(changeset)
`optimistic_lock/3` by default assumes the field
being used as a lock is an integer. If you want to use another type,
you need to pass the third argument customizing how the next value
is generated:
iex> Ecto.Changeset.optimistic_lock(post, :lock_uuid, fn _ -> Ecto.UUID.generate end)
"""
@spec optimistic_lock(Ecto.Schema.t | t, atom, (term -> term)) :: t
def optimistic_lock(data_or_changeset, field, incrementer \\ &increment_with_rollover/1) do
changeset = change(data_or_changeset, %{})
current = get_field(changeset, field)
# Apply these changes only inside the repo because we
# don't want to permanently track the lock change.
changeset = prepare_changes(changeset, fn changeset ->
put_in(changeset.changes[field], incrementer.(current))
end)
changeset = put_in(changeset.filters[field], current)
changeset
end
# increment_with_rollover expects to be used with lock_version set as :integer in the db schema
# 2_147_483_647 is upper limit for signed integer for both PostgreSQL and MySQL
defp increment_with_rollover(val) when val >= 2_147_483_647 do
1
end
defp increment_with_rollover(val) when is_integer(val) do
val + 1
end
@doc """
Provides a function executed by the repository on insert/update/delete.
If the changeset given to the repository is valid, the function given to
`prepare_changes/2` will be called with the changeset and must return a
changeset, allowing developers to do final adjustments to the changeset or
to issue data consistency commands. The repository itself can be accessed
inside the function under the `repo` field in the changeset. If the
changeset given to the repository is invalid, the function will not be
invoked.
The given function is guaranteed to run inside the same transaction
as the changeset operation for databases that do support transactions.
## Example
A common use case is updating a counter cache, in this case updating a post's
comment count when a comment is created:
def create_comment(comment, params) do
comment
|> cast(params, [:body, :post_id])
|> prepare_changes(fn changeset ->
if post_id = get_change(changeset, :post_id) do
query = from Post, where: [id: ^post_id]
changeset.repo.update_all(query, inc: [comment_count: 1])
end
changeset
end)
end
We retrieve the repo from the comment changeset itself and use
`update_all` to update the counter cache in one query. Finally, the original
changeset must be returned.
"""
@spec prepare_changes(t, (t -> t)) :: t
def prepare_changes(%Changeset{prepare: prepare} = changeset, function) when is_function(function, 1) do
%{changeset | prepare: [function | prepare]}
end
## Constraints
@doc """
Returns all constraints in a changeset.
A constraint is a map with the following fields:
* `:type` - the type of the constraint that will be checked in the database,
such as `:check`, `:unique`, etc
* `:constraint` - the database constraint name as a string
* `:match` - the type of match Ecto will perform on a violated constraint
against the `:constraint` value. It is `:exact`, `:suffix` or `:prefix`
* `:field` - the field a violated constraint will apply the error to
* `:error_message` - the error message in case of violated constraints
* `:error_type` - the type of error that identifies the error message
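## Example
A sketch, assuming a `Post` schema backed by a "posts" table:
    %Post{}
    |> change()
    |> unique_constraint(:title)
    |> constraints()
    #=> [%{type: :unique, constraint: "posts_title_index", match: :exact,
    #      field: :title, error_message: "has already been taken",
    #      error_type: :unique}]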
"""
@spec constraints(t) :: [constraint]
def constraints(%Changeset{constraints: constraints}) do
constraints
end
@doc """
Checks for a check constraint in the given field.
The check constraint works by relying on the database to check
if the check constraint has been violated or not and, if so,
Ecto converts it into a changeset error.
In order to use the check constraint, the first step is
to define the check constraint in a migration:
create constraint("users", :age_must_be_positive, check: "age > 0")
Now that a constraint exists, when modifying users, we could
annotate the changeset with a check constraint so Ecto knows
how to convert it into an error message:
cast(user, params, [:age])
|> check_constraint(:age, name: :age_must_be_positive)
Now, when invoking `c:Ecto.Repo.insert/2` or `c:Ecto.Repo.update/2`, if the
age is not positive, it will be converted into an error and
`{:error, changeset}` returned by the repository. Note that the error
will occur only after hitting the database so it will not be visible
until all other validations pass.
## Options
* `:message` - the message in case the constraint check fails.
Defaults to "is invalid"
* `:name` - the name of the constraint. Required.
* `:match` - how the changeset constraint name is matched against the
repo constraint, may be `:exact`, `:suffix` or `:prefix`. Defaults to `:exact`.
`:suffix` matches any repo constraint which `ends_with?` `:name`
to this changeset constraint.
`:prefix` matches any repo constraint which `starts_with?` `:name`
to this changeset constraint.
"""
def check_constraint(changeset, field, opts \\ []) do
constraint = opts[:name] || raise ArgumentError, "must supply the name of the constraint"
message = message(opts, "is invalid")
match_type = Keyword.get(opts, :match, :exact)
add_constraint(changeset, :check, to_string(constraint), match_type, field, message)
end
@doc """
Checks for a unique constraint in the given field or list of fields.
The unique constraint works by relying on the database to check
if the unique constraint has been violated or not and, if so,
Ecto converts it into a changeset error.
In order to use the uniqueness constraint, the first step is
to define the unique index in a migration:
create unique_index(:users, [:email])
Now that a constraint exists, when modifying users, we could
annotate the changeset with a unique constraint so Ecto knows
how to convert it into an error message:
cast(user, params, [:email])
|> unique_constraint(:email)
Now, when invoking `c:Ecto.Repo.insert/2` or `c:Ecto.Repo.update/2`, if the
email already exists, it will be converted into an error and
`{:error, changeset}` returned by the repository. Note that the error
will occur only after hitting the database so it will not be visible
until all other validations pass.
## Options
* `:message` - the message in case the constraint check fails,
defaults to "has already been taken"
* `:name` - the constraint name. By default, the constraint
name is inferred from the table + field(s). May be required
explicitly for complex cases
* `:match` - how the changeset constraint name is matched against the
repo constraint, may be `:exact` or `:suffix`. Defaults to `:exact`.
`:suffix` matches any repo constraint which `ends_with?` `:name`
to this changeset constraint.
## Complex constraints
Because the constraint logic is in the database, we can leverage
all the database functionality when defining them. For example,
let's suppose the e-mails are scoped by company id:
# In migration
create unique_index(:users, [:email, :company_id])
# In the changeset function
cast(user, params, [:email])
|> unique_constraint([:email, :company_id])
The first field name, `:email` in this case, will be used as the error
key to the changeset errors keyword list. For example, the above
`unique_constraint/3` would generate something like:
Repo.insert!(%User{email: "[email protected]", company_id: 1})
changeset = User.changeset(%User{}, %{email: "[email protected]", company_id: 1})
{:error, changeset} = Repo.insert(changeset)
changeset.errors #=> [email: {"has already been taken", []}]
In complex cases, instead of relying on name inference, it may be best
to set the constraint name explicitly:
# In the migration
create unique_index(:users, [:email, :company_id], name: :users_email_company_id_index)
# In the changeset function
cast(user, params, [:email])
|> unique_constraint(:email, name: :users_email_company_id_index)
### Partitioning
If your table is partitioned, then your unique index might look different
per partition, e.g. Postgres adds p<number> to the middle of your key, like:
users_p0_email_key
users_p1_email_key
...
users_p99_email_key
In this case you can use the name and suffix options together to match on
these dynamic indexes, like:
cast(user, params, [:email])
|> unique_constraint(:email, name: :email_key, match: :suffix)
## Case sensitivity
Unfortunately, different databases provide different guarantees
when it comes to case-sensitiveness. For example, in MySQL, comparisons
are case-insensitive by default. In Postgres, users can define a
case-insensitive column by using the `:citext` type/extension. In your migration:
execute "CREATE EXTENSION IF NOT EXISTS citext"
create table(:users) do
...
add :email, :citext
...
end
If for some reason your database does not support case insensitive columns,
you can explicitly downcase values before inserting/updating them:
cast(data, params, [:email])
|> update_change(:email, &String.downcase/1)
|> unique_constraint(:email)
"""
@spec unique_constraint(t, atom | [atom, ...], Keyword.t) :: t
def unique_constraint(changeset, field_or_fields, opts \\ [])
def unique_constraint(changeset, field, opts) when is_atom(field) do
unique_constraint(changeset, [field], opts)
end
def unique_constraint(changeset, [first_field | _] = fields, opts) do
constraint = opts[:name] || unique_index_name(changeset, fields)
message = message(opts, "has already been taken")
match_type = Keyword.get(opts, :match, :exact)
add_constraint(changeset, :unique, to_string(constraint), match_type, first_field, message)
end
defp unique_index_name(changeset, fields) do
field_names = Enum.map(fields, &get_field_source(changeset, &1))
Enum.join([get_source(changeset)] ++ field_names ++ ["index"], "_")
end
@doc """
Checks for foreign key constraint in the given field.
The foreign key constraint works by relying on the database to
check if the associated data exists or not. This is useful to
guarantee that a child will only be created if the parent exists
in the database too.
In order to use the foreign key constraint the first step is
to define the foreign key in a migration. This is often done
with references. For example, imagine you are creating a
comments table that belongs to posts. One would have:
create table(:comments) do
add :post_id, references(:posts)
end
By default, Ecto will generate a foreign key constraint with
name "comments_post_id_fkey" (the name is configurable).
Now that a constraint exists, when creating comments, we could
annotate the changeset with foreign key constraint so Ecto knows
how to convert it into an error message:
cast(comment, params, [:post_id])
|> foreign_key_constraint(:post_id)
Now, when invoking `c:Ecto.Repo.insert/2` or `c:Ecto.Repo.update/2`, if the
associated post does not exist, it will be converted into an
error and `{:error, changeset}` returned by the repository.
## Options
* `:message` - the message in case the constraint check fails,
defaults to "does not exist"
* `:name` - the constraint name. By default, the constraint
name is inferred from the table + field. May be required
explicitly for complex cases
"""
@spec foreign_key_constraint(t, atom, Keyword.t) :: t
def foreign_key_constraint(changeset, field, opts \\ []) do
constraint = opts[:name] || "#{get_source(changeset)}_#{get_field_source(changeset, field)}_fkey"
message = message(opts, "does not exist")
add_constraint(changeset, :foreign_key, to_string(constraint), :exact, field, message, :foreign)
end
@doc """
Checks that the associated field exists.
This is similar to `foreign_key_constraint/3` except that the
field is inferred from the association definition. This is useful
to guarantee that a child will only be created if the parent exists
in the database too. Therefore, it only applies to `belongs_to`
associations.
As the name says, a constraint is required in the database for
this function to work. Such a constraint is often added as a
reference to the child table:
create table(:comments) do
add :post_id, references(:posts)
end
Now, when inserting a comment, it is possible to forbid any
comment from being added if the associated post does not exist:
comment
|> Ecto.Changeset.cast(params, [:post_id])
|> Ecto.Changeset.assoc_constraint(:post)
|> Repo.insert
## Options
* `:message` - the message in case the constraint check fails,
defaults to "does not exist"
* `:name` - the constraint name. By default, the constraint
name is inferred from the table + association field.
May be required explicitly for complex cases
"""
@spec assoc_constraint(t, atom, Keyword.t) :: t
def assoc_constraint(changeset, assoc, opts \\ []) do
constraint = opts[:name] ||
case get_assoc(changeset, assoc) do
%Ecto.Association.BelongsTo{owner_key: owner_key} ->
"#{get_source(changeset)}_#{owner_key}_fkey"
other ->
raise ArgumentError,
"assoc_constraint can only be added to belongs to associations, got: #{inspect other}"
end
message = message(opts, "does not exist")
add_constraint(changeset, :foreign_key, to_string(constraint), :exact, assoc, message, :assoc)
end
@doc """
Checks that the associated field does not exist.
This is similar to `foreign_key_constraint/3` except that the
field is inferred from the association definition. This is useful
to guarantee that parent can only be deleted (or have its primary
key changed) if no child exists in the database. Therefore, it only
applies to `has_*` associations.
As the name says, a constraint is required in the database for
this function to work. Such a constraint is often added as a
reference to the child table:
create table(:comments) do
add :post_id, references(:posts)
end
Now, when deleting the post, it is possible to forbid the post from
being deleted if it still has comments attached to it:
post
|> Ecto.Changeset.change
|> Ecto.Changeset.no_assoc_constraint(:comments)
|> Repo.delete
## Options
* `:message` - the message in case the constraint check fails,
defaults to "is still associated with this entry" (for `has_one`)
and "are still associated with this entry" (for `has_many`)
* `:name` - the constraint name. By default, the constraint
name is inferred from the association table + association
field. May be required explicitly for complex cases
"""
@spec no_assoc_constraint(t, atom, Keyword.t) :: t
def no_assoc_constraint(changeset, assoc, opts \\ []) do
{constraint, message} =
case get_assoc(changeset, assoc) do
%Ecto.Association.Has{cardinality: cardinality,
related_key: related_key, related: related} ->
{opts[:name] || "#{related.__schema__(:source)}_#{related_key}_fkey",
message(opts, no_assoc_message(cardinality))}
other ->
raise ArgumentError,
"no_assoc_constraint can only be added to has one/many associations, got: #{inspect other}"
end
add_constraint(changeset, :foreign_key, to_string(constraint), :exact, assoc, message, :no_assoc)
end
@doc """
Checks for an exclusion constraint in the given field.
The exclusion constraint works by relying on the database to check
if the exclusion constraint has been violated or not and, if so,
Ecto converts it into a changeset error.
## Options
* `:message` - the message in case the constraint check fails,
defaults to "violates an exclusion constraint"
* `:name` - the constraint name. By default, the constraint
name is inferred from the table + field. May be required
explicitly for complex cases
* `:match` - how the changeset constraint name is matched against the
repo constraint, may be `:exact` or `:suffix`. Defaults to `:exact`.
`:suffix` matches any repo constraint which `ends_with?` `:name`
to this changeset constraint.
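## Examples
A sketch, assuming a PostgreSQL exclusion constraint defined in a migration
(the table, fields and constraint name are hypothetical):
    create constraint("reservations", :no_overlapping_ranges,
             exclude: ~s|gist (room_id WITH =, during WITH &&)|)
Then, in the changeset function:
    cast(reservation, params, [:room_id, :during])
    |> exclusion_constraint(:during, name: :no_overlapping_ranges)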
"""
def exclusion_constraint(changeset, field, opts \\ []) do
constraint = opts[:name] || "#{get_source(changeset)}_#{get_field_source(changeset, field)}_exclusion"
message = message(opts, "violates an exclusion constraint")
match_type = Keyword.get(opts, :match, :exact)
add_constraint(changeset, :exclusion, to_string(constraint), match_type, field, message, :exclusion)
end
defp no_assoc_message(:one), do: "is still associated with this entry"
defp no_assoc_message(:many), do: "are still associated with this entry"
defp add_constraint(changeset, type, constraint, match, field, message) do
add_constraint(changeset, type, constraint, match, field, message, type)
end
defp add_constraint(%Changeset{constraints: constraints} = changeset,
type, constraint, match, field, error_message, error_type)
when is_binary(constraint) and is_atom(field) and is_binary(error_message) do
unless match in @match_types do
raise ArgumentError, "invalid match type: #{inspect match}. Allowed match types: #{inspect @match_types}"
end
constraint = %{
constraint: constraint,
error_message: error_message,
error_type: error_type,
field: field,
match: match,
type: type
}
%{changeset | constraints: [constraint | constraints]}
end
defp get_source(%{data: %{__meta__: %{source: source}}}) when is_binary(source),
do: source
defp get_source(%{data: data}), do:
raise ArgumentError, "cannot add constraint to changeset because it does not have a source, got: #{inspect data}"
defp get_source(item), do:
raise ArgumentError, "cannot add constraint because a changeset was not supplied, got: #{inspect item}"
defp get_assoc(%{types: types}, assoc) do
case Map.fetch(types, assoc) do
{:ok, {:assoc, association}} ->
association
_ ->
raise_invalid_assoc(types, assoc)
end
end
defp raise_invalid_assoc(types, assoc) do
associations = for {_key, {:assoc, %{field: field}}} <- types, do: field
one_of = if match?([_], associations), do: "", else: "one of "
raise ArgumentError,
"cannot add constraint to changeset because association `#{assoc}` does not exist. " <>
"Did you mean #{one_of}`#{Enum.join(associations, "`, `")}`?"
end
defp get_field_source(%{data: %{__struct__: schema}}, field) when is_atom(schema),
do: schema.__schema__(:field_source, field) || field
defp get_field_source(%{}, field),
do: field
@doc ~S"""
Traverses changeset errors and applies the given function to error messages.
This function is particularly useful when associations and embeds
are cast in the changeset as it will traverse all associations and
embeds and place all errors in a series of nested maps.
A changeset is supplied along with a function to apply to each
error message as the changeset is traversed. The error message
function receives an error tuple `{msg, opts}`, for example:
{"should be at least %{count} characters", [count: 3, validation: :length, min: 3]}
## Examples
iex> traverse_errors(changeset, fn {msg, opts} ->
...> Regex.replace(~r"%{(\w+)}", msg, fn _, key ->
...> opts |> Keyword.get(String.to_existing_atom(key), key) |> to_string()
...> end)
...> end)
%{title: ["should be at least 3 characters"]}
Optionally, the function can accept three arguments: `changeset`, `field`
and the error tuple `{msg, opts}`. This is useful whenever you want to
extract validation rules from `changeset.validations` to build a detailed
error description.
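For example, a sketch mirroring the example above:
    iex> traverse_errors(changeset, fn _changeset, _field, {msg, opts} ->
    ...>   Regex.replace(~r"%{(\w+)}", msg, fn _, key ->
    ...>     opts |> Keyword.get(String.to_existing_atom(key), key) |> to_string()
    ...>   end)
    ...> end)
    %{title: ["should be at least 3 characters"]}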
"""
@spec traverse_errors(t, (error -> String.t) | (Changeset.t, atom, error -> String.t)) :: %{atom => [term]}
def traverse_errors(%Changeset{errors: errors, changes: changes, types: types} = changeset, msg_func)
when is_function(msg_func, 1) or is_function(msg_func, 3) do
errors
|> Enum.reverse()
|> merge_keyword_keys(msg_func, changeset)
|> merge_related_keys(changes, types, msg_func, &traverse_errors/2)
end
defp merge_keyword_keys(keyword_list, msg_func, _) when is_function(msg_func, 1) do
Enum.reduce(keyword_list, %{}, fn({key, val}, acc) ->
val = msg_func.(val)
Map.update(acc, key, [val], &[val|&1])
end)
end
defp merge_keyword_keys(keyword_list, msg_func, changeset) when is_function(msg_func, 3) do
Enum.reduce(keyword_list, %{}, fn({key, val}, acc) ->
val = msg_func.(changeset, key, val)
Map.update(acc, key, [val], &[val|&1])
end)
end
defp merge_related_keys(_, _, nil, _, _) do
raise ArgumentError, "changeset does not have types information"
end
defp merge_related_keys(map, changes, types, msg_func, traverse_function) do
Enum.reduce types, map, fn
{field, {tag, %{cardinality: :many}}}, acc when tag in @relations ->
if changesets = Map.get(changes, field) do
{child, all_empty?} =
Enum.map_reduce(changesets, true, fn changeset, all_empty? ->
child = traverse_function.(changeset, msg_func)
{child, all_empty? and child == %{}}
end)
case all_empty? do
true -> acc
false -> Map.put(acc, field, child)
end
else
acc
end
{field, {tag, %{cardinality: :one}}}, acc when tag in @relations ->
if changeset = Map.get(changes, field) do
case traverse_function.(changeset, msg_func) do
child when child == %{} -> acc
child -> Map.put(acc, field, child)
end
else
acc
end
{_, _}, acc ->
acc
end
end
defp apply_relation_changes(acc, key, relation, value) do
relation_changed = Relation.apply_changes(relation, value)
acc = Map.put(acc, key, relation_changed)
with %Ecto.Association.BelongsTo{related_key: related_key} <- relation,
%{^related_key => id} <- relation_changed do
Map.put(acc, relation.owner_key, id)
else
_ -> acc
end
end
@doc ~S"""
Traverses changeset validations and applies the given function to validations.
This behaves the same as `traverse_errors/2`, but operates on changeset
validations instead of errors.
## Examples
iex> traverse_validations(changeset, &(&1))
%{title: [format: ~r/pattern/, length: [min: 1, max: 20]]}
iex> traverse_validations(changeset, fn
...> {:length, opts} -> {:length, "#{Keyword.get(opts, :min, 0)}-#{Keyword.get(opts, :max, 32)}"}
...> {:format, %Regex{source: source}} -> {:format, "/#{source}/"}
...> {other, opts} -> {other, inspect(opts)}
...> end)
%{title: [format: "/pattern/", length: "1-20"]}
"""
@spec traverse_validations(t, (error -> String.t) | (Changeset.t, atom, error -> String.t)) :: %{atom => [term]}
def traverse_validations(%Changeset{validations: validations, changes: changes, types: types} = changeset, msg_func)
when is_function(msg_func, 1) or is_function(msg_func, 3) do
validations
|> Enum.reverse()
|> merge_keyword_keys(msg_func, changeset)
|> merge_related_keys(changes, types, msg_func, &traverse_validations/2)
end
end
defimpl Inspect, for: Ecto.Changeset do
import Inspect.Algebra
def inspect(%Ecto.Changeset{data: data} = changeset, opts) do
list = for attr <- [:action, :changes, :errors, :data, :valid?] do
{attr, Map.get(changeset, attr)}
end
redacted_fields = case data do
%type{} ->
if function_exported?(type, :__schema__, 1) do
type.__schema__(:redact_fields)
else
[]
end
_ -> []
end
container_doc("#Ecto.Changeset<", list, ">", opts, fn
{:action, action}, opts -> concat("action: ", to_doc(action, opts))
{:changes, changes}, opts -> concat("changes: ", changes |> filter(redacted_fields) |> to_doc(opts))
{:data, data}, _opts -> concat("data: ", to_struct(data, opts))
{:errors, errors}, opts -> concat("errors: ", to_doc(errors, opts))
{:valid?, valid?}, opts -> concat("valid?: ", to_doc(valid?, opts))
end)
end
defp to_struct(%{__struct__: struct}, _opts), do: "#" <> Kernel.inspect(struct) <> "<>"
defp to_struct(other, opts), do: to_doc(other, opts)
defp filter(changes, redacted_fields) do
Enum.reduce(redacted_fields, changes, fn redacted_field, changes ->
if Map.has_key?(changes, redacted_field) do
Map.put(changes, redacted_field, "**redacted**")
else
changes
end
end)
end
end
| 38.427422 | 169 | 0.663442 |
9e771e6f7545ef9cd857ab3f44e7b11faa4b3e0a | 701 | ex | Elixir | apps/aehttpserver/lib/aehttpserver/web/controllers/peers_controller.ex | aeternity/epoch-elixir | d35613f5541a9bbebe61f90b8503a9b3416fe8b4 | [
"0BSD"
] | 131 | 2018-03-10T01:35:56.000Z | 2021-12-27T13:44:41.000Z | apps/aehttpserver/lib/aehttpserver/web/controllers/peers_controller.ex | aeternity/elixir-node | d35613f5541a9bbebe61f90b8503a9b3416fe8b4 | [
"0BSD"
] | 445 | 2018-03-12T09:46:17.000Z | 2018-12-12T09:52:07.000Z | apps/aehttpserver/lib/aehttpserver/web/controllers/peers_controller.ex | aeternity/epoch-elixir | d35613f5541a9bbebe61f90b8503a9b3416fe8b4 | [
"0BSD"
] | 23 | 2018-03-12T12:01:28.000Z | 2022-03-06T09:22:17.000Z | defmodule Aehttpserver.Web.PeersController do
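  @moduledoc """
  Exposes this node's sync port and peer public key, as well as the
  list of currently known peers, over the HTTP API.
  """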
use Aehttpserver.Web, :controller
alias Aecore.Peers.Worker, as: Peers
alias Aecore.Account.Account
alias Aecore.Keys
alias Aeutil.Environment
def info(conn, _params) do
sync_port = String.to_integer(Environment.get_env_or_default("SYNC_PORT", "3015"))
peer_pubkey =
:peer
|> Keys.keypair()
|> elem(0)
|> Keys.peer_encode()
json(conn, %{port: sync_port, pubkey: peer_pubkey})
end
def peers(conn, _params) do
peers = Peers.all_peers()
serialized_peers =
Enum.map(peers, fn peer -> %{peer | pubkey: Account.base58c_encode(peer.pubkey)} end)
json(conn, serialized_peers)
end
end
| 23.366667 | 91 | 0.687589 |
9e7724b97eebe69884c6c88812191bae83ee0706 | 695 | exs | Elixir | test/lbcparser_test.exs | adanselm/lbcproxy | 6ed4987e804eaae68491878783fab9706d534a72 | [
"MIT"
] | 3 | 2016-03-06T21:23:44.000Z | 2017-03-13T23:39:25.000Z | test/lbcparser_test.exs | adanselm/lbcproxy | 6ed4987e804eaae68491878783fab9706d534a72 | [
"MIT"
] | null | null | null | test/lbcparser_test.exs | adanselm/lbcproxy | 6ed4987e804eaae68491878783fab9706d534a72 | [
"MIT"
] | null | null | null | defmodule LbcparserTest do
use ExUnit.Case
@laguna_p1 File.read!("fixture/voitures_renault_laguna_3_midi_py.htm")
test "reads classifieds and skips ads" do
expected = %{category: "(pro)", date: "Hier", id: "839541197",
link: "http://www.leboncoin.fr/voitures/839541197.htm?ca=16_s",
placement: ["Fréjairolles", "Tarn"], time: "08:38",
price: <<49, 57, 32, 53, 48, 48, 195, 130, 194, 160, 194, 128>>,
title: "Renault Captur Intens 1.5 DCI 110 CV",
picture: "voitures_renault_laguna_3_midi_py_files/51baad98c41aad89f6d74ac5b5acd121fa4c9907.jpg"}
l = Lbcparser.parse @laguna_p1
assert length(l) == 9
assert List.first(l) == expected
end
end
| 36.578947 | 102 | 0.683453 |
9e772e3f953828cd6aef031d272ef5e437a2de62 | 2,005 | exs | Elixir | test/xdr/transactions/operations/clawback_claimable_balance_result_test.exs | einerzg/stellar_base | 2d10c5fc3b8159efc5de10b5c7c665e3b57b3d8f | [
"MIT"
] | 3 | 2021-08-17T20:32:45.000Z | 2022-03-13T20:26:02.000Z | test/xdr/transactions/operations/clawback_claimable_balance_result_test.exs | einerzg/stellar_base | 2d10c5fc3b8159efc5de10b5c7c665e3b57b3d8f | [
"MIT"
] | 45 | 2021-08-12T20:19:41.000Z | 2022-03-27T21:00:10.000Z | test/xdr/transactions/operations/clawback_claimable_balance_result_test.exs | einerzg/stellar_base | 2d10c5fc3b8159efc5de10b5c7c665e3b57b3d8f | [
"MIT"
] | 2 | 2021-09-22T23:11:13.000Z | 2022-01-23T03:19:11.000Z | defmodule StellarBase.XDR.Operations.ClawbackClaimableBalanceResultTest do
use ExUnit.Case
alias StellarBase.XDR.Void
alias StellarBase.XDR.Operations.{
ClawbackClaimableBalanceResult,
ClawbackClaimableBalanceResultCode
}
describe "ClawbackClaimableBalanceResult" do
setup do
code = ClawbackClaimableBalanceResultCode.new(:CLAWBACK_CLAIMABLE_BALANCE_SUCCESS)
%{
code: code,
value: Void.new(),
result: ClawbackClaimableBalanceResult.new(Void.new(), code),
binary: <<0, 0, 0, 0>>
}
end
test "new/1", %{code: code, value: value} do
%ClawbackClaimableBalanceResult{code: ^code, result: ^value} =
ClawbackClaimableBalanceResult.new(value, code)
end
test "encode_xdr/1", %{result: result, binary: binary} do
{:ok, ^binary} = ClawbackClaimableBalanceResult.encode_xdr(result)
end
test "encode_xdr!/1", %{result: result, binary: binary} do
^binary = ClawbackClaimableBalanceResult.encode_xdr!(result)
end
test "encode_xdr!/1 with a default value", %{code: code, binary: binary} do
result = ClawbackClaimableBalanceResult.new("TEST", code)
^binary = ClawbackClaimableBalanceResult.encode_xdr!(result)
end
test "decode_xdr/2", %{result: result, binary: binary} do
{:ok, {^result, ""}} = ClawbackClaimableBalanceResult.decode_xdr(binary)
end
test "decode_xdr!/2", %{result: result, binary: binary} do
{^result, ^binary} = ClawbackClaimableBalanceResult.decode_xdr!(binary <> binary)
end
test "decode_xdr!/2 an error code" do
{%ClawbackClaimableBalanceResult{
code: %ClawbackClaimableBalanceResultCode{
identifier: :CLAWBACK_CLAIMABLE_BALANCE_NOT_ISSUER
}
}, ""} = ClawbackClaimableBalanceResult.decode_xdr!(<<255, 255, 255, 254>>)
end
test "decode_xdr/2 with an invalid binary" do
{:error, :not_binary} = ClawbackClaimableBalanceResult.decode_xdr(123)
end
end
end
| 32.33871 | 88 | 0.689277 |
9e77331e043918c2a14e8ecbdb22d87e97d56bb1 | 734 | ex | Elixir | lib/ynd_phx_bootstrap_web/gettext.ex | ynd-consult-ug/ynd-phx-bootstrap | 784480186fee0375a01a534fcf597d1ef026e7ac | [
"MIT"
] | 33 | 2018-02-01T16:02:05.000Z | 2020-08-20T07:47:33.000Z | lib/ynd_phx_bootstrap_web/gettext.ex | ynd-consult/ynd-phx-bootstrap | 784480186fee0375a01a534fcf597d1ef026e7ac | [
"MIT"
] | null | null | null | lib/ynd_phx_bootstrap_web/gettext.ex | ynd-consult/ynd-phx-bootstrap | 784480186fee0375a01a534fcf597d1ef026e7ac | [
"MIT"
] | 2 | 2020-10-30T10:49:38.000Z | 2021-03-29T15:07:40.000Z | defmodule YndPhxBootstrapWeb.Gettext do
@moduledoc """
A module providing Internationalization with a gettext-based API.
By using [Gettext](https://hexdocs.pm/gettext),
your module gains a set of macros for translations, for example:
import YndPhxBootstrapWeb.Gettext
# Simple translation
gettext "Here is the string to translate"
# Plural translation
ngettext "Here is the string to translate",
"Here are the strings to translate",
3
# Domain-based translation
dgettext "errors", "Here is the error message to translate"
See the [Gettext Docs](https://hexdocs.pm/gettext) for detailed usage.
"""
use Gettext, otp_app: :ynd_phx_bootstrap
end
| 29.36 | 72 | 0.692098 |
9e7761a5d8fca00ee5b6bc74fc27e49d85b24037 | 648 | exs | Elixir | apps/kitsune_aws_sqs/mix.exs | shirayukikitsune/ex_aws | c56063fa986b173f160155dfb5185d1881989d0a | [
"BSD-2-Clause"
] | 1 | 2021-07-07T12:45:55.000Z | 2021-07-07T12:45:55.000Z | apps/kitsune_aws_sqs/mix.exs | shirayukikitsune/ex_aws | c56063fa986b173f160155dfb5185d1881989d0a | [
"BSD-2-Clause"
] | null | null | null | apps/kitsune_aws_sqs/mix.exs | shirayukikitsune/ex_aws | c56063fa986b173f160155dfb5185d1881989d0a | [
"BSD-2-Clause"
] | null | null | null | defmodule KitsuneAwsSqs.MixProject do
use Mix.Project
def project do
[
app: :kitsune_aws_sqs,
version: "0.1.0",
build_path: "../../_build",
config_path: "../../config/config.exs",
deps_path: "../../deps",
lockfile: "../../mix.lock",
elixir: "~> 1.9",
start_permanent: Mix.env() == :prod,
deps: deps()
]
end
# Run "mix help compile.app" to learn about applications.
def application do
[
extra_applications: [:logger]
]
end
# Run "mix help deps" to learn about dependencies.
defp deps do
[
{:kitsune_aws_core, in_umbrella: true}
]
end
end
| 20.25 | 59 | 0.575617 |
9e7779966b943186e2ef04211fa05668884bda3a | 500 | exs | Elixir | config/test.exs | benhayehudi/phoenix_sms_nexmo | df75b17745185a9932b85eedd70d36d19db0c366 | [
"MIT"
] | 2 | 2019-02-26T17:33:21.000Z | 2019-04-06T21:04:32.000Z | config/test.exs | benhayehudi/phoenix_sms_nexmo | df75b17745185a9932b85eedd70d36d19db0c366 | [
"MIT"
] | null | null | null | config/test.exs | benhayehudi/phoenix_sms_nexmo | df75b17745185a9932b85eedd70d36d19db0c366 | [
"MIT"
] | null | null | null | use Mix.Config
# We don't run a server during test. If one is required,
# you can enable the server option below.
config :sms_with_phoenix, SmsWithPhoenixWeb.Endpoint,
http: [port: 4002],
server: false
# Print only warnings and errors during test
config :logger, level: :warn
# Configure your database
config :sms_with_phoenix, SmsWithPhoenix.Repo,
username: "postgres",
password: "postgres",
database: "sms_with_phoenix_test",
hostname: "localhost",
pool: Ecto.Adapters.SQL.Sandbox
| 26.315789 | 56 | 0.754 |
9e7782eade3ce7ad1f2284e0dd0996071c66ca8a | 949 | ex | Elixir | lib/dogma/codeclimate_formatter.ex | fazibear/codeclimate-elixir | f4eaa2a35bbbd651f86cb530d80b5424dc94f887 | [
"MIT"
] | 7 | 2015-12-20T19:40:01.000Z | 2018-12-17T11:48:01.000Z | lib/dogma/codeclimate_formatter.ex | fazibear/codeclimate-elixir | f4eaa2a35bbbd651f86cb530d80b5424dc94f887 | [
"MIT"
] | 1 | 2016-07-15T16:40:33.000Z | 2017-06-07T15:37:44.000Z | lib/dogma/codeclimate_formatter.ex | fazibear/codeclimate-elixir | f4eaa2a35bbbd651f86cb530d80b5424dc94f887 | [
"MIT"
] | null | null | null | defmodule Dogma.CodeclimateFormatter do
@moduledoc """
"""
@doc """
Runs at the start of the test suite, printing nothing.
"""
def start(_), do: ""
@doc """
Runs after each script is tested, printing nothing.
"""
def script(_), do: ""
@doc """
Runs at the end of the test suite, printing json.
"""
def finish(scripts) do
scripts |> Enum.map(&format/1) |> Enum.join("\0")
end
defp format(script) do
script.errors |> Enum.map(fn(error) ->
format_error(error, script)
end) |> Enum.join("\0")
end
defp format_error(error, script) do
%{
type: "Issue",
#check_name: "Bug Risk/Unused Variable",
description: error.message,
#categories: ["Complexity", "Style"],
location: %{
path: script.path |> String.replace(~r/^\/code\//, ""),
lines: %{
begin: error.line,
end: error.line
}
}
} |> Poison.encode!
end
end
| 21.568182 | 63 | 0.567966 |
9e77999632d0e05a7d6f5895d5317b80bc407fd9 | 2,686 | ex | Elixir | lib/task_bunny/errors.ex | salemove/task_bunny | f1a69291b47f59cab1c010d48fda7551132ae51c | [
"MIT"
] | null | null | null | lib/task_bunny/errors.ex | salemove/task_bunny | f1a69291b47f59cab1c010d48fda7551132ae51c | [
"MIT"
] | 3 | 2019-08-13T13:05:26.000Z | 2021-11-10T08:12:51.000Z | lib/task_bunny/errors.ex | salemove/task_bunny | f1a69291b47f59cab1c010d48fda7551132ae51c | [
"MIT"
] | null | null | null | defmodule TaskBunny.ConfigError do
@moduledoc """
Raised when an error was found on TaskBunny config
"""
defexception [:message]
def exception(message: message) do
title = "Failed to load TaskBunny config"
message = "#{title}\n#{message}"
%__MODULE__{message: message}
end
end
defmodule TaskBunny.Connection.ConnectError do
@moduledoc """
Raised when failed to retain a connection
"""
defexception [:type, :message]
def exception(_opts = [type: type, host: host]) do
title = "Failed to get a connection to host '#{host}'."
detail =
case type do
:invalid_host ->
"The host is not defined in config"
:no_connection_process ->
"""
No process running for the host connection.
          - Make sure the supervisor process is up and running.
          - You might be trying to get a connection before the process is ready.
"""
:not_connected ->
"""
The connection is not available.
          - Check if the RabbitMQ host is up and running.
          - Make sure you can connect to RabbitMQ from the application host.
          - You might be trying to get a connection before the process is ready.
"""
fallback ->
"#{fallback}"
end
message = "#{title}\n#{detail}"
%__MODULE__{message: message, type: type}
end
end
defmodule TaskBunny.Job.QueueNotFoundError do
@moduledoc """
Raised when failed to find a queue for the job.
"""
defexception [:job, :message]
def exception(job: job) do
title = "Failed to find a queue for the job."
detail = "job=#{job}"
message = "#{title}\n#{detail}"
%__MODULE__{message: message, job: job}
end
end
defmodule TaskBunny.Message.DecodeError do
@moduledoc """
Raised when failed to decode the message.
"""
defexception [:message]
def exception(opts) do
title = "Failed to decode the message."
detail =
case opts[:type] do
:job_not_loaded ->
"Job is not valid Elixir module"
:decode_error ->
"Failed to decode the message. error=#{inspect(opts[:error])}"
fallback ->
"#{fallback}"
end
message = "#{title}\n#{detail}\nmessage body=#{opts[:body]}"
%__MODULE__{message: message}
end
end
defmodule TaskBunny.Publisher.PublishError do
@moduledoc """
Raised when failed to publish the message.
"""
defexception [:message, :inner_error]
def exception(inner_error: inner_error) do
title = "Failed to publish the message."
detail = "error=#{inspect(inner_error)}"
message = "#{title}\n#{detail}"
%__MODULE__{message: message, inner_error: inner_error}
end
end
| 24.642202 | 76 | 0.630678 |
9e77d4c2c8f946e0c54c13042805aa7de2ed3e6b | 4,328 | ex | Elixir | lib/memoize/cache_strategy/eviction.ex | davorbadrov/memoize | dd34c313de805e57da18c4e6142247cc8eb1ba6a | [
"MIT"
] | null | null | null | lib/memoize/cache_strategy/eviction.ex | davorbadrov/memoize | dd34c313de805e57da18c4e6142247cc8eb1ba6a | [
"MIT"
] | null | null | null | lib/memoize/cache_strategy/eviction.ex | davorbadrov/memoize | dd34c313de805e57da18c4e6142247cc8eb1ba6a | [
"MIT"
] | null | null | null | if Memoize.CacheStrategy.configured?(Memoize.CacheStrategy.Eviction) do
defmodule Memoize.CacheStrategy.Eviction do
@behaviour Memoize.CacheStrategy
@ets_tab __MODULE__
@read_history_tab Module.concat(__MODULE__, "ReadHistory")
@expiration_tab Module.concat(__MODULE__, "Expiration")
@opts Application.fetch_env!(:memoize, __MODULE__)
@max_threshold Keyword.fetch!(@opts, :max_threshold)
if @max_threshold != :infinity do
@min_threshold Keyword.fetch!(@opts, :min_threshold)
end
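    # The thresholds above come from the application environment, e.g.
    # (the byte values here are illustrative only):
    #
    #     config :memoize, Memoize.CacheStrategy.Eviction,
    #       max_threshold: 100_000_000,
    #       min_threshold: 70_000_000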
def init() do
:ets.new(@ets_tab, [:public, :set, :named_table, {:read_concurrency, true}])
:ets.new(@read_history_tab, [:public, :set, :named_table, {:write_concurrency, true}])
:ets.new(@expiration_tab, [:public, :ordered_set, :named_table])
end
def tab(_key) do
@ets_tab
end
def used_bytes() do
      words = :ets.info(@ets_tab, :memory) + :ets.info(@read_history_tab, :memory)
words * :erlang.system_info(:wordsize)
end
if @max_threshold == :infinity do
def cache(key, value, opts) do
do_cache(key, value, opts)
end
else
def cache(key, value, opts) do
if used_bytes() > @max_threshold do
garbage_collect()
end
do_cache(key, value, opts)
end
end
defp do_cache(key, _value, opts) do
case Keyword.fetch(opts, :expires_in) do
{:ok, expires_in} ->
expired_at = System.monotonic_time(:millisecond) + expires_in
counter = System.unique_integer()
:ets.insert(@expiration_tab, {{expired_at, counter}, key})
:error ->
:ok
end
%{permanent: Keyword.get(opts, :permanent, false)}
end
def read(key, _value, context) do
expired? = clear_expired_cache(key)
unless context.permanent do
counter = System.unique_integer([:monotonic, :positive])
:ets.insert(@read_history_tab, {key, counter})
end
if expired?, do: :retry, else: :ok
end
def invalidate() do
num_deleted = :ets.select_delete(@ets_tab, [{{:_, {:completed, :_, :_}}, [], [true]}])
:ets.delete_all_objects(@read_history_tab)
num_deleted
end
def invalidate(key) do
num_deleted = :ets.select_delete(@ets_tab, [{{key, {:completed, :_, :_}}, [], [true]}])
:ets.select_delete(@read_history_tab, [{{key, :_}, [], [true]}])
num_deleted
end
if @max_threshold == :infinity do
def garbage_collect() do
        # @max_threshold is :infinity, so never collect
0
end
else
def garbage_collect() do
if used_bytes() <= @min_threshold do
          # usage is still at or below @min_threshold, so don't collect yet
0
else
# remove values ordered by last accessed time until used bytes less than @min_threshold.
values = :lists.keysort(2, :ets.tab2list(@read_history_tab))
stream = values |> Stream.filter(fn n -> n != :permanent end) |> Stream.with_index(1)
try do
for {{key, _}, num_deleted} <- stream do
:ets.select_delete(@ets_tab, [{{key, {:completed, :_, :_}}, [], [true]}])
:ets.delete(@read_history_tab, key)
if used_bytes() <= @min_threshold do
throw({:break, num_deleted})
end
end
else
_ -> length(values)
catch
{:break, num_deleted} -> num_deleted
end
end
end
end
def clear_expired_cache(read_key \\ nil, expired? \\ false) do
case :ets.first(@expiration_tab) do
:"$end_of_table" ->
expired?
{expired_at, _counter} = key ->
case :ets.lookup(@expiration_tab, key) do
[] ->
# retry
clear_expired_cache(read_key, expired?)
[{^key, cache_key}] ->
now = System.monotonic_time(:millisecond)
if now > expired_at do
invalidate(cache_key)
:ets.delete(@expiration_tab, key)
expired? = expired? || cache_key == read_key
# next
clear_expired_cache(read_key, expired?)
else
# completed
expired?
end
end
end
end
end
end
| 29.643836 | 98 | 0.572551 |
9e78062bce1a330c3c910cba6ed467f593f3fb6a | 2,789 | ex | Elixir | clients/sheets/lib/google_api/sheets/v4/model/spreadsheet.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | clients/sheets/lib/google_api/sheets/v4/model/spreadsheet.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | clients/sheets/lib/google_api/sheets/v4/model/spreadsheet.ex | medikent/elixir-google-api | 98a83d4f7bfaeac15b67b04548711bb7e49f9490 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Sheets.V4.Model.Spreadsheet do
@moduledoc """
Resource that represents a spreadsheet.
## Attributes
* `developerMetadata` (*type:* `list(GoogleApi.Sheets.V4.Model.DeveloperMetadata.t)`, *default:* `nil`) - The developer metadata associated with a spreadsheet.
* `namedRanges` (*type:* `list(GoogleApi.Sheets.V4.Model.NamedRange.t)`, *default:* `nil`) - The named ranges defined in a spreadsheet.
* `properties` (*type:* `GoogleApi.Sheets.V4.Model.SpreadsheetProperties.t`, *default:* `nil`) - Overall properties of a spreadsheet.
* `sheets` (*type:* `list(GoogleApi.Sheets.V4.Model.Sheet.t)`, *default:* `nil`) - The sheets that are part of a spreadsheet.
* `spreadsheetId` (*type:* `String.t`, *default:* `nil`) - The ID of the spreadsheet.
This field is read-only.
* `spreadsheetUrl` (*type:* `String.t`, *default:* `nil`) - The url of the spreadsheet.
This field is read-only.
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:developerMetadata => list(GoogleApi.Sheets.V4.Model.DeveloperMetadata.t()),
:namedRanges => list(GoogleApi.Sheets.V4.Model.NamedRange.t()),
:properties => GoogleApi.Sheets.V4.Model.SpreadsheetProperties.t(),
:sheets => list(GoogleApi.Sheets.V4.Model.Sheet.t()),
:spreadsheetId => String.t(),
:spreadsheetUrl => String.t()
}
field(:developerMetadata, as: GoogleApi.Sheets.V4.Model.DeveloperMetadata, type: :list)
field(:namedRanges, as: GoogleApi.Sheets.V4.Model.NamedRange, type: :list)
field(:properties, as: GoogleApi.Sheets.V4.Model.SpreadsheetProperties)
field(:sheets, as: GoogleApi.Sheets.V4.Model.Sheet, type: :list)
field(:spreadsheetId)
field(:spreadsheetUrl)
end
defimpl Poison.Decoder, for: GoogleApi.Sheets.V4.Model.Spreadsheet do
def decode(value, options) do
GoogleApi.Sheets.V4.Model.Spreadsheet.decode(value, options)
end
end
defimpl Poison.Encoder, for: GoogleApi.Sheets.V4.Model.Spreadsheet do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
| 43.578125 | 163 | 0.714593 |
9e78163f4e4db063a4804324795bf61da096e590 | 1,949 | exs | Elixir | mix.exs | evnu/sniff | 003bd858dd436752a141bfab3629a0cdfc7df2e0 | [
"Apache-2.0"
] | null | null | null | mix.exs | evnu/sniff | 003bd858dd436752a141bfab3629a0cdfc7df2e0 | [
"Apache-2.0"
] | null | null | null | mix.exs | evnu/sniff | 003bd858dd436752a141bfab3629a0cdfc7df2e0 | [
"Apache-2.0"
] | null | null | null | defmodule Mix.Tasks.Compile.Nif do
def run(_) do
generate_env()
case :os.type() do
{:unix, :darwin} -> 0 = Mix.Shell.IO.cmd("make -f make.darwin")
{:unix, :linux} -> 0 = Mix.Shell.IO.cmd("make -f make.linux")
{:win32, :nt} -> 0 = Mix.Shell.IO.cmd("nmake /f make.winnt")
end
:ok
end
def clean() do
generate_env()
case :os.type() do
{:unix, :darwin} -> 0 = Mix.Shell.IO.cmd("make -f make.darwin clean")
{:unix, :linux} -> 0 = Mix.Shell.IO.cmd("make -f make.linux clean")
{:win32, :nt} -> 0 = Mix.Shell.IO.cmd("nmake /f make.winnt clean")
end
:ok
end
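  # Writes env.tmp with BUILD_PATH and ERTS_HOME, which the platform-specific
  # make files are expected to pick up to locate Mix's build directory and the
  # Erlang runtime.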
defp generate_env() do
build = Mix.Project.build_path() |> fix_path_separator
erts = Path.join([:code.root_dir(), 'erts-' ++ :erlang.system_info(:version)])
|> fix_path_separator
:ok = File.write "env.tmp", "BUILD_PATH=#{build}\nERTS_HOME=#{erts}"
end
defp fix_path_separator(path) do
case :os.type() do
{:win32, :nt} -> String.replace(path, "/", "\\")
_ -> path
end
end
end
defmodule Sniff.Mixfile do
use Mix.Project
def project do
[app: :sniff,
version: "0.1.4",
elixir: "~> 1.3",
compilers: [:nif | Mix.compilers],
build_embedded: Mix.env == :prod,
start_permanent: Mix.env == :prod,
aliases: aliases(),
description: description(),
package: package(),
deps: deps()]
end
def application do
[applications: []]
end
defp deps do
[
{:ex_doc, "~> 0.12", only: :dev},
]
end
defp description do
"Elixir Serial Port NIF"
end
defp package do
[
name: :sniff,
files: ["priv/.gitignore", "lib", "src", "test", "mix.*", "make.*", "*.exs", "*.sh", "*.bat", "*.md", ".gitignore", "LICENSE"],
maintainers: ["Samuel Ventura"],
licenses: ["Apache 2.0"],
links: %{"GitHub" => "https://github.com/samuelventura/sniff/"}]
end
defp aliases do
[
]
end
end
| 24.061728 | 132 | 0.566444 |
9e781c3a4f0ebcc7ff33279377724cd28ebfd4c1 | 1,914 | ex | Elixir | clients/speech/lib/google_api/speech/v1/model/speech_context.ex | mocknen/elixir-google-api | dac4877b5da2694eca6a0b07b3bd0e179e5f3b70 | [
"Apache-2.0"
] | null | null | null | clients/speech/lib/google_api/speech/v1/model/speech_context.ex | mocknen/elixir-google-api | dac4877b5da2694eca6a0b07b3bd0e179e5f3b70 | [
"Apache-2.0"
] | null | null | null | clients/speech/lib/google_api/speech/v1/model/speech_context.ex | mocknen/elixir-google-api | dac4877b5da2694eca6a0b07b3bd0e179e5f3b70 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This class is auto generated by the swagger code generator program.
# https://github.com/swagger-api/swagger-codegen.git
# Do not edit the class manually.
defmodule GoogleApi.Speech.V1.Model.SpeechContext do
@moduledoc """
Provides \"hints\" to the speech recognizer to favor specific words and phrases in the results.
## Attributes
- phrases ([String.t]): *Optional* A list of strings containing words and phrases \"hints\" so that the speech recognition is more likely to recognize them. This can be used to improve the accuracy for specific words and phrases, for example, if specific commands are typically spoken by the user. This can also be used to add additional words to the vocabulary of the recognizer. See [usage limits](/speech-to-text/quotas#content). Defaults to: `null`.
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:phrases => list(any())
}
field(:phrases, type: :list)
end
defimpl Poison.Decoder, for: GoogleApi.Speech.V1.Model.SpeechContext do
def decode(value, options) do
GoogleApi.Speech.V1.Model.SpeechContext.decode(value, options)
end
end
defimpl Poison.Encoder, for: GoogleApi.Speech.V1.Model.SpeechContext do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
| 39.875 | 465 | 0.750261 |
9e7827f3581a970bb7617e017bae0c147d8f262d | 373 | ex | Elixir | lib/quest/main/questionnaire.ex | ench0/quest | 844043205aa78e6937243f6993cbd560b6a5765b | [
"MIT"
] | null | null | null | lib/quest/main/questionnaire.ex | ench0/quest | 844043205aa78e6937243f6993cbd560b6a5765b | [
"MIT"
] | null | null | null | lib/quest/main/questionnaire.ex | ench0/quest | 844043205aa78e6937243f6993cbd560b6a5765b | [
"MIT"
] | null | null | null | defmodule Quest.Main.Questionnaire do
use Ecto.Schema
schema "main_questionnaires" do
field :info, :string
field :status, :string
field :tags, {:array, :string}
field :title, :string
    has_many :main_questions, Quest.Main.Question, on_delete: :delete_all
# has_many :main_options, through: [:main_questions, :main_options]
timestamps()
end
end
| 23.3125 | 71 | 0.705094 |
9e782e6ad954be5ab61fc3095f75dbf2b01ff00a | 9,442 | exs | Elixir | test/watcher_test.exs | infinityoneframework/etop | 8e1fa45d52de752f7fab7df6f509e3c952a091c8 | [
"MIT"
] | 4 | 2020-10-21T16:41:05.000Z | 2022-03-22T11:36:08.000Z | test/watcher_test.exs | infinityoneframework/etop | 8e1fa45d52de752f7fab7df6f509e3c952a091c8 | [
"MIT"
] | 2 | 2021-10-04T10:03:18.000Z | 2022-01-29T08:36:02.000Z | test/watcher_test.exs | infinityoneframework/etop | 8e1fa45d52de752f7fab7df6f509e3c952a091c8 | [
"MIT"
] | 1 | 2021-10-15T06:16:45.000Z | 2021-10-15T06:16:45.000Z | defmodule Etop.WatcherTest do
use ExUnit.Case
import ExUnit.{CaptureIO, CaptureLog}
alias Etop.WatcherTest.WatcherServer, as: Server
setup meta do
Application.put_env(:etop, :etop, Etop)
etop_opts = Keyword.merge([first_interval: 10, interval: 50], meta[:etop_opts] || [])
opts =
meta
|> Map.get(:opts, [])
|> Keyword.put_new(:etop_opts, etop_opts)
if Map.get(meta, :start, false), do: Server.start(opts)
on_exit(fn ->
if Server.alive?(), do: Server.stop()
if Etop.alive?(), do: Etop.stop()
end)
:ok
end
def setup_state(meta) do
{:ok, state: Server.initial_state(meta[:opts] || [])}
end
@tag :start
test "get/0" do
assert Server.get() == MapSet.new()
end
@tag :start
test "put/1 and get/1" do
Server.put(:test)
Server.put({:a, 1})
assert Server.get(:test)
assert Server.get({:a, 1})
refute Server.get({:a, 2})
end
@tag :start
test "clear/1" do
Server.put(:test)
Server.put({:a, 1})
assert Server.get(:test)
assert Server.get({:a, 1})
Server.clear(:test)
refute Server.get(:test)
assert Server.get({:a, 1})
end
@tag :start
test "add_monitors" do
Process.sleep(1)
state = Server.initial_state()
load_fn = Server.load_threshold(state)
msgq_fn = Server.msgq_threshold(state)
assert Etop.monitors() == [
{:process, :message_queue_len, msgq_fn,
{Etop.WatcherTest.WatcherServer, :message_queue_callback}},
{:summary, [:load, :total], load_fn,
{Etop.WatcherTest.WatcherServer, :load_callback}}
]
end
@tag etop_opts: [reporting: false]
@tag opts: [notify_log: true, no_reporting: true]
@tag :start
test "trigger monitors" do
refute Etop.status().reporting
capture_io(fn ->
logs =
capture_log(fn ->
Etop.Utils.create_load()
Process.sleep(200)
end)
|> String.split("\n", trim: true)
|> Enum.filter(&(&1 =~ "[info]"))
|> Enum.take(3)
assert length(logs) == 1
assert Enum.all?(logs, &(&1 =~ "Etop high CPU usage:"))
end)
end
test "custom opts" do
{:ok, _} = Server.start(custom: "one", another: %{test: true})
%{custom: custom, another: another} = Server.status()
assert custom == "one"
assert another == %{test: true}
end
describe "notify" do
setup [:setup_state]
@tag opts: [notify_log: true]
test "notify_disable/3", %{state: state} do
assert capture_log(fn ->
assert Server.notify_disable(state, :test, "test message").set == MapSet.new()
end) == ""
assert capture_log(fn ->
state = update_in(state, [:set], &MapSet.put(&1, :test))
assert Server.notify_disable(state, :test, "test message").set == MapSet.new()
end) =~ "test message"
end
end
describe "handle_call :load_callback" do
setup [:setup_state]
@tag opts: [notify_log: true]
test ">= enable_limit", %{state: state} do
assert capture_log(fn ->
etop = %{reporting: false}
{:reply, reply, _} =
nil |> load_callback_params(90, etop) |> Server.handle_call(:dc, state)
assert reply.reporting
end) =~ "Etop high CPU usage: 90"
end
@tag opts: [notify_log: true]
test "<= notify_lower_limit", %{state: state} do
orig_state = state
state = update_in(state, [:set], &MapSet.put(&1, :load))
assert capture_log(fn ->
etop = %{reporting: true}
{:reply, reply, state1} =
nil |> load_callback_params(49, etop) |> Server.handle_call(:dc, state)
assert reply.reporting
refute MapSet.member?(state1.set, :load)
end) =~ "Etop high CPU usage resolved: 49"
assert capture_log(fn ->
etop = %{reporting: true}
{:reply, reply, state1} =
nil |> load_callback_params(49, etop) |> Server.handle_call(:dc, orig_state)
assert reply.reporting
assert state1 == orig_state
end) == ""
end
@tag opts: [notify_log: true]
test "<= disable_limit", %{state: state} do
orig_state = state
state = update_in(state, [:set], &MapSet.put(&1, :load))
assert capture_log(fn ->
etop = %{reporting: true}
{:reply, reply, state1} =
nil |> load_callback_params(9, etop) |> Server.handle_call(:dc, state)
refute reply.reporting
refute MapSet.member?(state1.set, :load)
end) =~ "Etop high CPU usage resolved: 9"
assert capture_log(fn ->
etop = %{reporting: true}
{:reply, reply, state1} =
nil |> load_callback_params(9, etop) |> Server.handle_call(:dc, orig_state)
refute reply.reporting
assert state1 == orig_state
end) == ""
end
@tag opts: [notify_log: true]
test "default", %{state: state} do
assert capture_log(fn ->
etop = %{reporting: false}
{:reply, reply, state1} =
nil |> load_callback_params(60, etop) |> Server.handle_call(:dc, state)
assert reply == etop
assert state1 == state
end) == ""
assert capture_log(fn ->
etop = %{reporting: true}
{:reply, reply, state1} =
nil |> load_callback_params(60, etop) |> Server.handle_call(:dc, state)
assert reply == etop
assert state1 == state
end) == ""
assert capture_log(fn ->
etop = %{reporting: true}
{:reply, reply, state1} =
:dc |> load_callback_params(10.0, etop) |> Server.handle_call(:dc, state)
refute reply.reporting
assert state1 == state
end) == ""
assert capture_log(fn ->
etop = %{reporting: false}
{:reply, reply, state1} =
:dc |> load_callback_params(10.0, etop) |> Server.handle_call(:dc, state)
refute reply.reporting
assert state1 == state
end) == ""
end
end
describe "handle_call :message_queue_callback" do
setup [:setup_state]
@tag opts: [notify_log: true]
test ">= stop_limit", %{state: state} do
pid = start()
assert capture_log(fn ->
etop = %{reporting: false}
{:reply, reply, _} =
%{pid: pid}
|> queue_callback_params(20_000, etop)
|> Server.handle_call(:dc, state)
refute reply.reporting
end) =~ "Killing process with high msg_q length: 20000, pid: #{inspect(pid)}"
refute Process.alive?(pid)
end
@tag opts: [notify_log: true]
test ">= notify_limit", %{state: state} do
pid = start()
assert capture_log(fn ->
etop = %{reporting: false}
{:reply, reply, state1} =
%{pid: pid}
|> queue_callback_params(2000, etop)
|> Server.handle_call(:dc, state)
assert reply.reporting
assert MapSet.member?(state1.set, {:msgq, pid})
end) =~ "High message queue length: 2000, pid: #{inspect(pid)}"
assert Process.alive?(pid)
pid = start()
assert capture_log(fn ->
etop = %{reporting: true}
state = update_in(state, [:set], &MapSet.put(&1, {:msgq, pid}))
{:reply, reply, state1} =
%{pid: pid}
|> queue_callback_params(2000, etop)
|> Server.handle_call(:dc, state)
assert reply.reporting
assert state1 == state
end) == ""
assert Process.alive?(pid)
end
@tag opts: [notify_log: true]
test "< notify_limit", %{state: state} do
pid = start()
assert capture_log(fn ->
etop = %{reporting: false}
{:reply, reply, state1} =
%{pid: pid}
|> queue_callback_params(1000, etop)
|> Server.handle_call(:dc, state)
refute reply.reporting
refute MapSet.member?(state1.set, {:msgq, pid})
end) == ""
assert capture_log(fn ->
etop = %{reporting: false, proc_r: MapSet.new([pid])}
{:reply, reply, state1} =
%{pid: pid}
|> queue_callback_params(1000, etop)
|> Server.handle_call(:dc, %{state | set: MapSet.new([{:msgq, pid}])})
refute reply.reporting
refute reply[:proc_r]
refute MapSet.member?(state1.set, {:msgq, pid})
end) =~ "High Message queue alert resolved, pid: #{inspect(pid)}"
end
end
defp load_callback_params(info, value, etop),
do: {:load_callback, info, value, etop}
defp queue_callback_params(info, value, etop),
do: {:message_queue_callback, info, value, etop}
defp start,
do:
spawn(fn ->
receive do
:quit -> :ok
after
1000 -> :ok
end
end)
defmodule WatcherServer do
use Etop.Watcher
end
end
| 28.269461 | 93 | 0.532408 |
9e78682858d1c0c0685bc73a4d2a129fd61ac6c2 | 633 | ex | Elixir | testData/org/elixir_lang/parser_definition/matched_dot_call_operation_parsing_test_case/Sigil.ex | keyno63/intellij-elixir | 4033e319992c53ddd42a683ee7123a97b5e34f02 | [
"Apache-2.0"
] | 1,668 | 2015-01-03T05:54:27.000Z | 2022-03-25T08:01:20.000Z | testData/org/elixir_lang/parser_definition/matched_dot_call_operation_parsing_test_case/Sigil.ex | keyno63/intellij-elixir | 4033e319992c53ddd42a683ee7123a97b5e34f02 | [
"Apache-2.0"
] | 2,018 | 2015-01-01T22:43:39.000Z | 2022-03-31T20:13:08.000Z | testData/org/elixir_lang/parser_definition/matched_dot_call_operation_parsing_test_case/Sigil.ex | keyno63/intellij-elixir | 4033e319992c53ddd42a683ee7123a97b5e34f02 | [
"Apache-2.0"
] | 145 | 2015-01-15T11:37:16.000Z | 2021-12-22T05:51:02.000Z | ~c{one}.()
~c{one}.(function positional, key: value)
~c{one}.(key_one: value_one, key_two: value_two)
~c{one}.(
&one,
one <- two,
one when two,
one | two,
one = two,
one or two,
one || two,
one and two,
one && two,
one != two,
one < two,
one |> two,
one in two,
one ++ two,
one..two,
one + two,
one ^^^ two,
!one,
one.(),
Two.Three,
@one,
one,
@1,
&1,
!1,
not 1,
1,
[],
"StringLine",
"""
String
Heredoc
""",
'CharListLine',
'''
CharList
Heredoc
''',
~x{sigil}modifiers,
nil,
:atom,
Alias
)
~c{one}.(
one,
key: value
)
~c{one}.(
one
)(
two
)
| 11.105263 | 48 | 0.496051 |
9e78816f84a1f0c4e09fdc539c988f2743449201 | 518 | exs | Elixir | examples/delayed_job.exs | kianmeng/honeydew | 7c0e825c70ef4b72c82d02ca95491e7365d6b2e8 | [
"MIT"
] | 717 | 2015-06-15T19:30:54.000Z | 2022-03-22T06:10:09.000Z | examples/delayed_job.exs | kianmeng/honeydew | 7c0e825c70ef4b72c82d02ca95491e7365d6b2e8 | [
"MIT"
] | 106 | 2015-06-25T05:38:05.000Z | 2021-12-08T23:17:19.000Z | examples/delayed_job.exs | kianmeng/honeydew | 7c0e825c70ef4b72c82d02ca95491e7365d6b2e8 | [
"MIT"
] | 60 | 2015-06-07T00:48:37.000Z | 2022-03-06T08:20:23.000Z | #
# iex --erl "+C multi_time_warp" -S mix run examples/delayed_job.exs
#
defmodule Worker do
@behaviour Honeydew.Worker
def hello(enqueued_at) do
secs_later = DateTime.diff(DateTime.utc_now(), enqueued_at, :millisecond) / 1_000
IO.puts "I was delayed by #{secs_later}s!"
end
end
defmodule App do
def start do
:ok = Honeydew.start_queue(:my_queue)
:ok = Honeydew.start_workers(:my_queue, Worker)
end
end
App.start
{:hello, [DateTime.utc_now()]} |> Honeydew.async(:my_queue, delay_secs: 2)
| 22.521739 | 85 | 0.710425 |
9e7882681aa7c9ee5135d395b5437254d2ec540c | 5,705 | ex | Elixir | lib/rcon/packet.ex | avitex/elixir-rcon | f27629607811d6441448262f0bebea6c366319ca | [
"MIT"
] | 2 | 2018-09-20T20:42:35.000Z | 2018-11-15T11:40:06.000Z | lib/rcon/packet.ex | avitex/elixir-rcon | f27629607811d6441448262f0bebea6c366319ca | [
"MIT"
] | 11 | 2020-03-12T07:44:56.000Z | 2021-06-25T15:36:30.000Z | lib/rcon/packet.ex | avitex/elixir-rcon | f27629607811d6441448262f0bebea6c366319ca | [
"MIT"
] | 4 | 2018-11-06T18:00:31.000Z | 2020-05-20T09:47:21.000Z | defmodule RCON.Packet do
@moduledoc """
Module for handling RCON packets.
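  A quick example, building and encoding an exec packet for transmission:
      {:ok, raw} = RCON.Packet.create_and_encode(:exec, "status")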
"""
@initial_id 0
@auth_failed_id -1
# Null-term string, and packet terminator
@terminator_part <<0, 0>>
# Packet part lengths
@size_part_len 4
@id_part_len 4
@kind_part_len 4
@max_id 2_147_483_647
@max_body_len 1413
@min_size @id_part_len + @kind_part_len + byte_size(@terminator_part)
@type t :: {kind, id, body, from}
@type raw :: binary
@type size :: integer
@type id :: integer
@type kind :: :exec | :exec_resp | :auth | :auth_resp
@type kind_code :: 0 | 2 | 3
@type body :: binary
@type from :: :client | :server
@malformed_packet_error "Malformed packet"
@packet_body_len_error "Packet body too large"
@bad_packet_kind_error "Bad packet kind"
@bad_packet_kind_code_error "Bad packet kind code"
@bad_packet_size_error "Bad packet size"
@doc """
Returns the length in bytes of the packet size part.
"""
@spec size_part_len :: integer
def size_part_len, do: @size_part_len
@doc """
Returns the length in bytes of the packet id part.
"""
@spec id_part_len :: integer
def id_part_len, do: @id_part_len
@doc """
Returns the length in bytes of the packet kind part.
"""
@spec kind_part_len :: integer
def kind_part_len, do: @kind_part_len
@doc """
Returns the initial packet ID value.
"""
@spec initial_id :: id
def initial_id, do: @initial_id
@doc """
Returns the packet ID used for auth failure.
"""
@spec auth_failed_id :: id
def auth_failed_id, do: @auth_failed_id
@doc """
Returns the max possible value a packet ID may have.
Value from signed int32 max (`2^31 - 1`).
"""
@spec max_id :: id
def max_id, do: @max_id
@doc """
  The smallest value a packet size may be.
"""
@spec min_size :: size
def min_size, do: @min_size
@doc """
Returns the maximum size a body may have.
  Minecraft only supports a request payload length of at most 1446 bytes.
  However, some tests showed that only requests with a payload length
  of 1413 bytes or lower work reliably.
"""
@spec max_body_len :: integer
def max_body_len, do: @max_body_len
@doc """
Returns the kind for a packet.
"""
@spec kind(t) :: kind
def kind(_packet = {kind, _, _, _}), do: kind
@doc """
Returns the ID for a packet.
"""
@spec id(t) :: id
def id(_packet = {_, id, _, _}), do: id
@doc """
Returns the body for a packet.
"""
@spec body(t) :: body
def body(_packet = {_, _, body, _}), do: body
@doc """
Returns the body length in bytes for a packet.
Does not include the null character.
"""
@spec body_len(t) :: integer
def body_len(_packet = {_, _, body, _}), do: byte_size(body)
@doc """
Returns from what side the packet was sent from.
"""
@spec from(t) :: from
def from(_packet = {_, _, _, from}), do: from
@doc """
Creates and encodes a packet in one step.
"""
@spec create_and_encode(kind, body, id, from) :: {:ok, raw} | {:error, binary}
def create_and_encode(kind, body, id \\ @initial_id, from \\ :client) do
encode({kind, id, body, from})
end
@doc """
Creates a packet.
"""
@spec create(kind, body, id, from) :: {:ok, t} | {:error, binary}
def create(kind, body, id \\ @initial_id, from \\ :client) do
check_packet({kind, id, body, from})
end
@doc """
Encodes a packet to a binary for transmission.
"""
@spec encode(t) :: {:ok, raw} | {:error, binary}
def encode(packet) do
with {:ok, {kind, id, body, from}} <- check_packet(packet),
{:ok, kind_code} <- kind_to_code(kind, from) do
size = byte_size(body) + @min_size
header = <<
size::32-signed-integer-little,
id::32-signed-integer-little,
kind_code::32-signed-integer-little
>>
{:ok, header <> body <> @terminator_part}
end
end
@doc """
Decodes a packet size.
"""
@spec decode_size(binary) :: {:ok, size} | {:error, binary}
def decode_size(<<size::32-signed-integer-little>>), do: {:ok, size}
def decode_size(_), do: {:error, @bad_packet_size_error}
@doc """
Decodes a packet payload from transmission.
"""
@spec decode_payload(size, binary, from) :: {:ok, t} | {:error, binary}
def decode_payload(size, payload, from \\ :server) do
body_size = size - @min_size
case payload do
<<
id::32-signed-integer-little,
kind_code::32-signed-integer-little,
body::binary-size(body_size)
>> <> @terminator_part ->
with {:ok, kind} <- kind_from_code(kind_code, from) do
{:ok, {kind, id, body, from}}
end
_ ->
{:error, @malformed_packet_error}
end
end
@doc """
Returns the packet kind for a code.
"""
@spec kind_from_code(kind_code, from) :: {:ok, kind} | {:error, binary}
def kind_from_code(kind_code, from)
def kind_from_code(0, _), do: {:ok, :exec_resp}
def kind_from_code(2, :client), do: {:ok, :exec}
def kind_from_code(2, :server), do: {:ok, :auth_resp}
def kind_from_code(3, _), do: {:ok, :auth}
def kind_from_code(_, _), do: {:error, @bad_packet_kind_code_error}
@doc """
Returns the code for a packet kind.
"""
@spec kind_to_code(kind, from) :: {:ok, kind_code} | {:error, binary}
def kind_to_code(kind, from)
def kind_to_code(:exec_resp, _), do: {:ok, 0}
def kind_to_code(:exec, :client), do: {:ok, 2}
def kind_to_code(:auth_resp, :server), do: {:ok, 2}
def kind_to_code(:auth, _), do: {:ok, 3}
def kind_to_code(_, _), do: {:error, @bad_packet_kind_error}
defp check_packet(packet) do
cond do
body_len(packet) > @max_body_len ->
{:error, @packet_body_len_error}
true ->
{:ok, packet}
end
end
end
| 26.050228 | 80 | 0.632077 |
9e7886b787d15faeb46d4fed584679443444aac6 | 1,114 | ex | Elixir | lib/ex_doc/markdown.ex | mischov/meeseeks | 74f84010252da3298f8c74e90fdee1ab9ad6d700 | [
"Apache-2.0",
"MIT"
] | 291 | 2017-03-27T15:53:36.000Z | 2022-03-14T23:01:42.000Z | lib/ex_doc/markdown.ex | mischov/meeseeks | 74f84010252da3298f8c74e90fdee1ab9ad6d700 | [
"Apache-2.0",
"MIT"
] | 70 | 2017-03-30T23:32:34.000Z | 2021-06-27T06:26:28.000Z | lib/ex_doc/markdown.ex | mischov/meeseeks | 74f84010252da3298f8c74e90fdee1ab9ad6d700 | [
"Apache-2.0",
"MIT"
] | 23 | 2017-06-18T10:29:04.000Z | 2021-11-04T13:08:12.000Z | # Inspired by the similar solution in Pow (https://github.com/danschultzer/pow)
if Code.ensure_loaded?(ExDoc.Markdown.Earmark) do
# Due to how relative links works in ExDoc, it's necessary for us to use a
# custom markdown parser to ensure that paths will work in generated docs.
#
# Ref: https://github.com/elixir-lang/ex_doc/issues/889
defmodule ExDoc.Markdown.Meeseeks do
@moduledoc false
alias ExDoc.Markdown.Earmark
@behaviour ExDoc.Markdown
def to_ast(text, opts) do
text
|> rewrite_urls()
|> Earmark.to_ast(opts)
end
@markdown_link_regex ~r/(\[[\S ]*\]\()([\S]*?)(\.md)([\S]*?\))/
defp rewrite_urls(text) do
Regex.replace(@markdown_link_regex, text, &rewrite_url/5)
end
# Links to guides in README
defp rewrite_url(_, first, "guides/" <> guide, ".md", last) do
first <> "#{guide}.html" <> last
end
# Links to CONTRIBUTING.md in README
defp rewrite_url(_, first, "CONTRIBUTING", ".md", last) do
first <> "contributing.html" <> last
end
defp rewrite_url(other, _, _, _, _), do: other
end
end
| 27.85 | 79 | 0.649013 |
9e788b1b63d20b51d0a7ac9444fd641ef0cbe611 | 20,082 | ex | Elixir | lib/ecto.ex | jccf091/ecto | 42d47a6da0711f842e1a0e6724a89b318b9b2144 | [
"Apache-2.0"
] | null | null | null | lib/ecto.ex | jccf091/ecto | 42d47a6da0711f842e1a0e6724a89b318b9b2144 | [
"Apache-2.0"
] | null | null | null | lib/ecto.ex | jccf091/ecto | 42d47a6da0711f842e1a0e6724a89b318b9b2144 | [
"Apache-2.0"
] | null | null | null | defmodule Ecto do
@moduledoc ~S"""
Ecto is split into 4 main components:
* `Ecto.Repo` - repositories are wrappers around the data store.
Via the repository, we can create, update, destroy and query existing entries.
A repository needs an adapter and credentials to communicate to the database
* `Ecto.Schema` - schemas are used to map any data source into an Elixir
struct. We will often use them to map tables into Elixir data but that's
one of their use cases and not a requirement for using Ecto
* `Ecto.Changeset` - changesets provide a way for developers to filter
and cast external parameters, as well as a mechanism to track and
validate changes before they are applied to your data
* `Ecto.Query` - written in Elixir syntax, queries are used to retrieve
information from a given repository. Queries in Ecto are secure, avoiding
common problems like SQL Injection, while still being composable, allowing
developers to build queries piece by piece instead of all at once
In the following sections, we will provide an overview of those components and
how they interact with each other. Feel free to access their respective module
documentation for more specific examples, options and configuration.
If you want to quickly check a sample application using Ecto, please check
the [getting started guide](http://hexdocs.pm/ecto/getting-started.html) and
the accompanying sample application.
After exploring the documentation and guides, consider checking out the
["What's new in Ecto 2.1"](http://pages.plataformatec.com.br/ebook-whats-new-in-ecto-2-0)
free ebook to learn more about many features in Ecto 2.1 such as `many_to_many`,
schemaless queries, concurrent testing and more. Note the book still largely applies
to Ecto 3.0 as the major change in Ecto 3.0 was the removal of the outdated
Ecto datetime types in favor of Elixir's Calendar types.
## Repositories
`Ecto.Repo` is a wrapper around the database. We can define a
repository as follows:
defmodule Repo do
use Ecto.Repo, otp_app: :my_app
end
Where the configuration for the Repo must be in your application
environment, usually defined in your `config/config.exs`:
config :my_app, Repo,
adapter: Ecto.Adapters.Postgres,
database: "ecto_simple",
username: "postgres",
password: "postgres",
hostname: "localhost",
# OR use a URL to connect instead
url: "postgres://postgres:postgres@localhost/ecto_simple"
Each repository in Ecto defines a `start_link/0` function that needs to be invoked
before using the repository. In general, this function is not called directly,
but used as part of your application supervision tree.
If your application was generated with a supervisor (by passing `--sup` to `mix new`)
you will have a `lib/my_app/application.ex` file (or `lib/my_app.ex` for Elixir versions `< 1.4.0`)
containing the application start callback that defines and starts your supervisor.
You just need to edit the `start/2` function to start the repo as a supervisor on
your application's supervisor:
def start(_type, _args) do
import Supervisor.Spec
children = [
supervisor(Repo, [])
]
opts = [strategy: :one_for_one, name: MyApp.Supervisor]
Supervisor.start_link(children, opts)
end
## Schema
Schemas allows developers to define the shape of their data.
Let's see an example:
defmodule Weather do
use Ecto.Schema
# weather is the DB table
schema "weather" do
field :city, :string
field :temp_lo, :integer
field :temp_hi, :integer
field :prcp, :float, default: 0.0
end
end
By defining a schema, Ecto automatically defines a struct with
the schema fields:
iex> weather = %Weather{temp_lo: 30}
iex> weather.temp_lo
30
The schema also allows us to interact with a repository:
iex> weather = %Weather{temp_lo: 0, temp_hi: 23}
iex> Repo.insert!(weather)
%Weather{...}
After persisting `weather` to the database, it will return a new copy of
`%Weather{}` with the primary key (the `id`) set. We can use this value
to read a struct back from the repository:
# Get the struct back
iex> weather = Repo.get Weather, 1
%Weather{id: 1, ...}
# Delete it
iex> Repo.delete!(weather)
%Weather{...}
  > NOTE: by using `Ecto.Schema`, an `:id` field with type `:id` (`:id` means `:integer`) is
  > generated by default, which is the primary key of the schema. If you want
> to use a different primary key, you can declare custom `@primary_key`
> before the `schema/2` call. Consult the `Ecto.Schema` documentation
> for more information.
Notice how the storage (repository) and the data are decoupled. This provides
two main benefits:
* By having structs as data, we guarantee they are light-weight,
serializable structures. In many languages, the data is often represented
by large, complex objects, with entwined state transactions, which makes
serialization, maintenance and understanding hard;
* You do not need to define schemas in order to interact with repositories,
operations like `all`, `insert_all` and so on allow developers to directly
access and modify the data, keeping the database at your fingertips when
necessary;
## Changesets
Although in the example above we have directly inserted and deleted the
struct in the repository, operations on top of schemas are done through
changesets so Ecto can efficiently track changes.
Changesets allow developers to filter, cast, and validate changes before
we apply them to the data. Imagine the given schema:
defmodule User do
use Ecto.Schema
import Ecto.Changeset
schema "users" do
field :name
field :email
field :age, :integer
end
def changeset(user, params \\ %{}) do
user
|> cast(params, [:name, :email, :age])
|> validate_required([:name, :email])
|> validate_format(:email, ~r/@/)
|> validate_inclusion(:age, 18..100)
end
end
The `changeset/2` function first invokes `Ecto.Changeset.cast/4` with
the struct, the parameters and a list of allowed fields; this returns a changeset.
  The parameters are given as a map with binary keys and values, which will
  be cast based on the types defined in the schema.
Any parameter that was not explicitly listed in the fields list will be ignored.
After casting, the changeset is given to many `Ecto.Changeset.validate_*`
functions that validate only the **changed fields**. In other words:
if a field was not given as a parameter, it won't be validated at all.
  For example, if the params map contains only the "name" and "email" keys,
  the "age" validation won't run.
Once a changeset is built, it can be given to functions like `insert` and
`update` in the repository that will return an `:ok` or `:error` tuple:
case Repo.update(changeset) do
{:ok, user} ->
# user updated
{:error, changeset} ->
# an error occurred
end
The benefit of having explicit changesets is that we can easily provide
different changesets for different use cases. For example, one
could easily provide specific changesets for registering and updating
users:
def registration_changeset(user, params) do
# Changeset on create
end
def update_changeset(user, params) do
# Changeset on update
end
  Changesets are also capable of transforming database constraints,
  like unique indexes and foreign key checks, into errors, allowing
  developers to keep their database consistent while still providing
  proper feedback to end users. Check `Ecto.Changeset.unique_constraint/3`
  for some examples as well as the other `_constraint` functions.
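  As a quick sketch, assuming the underlying table has a unique index on
  the email column:
      def changeset(user, params) do
        user
        |> cast(params, [:name, :email, :age])
        |> unique_constraint(:email)
      end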
## Query
Last but not least, Ecto allows you to write queries in Elixir and send
them to the repository, which translates them to the underlying database.
Let's see an example:
import Ecto.Query, only: [from: 2]
query = from u in User,
where: u.age > 18 or is_nil(u.email),
select: u
# Returns %User{} structs matching the query
Repo.all(query)
In the example above we relied on our schema but queries can also be
made directly against a table by giving the table name as a string. In
such cases, the data to be fetched must be explicitly outlined:
query = from u in "users",
where: u.age > 18 or is_nil(u.email),
select: %{name: u.name, age: u.age}
# Returns maps as defined in select
Repo.all(query)
Queries are defined and extended with the `from` macro. The supported
keywords are:
* `:distinct`
* `:where`
* `:order_by`
* `:offset`
* `:limit`
* `:lock`
* `:group_by`
* `:having`
* `:join`
* `:select`
* `:preload`
Examples and detailed documentation for each of those are available
in the `Ecto.Query` module. Functions supported in queries are listed
in `Ecto.Query.API`.
When writing a query, you are inside Ecto's query syntax. In order to
access params values or invoke Elixir functions, you need to use the `^`
operator, which is overloaded by Ecto:
def min_age(min) do
from u in User, where: u.age > ^min
end
Besides `Repo.all/1` which returns all entries, repositories also
provide `Repo.one/1` which returns one entry or nil, `Repo.one!/1`
which returns one entry or raises, `Repo.get/2` which fetches
entries for a particular ID and more.
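  For example, fetching a user by ID or grabbing at most one matching entry:
      Repo.get(User, 42)
      Repo.one(from u in User, where: u.age > 18, limit: 1)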
Finally, if you need an escape hatch, Ecto provides fragments
(see `Ecto.Query.API.fragment/1`) to inject SQL (and non-SQL)
fragments into queries. Also, most adapters provide direct
APIs for queries, like `Ecto.Adapters.SQL.query/4`, allowing
developers to completely bypass Ecto queries.
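  For example, to match emails case-insensitively with a fragment (this
  assumes the database provides a `lower` function, as PostgreSQL does):
      def by_email(email) do
        from u in User,
          where: fragment("lower(?)", u.email) == ^String.downcase(email)
      end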
## Other topics
### Associations
Ecto supports defining associations on schemas:
defmodule Post do
use Ecto.Schema
schema "posts" do
has_many :comments, Comment
end
end
defmodule Comment do
use Ecto.Schema
schema "comments" do
field :title, :string
belongs_to :post, Post
end
end
When an association is defined, Ecto also defines a field in the schema
with the association name. By default, associations are not loaded into
this field:
iex> post = Repo.get(Post, 42)
iex> post.comments
#Ecto.Association.NotLoaded<...>
However, developers can use the preload functionality in queries to
automatically pre-populate the field:
Repo.all from p in Post, preload: [:comments]
Preloading can also be done with a pre-defined join value:
Repo.all from p in Post,
join: c in assoc(p, :comments),
where: c.votes > p.votes,
preload: [comments: c]
Finally, for the simple cases, preloading can also be done after
a collection was fetched:
posts = Repo.all(Post) |> Repo.preload(:comments)
The `Ecto` module also provides conveniences for working
with associations. For example, `Ecto.assoc/2` returns a query
with all associated data to a given struct:
import Ecto
# Get all comments for the given post
Repo.all assoc(post, :comments)
# Or build a query on top of the associated comments
query = from c in assoc(post, :comments), where: not is_nil(c.title)
Repo.all(query)
  Another function in `Ecto` is `build_assoc/3`, which allows
  you to build an associated struct with the proper fields:
Repo.transaction fn ->
post = Repo.insert!(%Post{title: "Hello", body: "world"})
# Build a comment from post
comment = Ecto.build_assoc(post, :comments, body: "Excellent!")
Repo.insert!(comment)
end
In the example above, `Ecto.build_assoc/3` is equivalent to:
%Comment{post_id: post.id, body: "Excellent!"}
You can find more information about defining associations and each
respective association module in `Ecto.Schema` docs.
> NOTE: Ecto does not lazy load associations. While lazily loading
> associations may sound convenient at first, in the long run it
> becomes a source of confusion and performance issues.
### Embeds
Ecto also supports embeds. While associations keep parent and child
entries in different tables, embeds store the child alongside the
parent.
Databases like MongoDB have native support for embeds. Databases
like PostgreSQL use a mixture of JSONB (`embeds_one/3`) and ARRAY
columns to provide this functionality.
Check `Ecto.Schema.embeds_one/3` and `Ecto.Schema.embeds_many/3`
for more information.
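As a brief sketch (the `Order`, `Address` and `Item` schemas here are
hypothetical):

    defmodule Order do
      use Ecto.Schema

      schema "orders" do
        embeds_one :shipping_address, Address
        embeds_many :items, Item
      end
    end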
### Mix tasks and generators
Ecto provides many tasks to help your workflow as well as code generators.
You can find all available tasks by typing `mix help` inside a project
with Ecto listed as a dependency.
Ecto generators will automatically open the generated files if you have
`ECTO_EDITOR` set as an environment variable.
#### Migrations
Ecto supports database migrations. You can generate a migration with:
$ mix ecto.gen.migration create_posts
This will create a new file inside `priv/repo/migrations` with the `change`
function. Check `Ecto.Migration` for more information.
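For illustration, the generated migration could be filled in along these
lines (the table name and columns are assumptions):

    defmodule MyApp.Repo.Migrations.CreatePosts do
      use Ecto.Migration

      def change do
        create table(:posts) do
          add :title, :string
          add :body, :text

          timestamps()
        end
      end
    end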
#### Repo resolution
Ecto requires developers to specify the key `:ecto_repos` in their application
configuration before using tasks like `ecto.create` and `ecto.migrate`. For example:
config :my_app, :ecto_repos, [MyApp.Repo]
config :my_app, MyApp.Repo,
adapter: Ecto.Adapters.Postgres,
database: "ecto_simple",
username: "postgres",
password: "postgres",
hostname: "localhost"
"""
@doc """
Returns the schema primary keys as a keyword list.
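For example, assuming a `Post` schema with the default `:id` primary key:

    iex> Ecto.primary_key(%Post{id: 1})
    [id: 1]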
"""
@spec primary_key(Ecto.Schema.t) :: Keyword.t
def primary_key(%{__struct__: schema} = struct) do
Enum.map schema.__schema__(:primary_key), fn(field) ->
{field, Map.fetch!(struct, field)}
end
end
@doc """
Returns the schema primary keys as a keyword list.
Raises `Ecto.NoPrimaryKeyFieldError` if the schema has no
primary key field.
"""
@spec primary_key!(Ecto.Schema.t) :: Keyword.t | no_return
def primary_key!(%{__struct__: schema} = struct) do
case primary_key(struct) do
[] -> raise Ecto.NoPrimaryKeyFieldError, schema: schema
pk -> pk
end
end
@doc """
Builds a struct from the given `assoc` in `struct`.
## Examples
If the relationship is a `has_one` or `has_many` and
the key is set in the given struct, the key will automatically
be set in the built association:
iex> post = Repo.get(Post, 13)
%Post{id: 13}
iex> build_assoc(post, :comments)
%Comment{id: nil, post_id: 13}
Note, though, that this doesn't happen in `belongs_to` cases, as the
key is often the primary key, which is usually generated
dynamically:
iex> comment = Repo.get(Comment, 13)
%Comment{id: 13, post_id: 25}
iex> build_assoc(comment, :post)
%Post{id: nil}
You can also pass attributes, as a map or a keyword list,
to set the struct's fields; the association key itself
cannot be overridden.
iex> build_assoc(post, :comments, text: "cool")
%Comment{id: nil, post_id: 13, text: "cool"}
iex> build_assoc(post, :comments, %{text: "cool"})
%Comment{id: nil, post_id: 13, text: "cool"}
iex> build_assoc(post, :comments, post_id: 1)
%Comment{id: nil, post_id: 13}
"""
def build_assoc(%{__struct__: schema} = struct, assoc, attributes \\ %{}) do
assoc = Ecto.Association.association_from_schema!(schema, assoc)
assoc.__struct__.build(assoc, struct, drop_meta(attributes))
end
defp drop_meta(%{} = attrs), do: Map.drop(attrs, [:__struct__, :__meta__])
defp drop_meta([_|_] = attrs), do: Keyword.drop(attrs, [:__struct__, :__meta__])
@doc """
Builds a query for the association in the given struct or structs.
## Examples
In the example below, we get all comments associated to the given
post:
post = Repo.get Post, 1
Repo.all Ecto.assoc(post, :comments)
`assoc/2` can also receive a list of posts, as long as the posts are
not empty:
posts = Repo.all from p in Post, where: is_nil(p.published_at)
Repo.all Ecto.assoc(posts, :comments)
This function can also be used to dynamically load through associations
by giving it a list. For example, to get all authors for all comments for
the given posts, do:
posts = Repo.all from p in Post, where: is_nil(p.published_at)
Repo.all Ecto.assoc(posts, [:comments, :author])
"""
def assoc(struct_or_structs, assocs) do
[assoc | assocs] = List.wrap(assocs)
structs = List.wrap(struct_or_structs)
if structs == [] do
raise ArgumentError, "cannot retrieve association #{inspect assoc} for empty list"
end
schema = hd(structs).__struct__
assoc = %{owner_key: owner_key} =
Ecto.Association.association_from_schema!(schema, assoc)
values =
Enum.uniq for(struct <- structs,
assert_struct!(schema, struct),
key = Map.fetch!(struct, owner_key),
do: key)
Ecto.Association.assoc_query(assoc, assocs, nil, values)
end
@doc """
Checks if an association is loaded.
## Examples
iex> post = Repo.get(Post, 1)
iex> Ecto.assoc_loaded?(post.comments)
false
iex> post = post |> Repo.preload(:comments)
iex> Ecto.assoc_loaded?(post.comments)
true
"""
def assoc_loaded?(association) do
case association do
%Ecto.Association.NotLoaded{} -> false
_ -> true
end
end
@doc """
Gets the metadata from the given struct.
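For example, given a `post` struct loaded from the repository:

    Ecto.get_meta(post, :source)
    #=> "posts"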
"""
def get_meta(struct, :context),
do: struct.__meta__.context
def get_meta(struct, :state),
do: struct.__meta__.state
def get_meta(struct, :source),
do: struct.__meta__.source |> elem(1)
def get_meta(struct, :prefix),
do: struct.__meta__.source |> elem(0)
@doc """
Returns a new struct with updated metadata.
It is possible to set:
* `:source` - changes the struct query source
* `:prefix` - changes the struct query prefix
* `:context` - changes the struct meta context
* `:state` - changes the struct state
Please refer to the `Ecto.Schema.Metadata` module for more information.
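For example, to point a struct at a different source table (a sketch):

    post = Ecto.put_meta(post, source: "archived_posts")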
"""
@spec put_meta(Ecto.Schema.schema, meta) :: Ecto.Schema.schema
when meta: [source: Ecto.Schema.source, prefix: Ecto.Schema.prefix,
context: Ecto.Schema.Metadata.context, state: Ecto.Schema.Metadata.state]
def put_meta(struct, opts) do
update_in struct.__meta__, &update_meta(opts, &1)
end
defp update_meta([{:state, state}|t], meta) do
if state in [:built, :loaded, :deleted] do
update_meta t, %{meta | state: state}
else
raise ArgumentError, "invalid state #{inspect state}"
end
end
defp update_meta([{:source, source} | t], %{source: {prefix, _}} = meta) do
update_meta t, %{meta | source: {prefix, source}}
end
defp update_meta([{:prefix, prefix} | t], %{source: {_, source}} = meta) do
update_meta t, %{meta | source: {prefix, source}}
end
defp update_meta([{:context, context} | t], meta) do
update_meta t, %{meta | context: context}
end
defp update_meta([], meta) do
meta
end
defp update_meta([{k, _}], _meta) do
raise ArgumentError, "unknown meta key #{inspect k}"
end
defp assert_struct!(module, %{__struct__: struct}) do
if struct != module do
raise ArgumentError, "expected a homogeneous list containing the same struct, " <>
"got: #{inspect module} and #{inspect struct}"
else
true
end
end
end
| 33.029605 | 101 | 0.679863 |
9e78b8ff576a5bb720bebdc117e586e6fa2209f6 | 1,058 | ex | Elixir | lib/excoap/server.ex | mbialon/excoap | 929c3c799d6fd664acbc0caf0022a62f0d702df1 | ["MIT"] | 3 | 2015-09-26T06:57:56.000Z | 2016-12-14T17:08:44.000Z | lib/excoap/server.ex | mbialon/excoap | 929c3c799d6fd664acbc0caf0022a62f0d702df1 | ["MIT"] | 1 | 2018-12-17T19:54:52.000Z | 2018-12-17T19:54:52.000Z | lib/excoap/server.ex | mbialon/excoap | 929c3c799d6fd664acbc0caf0022a62f0d702df1 | ["MIT"] | null | null | null |
defmodule Excoap.Server do
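  @moduledoc """
  A minimal CoAP server: listens on a UDP socket, decodes incoming packets
  into `Excoap.Message` structs and forwards them to a handler process.
  """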
def start(port \\ 0, handler) do
pid = spawn_link(fn ->
open!(port) |> handle(handler)
end)
{:ok, pid}
end
def start!(port \\ 0, handler) do
{:ok, pid} = start(port, handler)
pid
end
defp open!(port) do
{:ok, socket} = :gen_udp.open(port, [:binary])
socket
end
defp handle(socket, handler) do
receive do
      # Pin the socket so we only match UDP messages from our own socket
      {:udp, ^socket, ip_addr, port, packet} ->
msg = Excoap.Message.decode(packet)
send(handler, {self(), ip_addr, port, msg})
handle(socket, handler)
{:excoap, ip_addr, port, msg} ->
packet = Excoap.Message.encode(msg)
:ok = :gen_udp.send(socket, ip_addr, port, packet)
handle(socket, handler)
end
end
def register(pid, ip_addr, port, id) do
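    # Format the 128-bit id as "ep=" followed by a 32-digit zero-padded hex string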
ep = :io_lib.format("ep=~32.16.0B", [id]) |> List.to_string
msg = %Excoap.Message{
type: :con,
code: :post,
options: [
{:uri_path, "rd"},
{:uri_query, ep}
]
}
send(pid, {:excoap, ip_addr, port, msg})
end
end
| 23.511111 | 63 | 0.563327 |
9e78c070dea34fe5fe157d60c5b70ee40cd47157 | 861 | ex | Elixir | lib/proxir.ex | troelsim/proxir | fc897bc4ce551a92cba68caedf8f7debe1e90df9 | ["Apache-2.0"] | null | null | null | lib/proxir.ex | troelsim/proxir | fc897bc4ce551a92cba68caedf8f7debe1e90df9 | ["Apache-2.0"] | null | null | null | lib/proxir.ex | troelsim/proxir | fc897bc4ce551a92cba68caedf8f7debe1e90df9 | ["Apache-2.0"] | null | null | null |
defmodule Proxir do
@moduledoc """
  A simple proxy that forwards traffic from a local port to a remote host and port.
"""
  @doc """
  Escript entry point.

  Expects `[port, remote_host, remote_port]` as arguments and starts the
  proxy application.
  """
def main(args) do
parse_args(args)
end
def parse_args([port, host, remote_port]) when
is_integer(port) and
is_binary(host) and
is_integer(remote_port)
do
Proxir.Application.start(:normal, [port: port, host: host, remote_port: remote_port])
# Wait indefinitely, otherwise process will quit immediately
receive do
_ -> nil
end
end
def parse_args([port, host, remote_port]) do
try do
parse_args([String.to_integer(port), host, String.to_integer(remote_port)])
rescue
ArgumentError -> parse_args(nil)
end
end
def parse_args(_) do
IO.puts("""
Usage:
proxir <port> <remote_host> <remote_port>
""")
end
end
| 19.568182 | 89 | 0.637631 |
9e790859c1eefe4e1df92a944c27aff8058bf8df | 345 | exs | Elixir | priv/repo/seeds.exs | kawakami-o3/epcc | e0a102b5471e133580c92feb91ec9597f1cf7021 | ["MIT"] | null | null | null | priv/repo/seeds.exs | kawakami-o3/epcc | e0a102b5471e133580c92feb91ec9597f1cf7021 | ["MIT"] | 3 | 2020-07-17T03:56:23.000Z | 2021-05-09T00:04:45.000Z | priv/repo/seeds.exs | kawakami-o3/epcc | e0a102b5471e133580c92feb91ec9597f1cf7021 | ["MIT"] | null | null | null |
# Script for populating the database. You can run it as:
#
# mix run priv/repo/seeds.exs
#
# Inside the script, you can read and write to any of your
# repositories directly:
#
# Epcc.Repo.insert!(%Epcc.SomeSchema{})
#
# We recommend using the bang functions (`insert!`, `update!`
# and so on) as they will fail if something goes wrong.
| 28.75 | 61 | 0.701449 |
9e7917ed6bb7bf4bbc4a7288d8e3f3d9c40d0c5e | 2,966 | ex | Elixir | clients/firestore/lib/google_api/firestore/v1beta1/model/listen_response.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | ["Apache-2.0"] | 1 | 2021-12-20T03:40:53.000Z | 2021-12-20T03:40:53.000Z | clients/firestore/lib/google_api/firestore/v1beta1/model/listen_response.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | ["Apache-2.0"] | 1 | 2020-08-18T00:11:23.000Z | 2020-08-18T00:44:16.000Z | clients/firestore/lib/google_api/firestore/v1beta1/model/listen_response.ex | pojiro/elixir-google-api | 928496a017d3875a1929c6809d9221d79404b910 | ["Apache-2.0"] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.Firestore.V1beta1.Model.ListenResponse do
@moduledoc """
The response for Firestore.Listen.
## Attributes
* `documentChange` (*type:* `GoogleApi.Firestore.V1beta1.Model.DocumentChange.t`, *default:* `nil`) - A Document has changed.
* `documentDelete` (*type:* `GoogleApi.Firestore.V1beta1.Model.DocumentDelete.t`, *default:* `nil`) - A Document has been deleted.
* `documentRemove` (*type:* `GoogleApi.Firestore.V1beta1.Model.DocumentRemove.t`, *default:* `nil`) - A Document has been removed from a target (because it is no longer relevant to that target).
* `filter` (*type:* `GoogleApi.Firestore.V1beta1.Model.ExistenceFilter.t`, *default:* `nil`) - A filter to apply to the set of documents previously returned for the given target. Returned when documents may have been removed from the given target, but the exact documents are unknown.
* `targetChange` (*type:* `GoogleApi.Firestore.V1beta1.Model.TargetChange.t`, *default:* `nil`) - Targets have changed.
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:documentChange => GoogleApi.Firestore.V1beta1.Model.DocumentChange.t() | nil,
:documentDelete => GoogleApi.Firestore.V1beta1.Model.DocumentDelete.t() | nil,
:documentRemove => GoogleApi.Firestore.V1beta1.Model.DocumentRemove.t() | nil,
:filter => GoogleApi.Firestore.V1beta1.Model.ExistenceFilter.t() | nil,
:targetChange => GoogleApi.Firestore.V1beta1.Model.TargetChange.t() | nil
}
field(:documentChange, as: GoogleApi.Firestore.V1beta1.Model.DocumentChange)
field(:documentDelete, as: GoogleApi.Firestore.V1beta1.Model.DocumentDelete)
field(:documentRemove, as: GoogleApi.Firestore.V1beta1.Model.DocumentRemove)
field(:filter, as: GoogleApi.Firestore.V1beta1.Model.ExistenceFilter)
field(:targetChange, as: GoogleApi.Firestore.V1beta1.Model.TargetChange)
end
defimpl Poison.Decoder, for: GoogleApi.Firestore.V1beta1.Model.ListenResponse do
def decode(value, options) do
GoogleApi.Firestore.V1beta1.Model.ListenResponse.decode(value, options)
end
end
defimpl Poison.Encoder, for: GoogleApi.Firestore.V1beta1.Model.ListenResponse do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
| 50.271186 | 288 | 0.747808 |
9e793330ef5f357887a3eb0d11940d7b7b38efab | 7,043 | exs | Elixir | test/rover_test.exs | nicolafiorillo/mars_rover | 03f74c3fb6f867e78c9e51fad73ddca99d004231 | ["MIT"] | null | null | null | test/rover_test.exs | nicolafiorillo/mars_rover | 03f74c3fb6f867e78c9e51fad73ddca99d004231 | ["MIT"] | null | null | null | test/rover_test.exs | nicolafiorillo/mars_rover | 03f74c3fb6f867e78c9e51fad73ddca99d004231 | ["MIT"] | null | null | null |
defmodule RoverTest do
use ExUnit.Case
test "create a rover and send to planet" do
{:ok, mars} = MarsRover.Planet.start_link(10, 10)
{:ok, _rover} = MarsRover.Rover.start_link(mars, 1, 1)
end
test "send a rover to an obstacle" do
{:ok, mars} = MarsRover.Planet.start_link(10, 10)
{:ok, {1, 1}} = MarsRover.Planet.add_obstacle(mars, 1, 1)
{:error, :obstacle} = MarsRover.Rover.start_link(mars, 1, 1)
end
test "send a rover to an obstacle crossing x edge" do
{:ok, mars} = MarsRover.Planet.start_link(10, 10)
{:ok, {1, 1}} = MarsRover.Planet.add_obstacle(mars, 1, 1)
{:error, :obstacle} = MarsRover.Rover.start_link(mars, 11, 1)
end
test "send a rover to an obstacle crossing y edge" do
{:ok, mars} = MarsRover.Planet.start_link(10, 10)
{:ok, {1, 1}} = MarsRover.Planet.add_obstacle(mars, 1, 1)
{:error, :obstacle} = MarsRover.Rover.start_link(mars, 1, 11)
end
  test "create a rover, send to planet without moving it" do
{:ok, mars} = MarsRover.Planet.start_link(10, 10)
{:ok, rover} = MarsRover.Rover.start_link(mars, 1, 1)
{:ok, {1, 1}} = MarsRover.Rover.commands(rover, "")
end
  test "create a rover, send to planet without obstacles and move it" do
{:ok, mars} = MarsRover.Planet.start_link(10, 10)
{:ok, rover} = MarsRover.Rover.start_link(mars, 1, 1, :s)
{:ok, {1, 10}} = MarsRover.Rover.commands(rover, "fffffffff")
end
  test "create a rover, send to planet without obstacles, move it and turn" do
{:ok, mars} = MarsRover.Planet.start_link(10, 10)
{:ok, rover} = MarsRover.Rover.start_link(mars, 1, 1, :s)
{:ok, {2, 4}} = MarsRover.Rover.commands(rover, "ffflf")
end
  test "create a rover, send to planet without obstacles, move it to opposite corner from {1, 1}" do
{:ok, mars} = MarsRover.Planet.start_link(10, 10)
{:ok, rover} = MarsRover.Rover.start_link(mars, 1, 1, :s)
{:ok, {10, 10}} = MarsRover.Rover.commands(rover, "ffffffffflfffffffff")
end
  test "create a rover, send to planet without obstacles, move it to opposite corner from {10, 10}" do
{:ok, mars} = MarsRover.Planet.start_link(10, 10)
{:ok, rover} = MarsRover.Rover.start_link(mars, 10, 10, :n)
{:ok, {1, 1}} = MarsRover.Rover.commands(rover, "ffffffffflfffffffff")
end
test "rover goes along the edges forward" do
{:ok, mars} = MarsRover.Planet.start_link(10, 10)
{:ok, rover} = MarsRover.Rover.start_link(mars, 1, 1, :e)
{:ok, {1, 1}} = MarsRover.Rover.commands(rover, "fffffffffrfffffffffrfffffffffrfffffffff")
end
test "rover goes along the edges backward" do
{:ok, mars} = MarsRover.Planet.start_link(10, 10)
{:ok, rover} = MarsRover.Rover.start_link(mars, 1, 1, :w)
{:ok, {1, 1}} = MarsRover.Rover.commands(rover, "bbbbbbbbblbbbbbbbbblbbbbbbbbblbbbbbbbbb")
end
test "rover goes over north edge forward" do
{:ok, mars} = MarsRover.Planet.start_link(10, 10)
{:ok, rover} = MarsRover.Rover.start_link(mars, 1, 1, :n)
{:ok, {1, 10}} = MarsRover.Rover.commands(rover, "f")
end
test "rover goes over south edge forward" do
{:ok, mars} = MarsRover.Planet.start_link(10, 10)
{:ok, rover} = MarsRover.Rover.start_link(mars, 1, 1, :s)
{:ok, {1, 1}} = MarsRover.Rover.commands(rover, "ffffffffff")
end
test "rover goes over est edge forward" do
{:ok, mars} = MarsRover.Planet.start_link(10, 10)
{:ok, rover} = MarsRover.Rover.start_link(mars, 1, 1, :e)
{:ok, {1, 1}} = MarsRover.Rover.commands(rover, "ffffffffff")
end
test "rover goes over west edge forward" do
{:ok, mars} = MarsRover.Planet.start_link(10, 10)
{:ok, rover} = MarsRover.Rover.start_link(mars, 1, 1, :w)
{:ok, {10, 1}} = MarsRover.Rover.commands(rover, "f")
end
test "rover goes over north edge backward" do
{:ok, mars} = MarsRover.Planet.start_link(10, 10)
{:ok, rover} = MarsRover.Rover.start_link(mars, 1, 1, :s)
{:ok, {1, 10}} = MarsRover.Rover.commands(rover, "b")
end
test "rover goes over south edge backward" do
{:ok, mars} = MarsRover.Planet.start_link(10, 10)
{:ok, rover} = MarsRover.Rover.start_link(mars, 1, 1, :n)
{:ok, {1, 1}} = MarsRover.Rover.commands(rover, "bbbbbbbbbb")
end
test "rover goes over est edge backward" do
{:ok, mars} = MarsRover.Planet.start_link(10, 10)
{:ok, rover} = MarsRover.Rover.start_link(mars, 1, 1, :w)
{:ok, {1, 1}} = MarsRover.Rover.commands(rover, "bbbbbbbbbb")
end
test "rover goes over west edge backward" do
{:ok, mars} = MarsRover.Planet.start_link(10, 10)
{:ok, rover} = MarsRover.Rover.start_link(mars, 1, 1, :e)
{:ok, {10, 1}} = MarsRover.Rover.commands(rover, "b")
end
test "rover goes near obstacle" do
{:ok, mars} = MarsRover.Planet.start_link(10, 10)
{:ok, {3, 1}} = MarsRover.Planet.add_obstacle(mars, 3, 1)
{:ok, rover} = MarsRover.Rover.start_link(mars, 1, 1, :e)
{:ok, {2, 1}} = MarsRover.Rover.commands(rover, "f")
end
test "rover hits obstacle" do
{:ok, mars} = MarsRover.Planet.start_link(10, 10)
{:ok, {3, 1}} = MarsRover.Planet.add_obstacle(mars, 3, 1)
{:ok, rover} = MarsRover.Rover.start_link(mars, 1, 1, :e)
{:error, {:obstacle, {3, 1}}} = MarsRover.Rover.commands(rover, "ff")
assert_last_good_position(rover, 2, 1)
end
test "rover hits obstacle across edge" do
{:ok, mars} = MarsRover.Planet.start_link(10, 10)
{:ok, {10, 1}} = MarsRover.Planet.add_obstacle(mars, 10, 1)
{:ok, rover} = MarsRover.Rover.start_link(mars, 1, 1, :w)
{:error, {:obstacle, {10, 1}}} = MarsRover.Rover.commands(rover, "f")
assert_last_good_position(rover, 1, 1)
end
test "rover hits obstacle at the opposite corner" do
{:ok, mars} = MarsRover.Planet.start_link(10, 10)
{:ok, {10, 10}} = MarsRover.Planet.add_obstacle(mars, 10, 10)
{:ok, rover} = MarsRover.Rover.start_link(mars, 1, 1, :s)
{:error, {:obstacle, {10, 10}}} = MarsRover.Rover.commands(rover, "flfrflfrflfrflfrflfrflfrflfrflfrflf")
assert_last_good_position(rover, 9, 10)
end
test "rover in a cage!" do
{:ok, mars} = MarsRover.Planet.start_link(10, 10)
{:ok, {5, 4}} = MarsRover.Planet.add_obstacle(mars, 5, 4)
{:ok, {5, 6}} = MarsRover.Planet.add_obstacle(mars, 5, 6)
{:ok, {4, 5}} = MarsRover.Planet.add_obstacle(mars, 4, 5)
{:ok, {6, 5}} = MarsRover.Planet.add_obstacle(mars, 6, 5)
{:ok, rover} = MarsRover.Rover.start_link(mars, 5, 5, :n)
{:error, {:obstacle, {5, 4}}} = MarsRover.Rover.commands(rover, "f")
assert_last_good_position(rover, 5, 5)
{:error, {:obstacle, {6, 5}}} = MarsRover.Rover.commands(rover, "rf")
assert_last_good_position(rover, 5, 5)
{:error, {:obstacle, {5, 6}}} = MarsRover.Rover.commands(rover, "rf")
assert_last_good_position(rover, 5, 5)
{:error, {:obstacle, {4, 5}}} = MarsRover.Rover.commands(rover, "rf")
assert_last_good_position(rover, 5, 5)
end
defp assert_last_good_position(rover, good_x, good_y), do: assert MarsRover.Rover.position(rover) == {good_x, good_y}
end
| 38.697802 | 119 | 0.649723 |
9e7940aa2b891957ae8597449b532c265aff21af | 1,738 | ex | Elixir | lib/auto_api/capabilities/vehicle_location_capability.ex | nonninz/auto-api-elixir | 53e11542043285e94bbb5a0a3b8ffff0b1b47167 | ["MIT"] | null | null | null | lib/auto_api/capabilities/vehicle_location_capability.ex | nonninz/auto-api-elixir | 53e11542043285e94bbb5a0a3b8ffff0b1b47167 | ["MIT"] | null | null | null | lib/auto_api/capabilities/vehicle_location_capability.ex | nonninz/auto-api-elixir | 53e11542043285e94bbb5a0a3b8ffff0b1b47167 | ["MIT"] | null | null | null |
# AutoAPI
# The MIT License
#
# Copyright (c) 2018- High-Mobility GmbH (https://high-mobility.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
defmodule AutoApi.VehicleLocationCapability do
@moduledoc """
Basic settings for Vehicle Location Capability
iex> alias AutoApi.VehicleLocationCapability, as: VL
iex> VL.identifier
<<0x00, 0x30>>
iex> VL.name
:vehicle_location
iex> VL.description
"Vehicle Location"
iex> length(VL.properties)
9
iex> List.first(VL.properties)
{4, :coordinates}
"""
@command_module AutoApi.VehicleLocationCommand
@state_module AutoApi.VehicleLocationState
use AutoApi.Capability, spec_file: "vehicle_location.json"
end
| 38.622222 | 79 | 0.750288 |
9e79452b3106fa30edbbe688e5d26bc5f283e7fb | 192 | exs | Elixir | test/bio/position/generator_test.exs | xire28/bio | c3266b551f8ac855653d6de7f13fd566f2daa549 | ["MIT"] | null | null | null | test/bio/position/generator_test.exs | xire28/bio | c3266b551f8ac855653d6de7f13fd566f2daa549 | ["MIT"] | null | null | null | test/bio/position/generator_test.exs | xire28/bio | c3266b551f8ac855653d6de7f13fd566f2daa549 | ["MIT"] | null | null | null |
defmodule Bio.Position.GeneratorTest do
alias Bio.Position
use ExUnit.Case
doctest Position.Generator
test "create" do
assert %Position{} = Position.Generator.create()
end
end
| 17.454545 | 52 | 0.744792 |
9e795a58620ea6e3dfa64d29c6ffde85df1bce07 | 4,450 | ex | Elixir | lib/cp_mgmt/access_layer.ex | willfore/cp_mgmt | 7652c5102e0bf154293f97364609a4266bd70bd2 | ["Apache-2.0"] | null | null | null | lib/cp_mgmt/access_layer.ex | willfore/cp_mgmt | 7652c5102e0bf154293f97364609a4266bd70bd2 | ["Apache-2.0"] | null | null | null | lib/cp_mgmt/access_layer.ex | willfore/cp_mgmt | 7652c5102e0bf154293f97364609a4266bd70bd2 | ["Apache-2.0"] | null | null | null |
defmodule CpMgmt.AccessLayer do
@moduledoc """
This module manages simple Access Layer functions with the Web API
"""
defstruct(status: nil, data: %{})
alias CpMgmt.AccessLayer
@doc """
Creates an Access Layer via the API
## Examples
iex> CpMgmt.AccessLayer.add("some-layer-name", ["add-default-rule": true])
{:ok,
%CpMgmt.AccessLayer{
data: %{
"applications-and-url-filtering" => false,
"color" => "black",
"comments" => "",
"content-awareness" => false,
"domain" => %{
"domain-type" => "domain",
"name" => "SMC User",
"uid" => "41e821a0-3720-11e3-aa6e-0800200c9fde"
},
"firewall" => true,
"icon" => "ApplicationFirewall/rulebase",
"implicit-cleanup-action" => "drop",
"meta-info" => %{
"creation-time" => %{
"iso-8601" => "2018-11-14T22:09-0600",
"posix" => 1542254971826
},
"creator" => "admin",
"last-modifier" => "admin",
"last-modify-time" => %{
"iso-8601" => "2018-11-14T22:09-0600",
"posix" => 1542254972362
},
"lock" => "locked by current session",
"validation-state" => "ok"
},
"mobile-access" => false,
"name" => "test",
"read-only" => false,
"shared" => false,
"tags" => [],
"type" => "access-layer",
"uid" => "978ff99e-23c0-4d1e-84d2-3ec2b7e283c2"
},
status: 200
}}
iex> CpMgmt.AccessLayer.add("some-layer-name")
      {:error, %CpMgmt.AccessLayer{status: 402, data: error_data}}
"""
def add(name, options \\ []) do
params = Enum.into(options, %{name: name})
CpMgmt.logged_in?()
|> Tesla.post("/web_api/add-access-layer", params)
|> CpMgmt.transform_response()
|> CpMgmt.to_struct(%AccessLayer{})
|> CpMgmt.publish()
end
@doc """
Shows an Access layer
## Examples
iex> CpMgmt.AccessLayer.show("some-layer-name")
{:ok,
%CpMgmt.AccessLayer{
data: %{
"applications-and-url-filtering" => false,
"color" => "black",
"comments" => "",
"content-awareness" => false,
"domain" => %{
"domain-type" => "domain",
"name" => "SMC User",
"uid" => "41e821a0-3720-11e3-aa6e-0800200c9fde"
},
"firewall" => true,
"icon" => "ApplicationFirewall/rulebase",
"implicit-cleanup-action" => "drop",
"meta-info" => %{
"creation-time" => %{
"iso-8601" => "2018-11-14T22:09-0600",
"posix" => 1542254968431
},
"creator" => "admin",
"last-modifier" => "admin",
"last-modify-time" => %{
"iso-8601" => "2018-11-14T22:09-0600",
"posix" => 1542254969218
},
"lock" => "unlocked",
"validation-state" => "ok"
},
"mobile-access" => false,
"name" => "test",
"read-only" => false,
"shared" => false,
"tags" => [],
"type" => "access-layer",
"uid" => "ebfd1aa4-a60a-4937-b882-ba01d939d62c"
},
status: 200
}}
iex> CpMgmt.AccessLayer.show("some-layer-name")
      {:error, %CpMgmt.AccessLayer{status: status, data: error_data}}
"""
def show(name) do
CpMgmt.logged_in?()
|> Tesla.post("/web_api/show-access-layer", %{name: name})
|> CpMgmt.transform_response()
|> CpMgmt.to_struct(%AccessLayer{})
end
@doc """
Removes an access layer
  ## Examples
iex> CpMgmt.AccessLayer.remove("some-layer-name")
{:ok, %CpMgmt.AccessLayer{data: %{"message" => "OK"}, status: 200}}
iex> CpMgmt.AccessLayer.remove("some-layer-name")
{:error, %CpMgmt.AccessLayer{error, status}}
"""
def remove(name) do
CpMgmt.logged_in?()
|> Tesla.post("/web_api/delete-access-layer", %{name: name})
|> CpMgmt.transform_response()
|> CpMgmt.to_struct(%AccessLayer{})
|> CpMgmt.publish()
end
@doc """
Shows all Access Layers with the same information as show.
"""
def show_all do
CpMgmt.logged_in?()
|> Tesla.post("/web_api/show-access-layers", %{})
|> CpMgmt.transform_response()
|> CpMgmt.to_struct(%AccessLayer{})
end
end
| 29.865772 | 80 | 0.510112 |
9e795cf5d4454e7f2b3bc5f943cc9879dff001be | 4,120 | exs | Elixir | test/client_test.exs | kanatohodets/zookeeper-elixir | 773f63583af4f3de923ddf6a07a686330ddd3a30 | ["MIT"] | 19 | 2015-10-08T07:23:32.000Z | 2021-06-25T09:30:28.000Z | test/client_test.exs | kanatohodets/zookeeper-elixir | 773f63583af4f3de923ddf6a07a686330ddd3a30 | ["MIT"] | 1 | 2016-02-18T22:39:01.000Z | 2016-02-18T22:39:01.000Z | test/client_test.exs | kanatohodets/zookeeper-elixir | 773f63583af4f3de923ddf6a07a686330ddd3a30 | ["MIT"] | 20 | 2015-08-13T00:04:08.000Z | 2021-05-27T23:08:13.000Z |
defmodule Zookeeper.ClientTest do
use ExUnit.Case
alias Zookeeper.Client, as: ZK
alias Zookeeper.ZnodeStat
# TODO: test ACL
setup_all do
{:ok, pid} = ZK.start_link
pid |> cleanup
{:ok, pid: pid}
end
setup %{pid: pid}=context do
on_exit context, fn -> cleanup(pid) end
:ok
end
test "simple create", %{pid: pid} do
path = "/exunit"
assert {:ok, ^path} = ZK.create(pid, path)
assert {:ok, {"", _stat}} = ZK.get(pid, path)
end
test "create with data", %{pid: pid} do
path = "/exunit"
data = "data"
assert {:ok, path} == ZK.create(pid, path, data)
assert {:ok, {^data, _stat}} = ZK.get(pid, "/exunit")
end
test "create makepath", %{pid: pid} do
path = "/exunit/a/b/c"
data = "makepath"
assert {:error, :no_node} == ZK.create(pid, path)
assert {:ok, path} == ZK.create(pid, path, data, makepath: true)
assert {:ok, {^data, _stat}} = ZK.get(pid, path)
end
test "create ephemeral", %{pid: pid} do
path = "/exunit"
assert {:ok, path} == ZK.create(pid, path, "", create_mode: :ephemeral)
assert {:ok, {"", %ZnodeStat{owner_session_id: session_id}}} = ZK.get(pid, path)
assert session_id != 0
assert {:error, :no_children_for_ephemerals} == ZK.create(pid, "#{path}/a")
end
test "create sequential", %{pid: pid} do
assert {:ok, "/exunit/s" <> seq} = ZK.create(pid, "/exunit/s", "", create_mode: :persistent_sequential, makepath: true)
assert String.length(seq) > 0
end
test "get watch", %{pid: pid} do
path = "/exunit"
assert {:ok, path} == ZK.create(pid, path)
assert {:ok, {"", _stat}} = ZK.get(pid, path, self())
assert {:ok, _stat} = ZK.set(pid, path, "^.^")
assert_receive {ZK, ^path, :data}
end
test "exists", %{pid: pid} do
path = "/exunit"
assert {:error, :no_node} == ZK.exists(pid, path)
assert {:ok, path} == ZK.create(pid, path)
assert {:ok, %ZnodeStat{}} = ZK.exists(pid, path)
end
test "exists watch", %{pid: pid} do
path = "/exunit"
assert {:error, :no_node} == ZK.exists(pid, path, self())
assert {:ok, path} == ZK.create(pid, path)
assert_receive {ZK, ^path, :exists}
end
test "ensure path", %{pid: pid} do
path = "/exunit/a/b/c"
assert {:error, :no_node} == ZK.exists(pid, path)
assert :ok == ZK.ensure_path(pid, path)
assert {:ok, _stat} = ZK.exists(pid, path)
end
test "get children", %{pid: pid} do
path = "/exunit"
assert {:error, :no_node} = ZK.get_children(pid, path)
for i <- 0..5 do
assert {:ok, _path} = ZK.create(pid, "#{path}/#{i}", "", makepath: true)
end
assert {:ok, children} = ZK.get_children(pid, path)
assert Enum.map(0..5, &to_string/1) == Enum.sort(children)
end
test "get children watch", %{pid: pid} do
path = "/exunit"
assert {:ok, path} == ZK.create(pid, path)
assert {:ok, []} = ZK.get_children(pid, path, self())
assert {:ok, _path} = ZK.create(pid, "#{path}/a")
assert_receive {ZK, ^path, :children}
end
test "set", %{pid: pid} do
path = "/exunit"
assert {:error, :no_node} == ZK.set(pid, path, "")
assert {:ok, path} == ZK.create(pid, path)
assert {:ok, {"", %ZnodeStat{version: version}}} = ZK.get(pid, path)
assert {:ok, %ZnodeStat{version: version=1}} = ZK.set(pid, path, "a")
assert {:error, :bad_version} == ZK.set(pid, path, "b", 0)
assert {:ok, %ZnodeStat{version: 2}} = ZK.set(pid, path, "b", version)
end
  test "delete", %{pid: pid} do
path = "/exunit"
assert {:ok, path} == ZK.create(pid, path)
assert {:ok, _stat} = ZK.set(pid, path, "b")
assert {:error, :bad_version} == ZK.delete(pid, path, 0)
assert :ok == ZK.delete(pid, path, 1)
assert {:error, :no_node} == ZK.get(pid, path)
end
  test "recursive delete", %{pid: pid} do
path = "/exunit"
assert {:ok, _path} = ZK.create(pid, "#{path}/a/b/c", "", makepath: true)
assert {:error, :not_empty} == ZK.delete(pid, path)
assert :ok == ZK.delete(pid, path, -1, true)
end
defp cleanup(pid) do
pid |> ZK.delete("/exunit", -1, true)
end
end
| 31.450382 | 123 | 0.585194 |
9e797af1c5bc019ee5fe8cd27fe7287c6cf61ae7 | 8,837 | ex | Elixir | clients/display_video/lib/google_api/display_video/v1/api/partners.ex | EVLedger/elixir-google-api | 61edef19a5e2c7c63848f7030c6d8d651e4593d4 | [
"Apache-2.0"
] | null | null | null | clients/display_video/lib/google_api/display_video/v1/api/partners.ex | EVLedger/elixir-google-api | 61edef19a5e2c7c63848f7030c6d8d651e4593d4 | [
"Apache-2.0"
] | null | null | null | clients/display_video/lib/google_api/display_video/v1/api/partners.ex | EVLedger/elixir-google-api | 61edef19a5e2c7c63848f7030c6d8d651e4593d4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This file is auto generated by the elixir code generator program.
# Do not edit this file manually.
defmodule GoogleApi.DisplayVideo.V1.Api.Partners do
@moduledoc """
API calls for all endpoints tagged `Partners`.
"""
alias GoogleApi.DisplayVideo.V1.Connection
alias GoogleApi.Gax.{Request, Response}
@library_version Mix.Project.config() |> Keyword.get(:version, "")
@doc """
Gets a channel for a partner or advertiser.
## Parameters
* `connection` (*type:* `GoogleApi.DisplayVideo.V1.Connection.t`) - Connection to server
* `partner_id` (*type:* `String.t`) - The ID of the partner that owns the fetched channel.
* `channel_id` (*type:* `String.t`) - Required. The ID of the channel to fetch.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:"$.xgafv"` (*type:* `String.t`) - V1 error format.
* `:access_token` (*type:* `String.t`) - OAuth access token.
* `:alt` (*type:* `String.t`) - Data format for response.
* `:callback` (*type:* `String.t`) - JSONP
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* `:uploadType` (*type:* `String.t`) - Legacy upload protocol for media (e.g. "media", "multipart").
* `:upload_protocol` (*type:* `String.t`) - Upload protocol for media (e.g. "raw", "multipart").
* `:advertiserId` (*type:* `String.t`) - The ID of the advertiser that owns the fetched channel.
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.DisplayVideo.V1.Model.Channel{}}` on success
* `{:error, info}` on failure
"""
@spec displayvideo_partners_channels_get(
Tesla.Env.client(),
String.t(),
String.t(),
keyword(),
keyword()
) ::
{:ok, GoogleApi.DisplayVideo.V1.Model.Channel.t()}
| {:ok, Tesla.Env.t()}
| {:error, Tesla.Env.t()}
def displayvideo_partners_channels_get(
connection,
partner_id,
channel_id,
optional_params \\ [],
opts \\ []
) do
optional_params_config = %{
:"$.xgafv" => :query,
:access_token => :query,
:alt => :query,
:callback => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:uploadType => :query,
:upload_protocol => :query,
:advertiserId => :query
}
request =
Request.new()
|> Request.method(:get)
|> Request.url("/v1/partners/{+partnerId}/channels/{+channelId}", %{
"partnerId" => URI.encode(partner_id, &URI.char_unreserved?/1),
"channelId" => URI.encode(channel_id, &URI.char_unreserved?/1)
})
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.DisplayVideo.V1.Model.Channel{}])
end
@doc """
Lists channels for a partner or advertiser.
## Parameters
* `connection` (*type:* `GoogleApi.DisplayVideo.V1.Connection.t`) - Connection to server
* `partner_id` (*type:* `String.t`) - The ID of the partner that owns the channels.
* `optional_params` (*type:* `keyword()`) - Optional parameters
* `:"$.xgafv"` (*type:* `String.t`) - V1 error format.
* `:access_token` (*type:* `String.t`) - OAuth access token.
* `:alt` (*type:* `String.t`) - Data format for response.
* `:callback` (*type:* `String.t`) - JSONP
* `:fields` (*type:* `String.t`) - Selector specifying which fields to include in a partial response.
* `:key` (*type:* `String.t`) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
* `:oauth_token` (*type:* `String.t`) - OAuth 2.0 token for the current user.
* `:prettyPrint` (*type:* `boolean()`) - Returns response with indentations and line breaks.
* `:quotaUser` (*type:* `String.t`) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* `:uploadType` (*type:* `String.t`) - Legacy upload protocol for media (e.g. "media", "multipart").
* `:upload_protocol` (*type:* `String.t`) - Upload protocol for media (e.g. "raw", "multipart").
* `:advertiserId` (*type:* `String.t`) - The ID of the advertiser that owns the channels.
* `:filter` (*type:* `String.t`) - Allows filtering by channel fields.
Supported syntax:
  * Filter expressions for channel currently can only contain at most one restriction.
* A restriction has the form of `{field} {operator} {value}`.
* The operator must be `CONTAINS (:)`.
* Supported fields:
- `displayName`
Examples:
* All channels for which the display name contains "google":
`displayName : "google"`.
The length of this field should be no more than 500 characters.
* `:orderBy` (*type:* `String.t`) - Field by which to sort the list.
Acceptable values are:
* `displayName` (default)
* `channelId`
The default sorting order is ascending. To specify descending order for a
field, a suffix " desc" should be added to the field name. Example:
`displayName desc`.
* `:pageSize` (*type:* `integer()`) - Requested page size. Must be between `1` and `100`. If unspecified will
default to `100`. Returns error code `INVALID_ARGUMENT` if an invalid value
is specified.
* `:pageToken` (*type:* `String.t`) - A token identifying a page of results the server should return.
Typically, this is the value of
next_page_token returned from the
previous call to `ListChannels` method. If not specified, the first page
of results will be returned.
* `opts` (*type:* `keyword()`) - Call options
## Returns
* `{:ok, %GoogleApi.DisplayVideo.V1.Model.ListChannelsResponse{}}` on success
* `{:error, info}` on failure
"""
@spec displayvideo_partners_channels_list(Tesla.Env.client(), String.t(), keyword(), keyword()) ::
{:ok, GoogleApi.DisplayVideo.V1.Model.ListChannelsResponse.t()}
| {:ok, Tesla.Env.t()}
| {:error, Tesla.Env.t()}
def displayvideo_partners_channels_list(
connection,
partner_id,
optional_params \\ [],
opts \\ []
) do
optional_params_config = %{
:"$.xgafv" => :query,
:access_token => :query,
:alt => :query,
:callback => :query,
:fields => :query,
:key => :query,
:oauth_token => :query,
:prettyPrint => :query,
:quotaUser => :query,
:uploadType => :query,
:upload_protocol => :query,
:advertiserId => :query,
:filter => :query,
:orderBy => :query,
:pageSize => :query,
:pageToken => :query
}
request =
Request.new()
|> Request.method(:get)
|> Request.url("/v1/partners/{+partnerId}/channels", %{
"partnerId" => URI.encode(partner_id, &URI.char_unreserved?/1)
})
|> Request.add_optional_params(optional_params_config, optional_params)
|> Request.library_version(@library_version)
connection
|> Connection.execute(request)
|> Response.decode(opts ++ [struct: %GoogleApi.DisplayVideo.V1.Model.ListChannelsResponse{}])
end
end
| 42.690821 | 196 | 0.621252 |
9e79a9a5793bc8a20193be572adb8d27775d4e46 | 2,070 | ex | Elixir | lib/honeydew/dispatcher/lru_node.ex | evadne/honeydew | c3c2f6095a28393cae13c0e686bdb6257d532ca1 | ["MIT"] | null | null | null | lib/honeydew/dispatcher/lru_node.ex | evadne/honeydew | c3c2f6095a28393cae13c0e686bdb6257d532ca1 | ["MIT"] | null | null | null | lib/honeydew/dispatcher/lru_node.ex | evadne/honeydew | c3c2f6095a28393cae13c0e686bdb6257d532ca1 | ["MIT"] | null | null | null |
defmodule Honeydew.Dispatcher.LRUNode do
alias Honeydew.Dispatcher.LRU
  @moduledoc """
  A dispatcher that picks the least recently used node first, then the least
  recently used worker on that node (via `Honeydew.Dispatcher.LRU`).
  """
def init do
# {node_queue, workers}
{:ok, {:queue.new, Map.new}}
end
def available?({_node_queue, workers}) do
workers
|> Map.values
|> Enum.any?(&LRU.available?/1)
end
def check_in(worker, {node_queue, workers}) do
node = worker_node(worker)
{node_queue, node_workers} =
workers
|> Map.get(node)
|> case do
nil ->
# this node isn't currently known
{:ok, node_workers} = LRU.init
{:queue.in(node, node_queue), node_workers}
node_workers ->
# there's already at least one worker from this node present
{node_queue, node_workers}
end
{node_queue, Map.put(workers, node, LRU.check_in(worker, node_workers))}
end
def check_out(job, {node_queue, workers} = state) do
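    # Dequeue the least recently used node, look up its worker queue, and
    # check a worker out of it; any failure falls through to {nil, state}.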
with {{:value, node}, node_queue} <- :queue.out(node_queue),
%{^node => node_workers} <- workers,
{worker, node_workers} when not is_nil(worker) <- LRU.check_out(job, node_workers) do
if :queue.is_empty(node_workers) do
{worker, {node_queue, Map.delete(workers, node)}}
else
{worker, {:queue.in(node, node_queue), Map.put(workers, node, node_workers)}}
end
else _ ->
{nil, state}
end
end
def remove(worker, {node_queue, workers}) do
node = worker_node(worker)
with %{^node => node_workers} <- workers,
node_workers <- LRU.remove(worker, node_workers) do
if LRU.available?(node_workers) do
{node_queue, Map.put(workers, node, node_workers)}
else
{:queue.filter(&(&1 != node), node_queue), Map.delete(workers, node)}
end
else _ ->
# this means that we've been asked to remove a worker we don't know about
# this should never happen :o
{node_queue, workers}
end
end
# for testing
defp worker_node({_worker, node}), do: node
defp worker_node(worker) do
:erlang.node(worker)
end
end
| 27.972973 | 94 | 0.612077 |
9e79cb8e8c377a4298294e05983d760efcff1590 | 915 | ex | Elixir | lib/ergo.ex | mmower/ergo | f0cfa8debd6697c56509e7856578dc49666f3ff2 | ["MIT"] | 5 | 2021-07-11T13:01:56.000Z | 2021-12-29T17:02:00.000Z | lib/ergo.ex | mmower/ergo | f0cfa8debd6697c56509e7856578dc49666f3ff2 | ["MIT"] | null | null | null | lib/ergo.ex | mmower/ergo | f0cfa8debd6697c56509e7856578dc49666f3ff2 | ["MIT"] | null | null | null |
defmodule Ergo do
@moduledoc "README.md"
|> File.read!()
|> String.split("<!-- MDOC !-->")
|> Enum.fetch!(1)
alias Ergo.{Context, Parser}
use Application
@doc ~S"""
`start/2` should be called before
"""
def start(_type, _args) do
Supervisor.start_link([Ergo.ParserRefs], strategy: :one_for_one)
end
@doc ~S"""
  The `parse/3` function is a simple entry point for parsing inputs; it
  constructs the `Context` record required.

  # Options

  * `debug:` `true` or `false`
# Examples
iex> alias Ergo.Terminals
iex> parser = Terminals.literal("Hello")
iex> assert %Ergo.Context{status: :ok, ast: "Hello", input: " World", index: 5, line: 1, col: 6} = Ergo.parse(parser, "Hello World")
"""
def parse(%Parser{} = parser, input, opts \\ []) when is_binary(input) do
input
|> Context.new(opts)
|> Parser.invoke(parser)
end
end
| 24.72973 | 138 | 0.610929 |
9e79ee69861d8b1a336a458fa436d206af120413 | 1,671 | exs | Elixir | mix.exs | pivstone/docker_client | 44d3093f97eebf7d2eb9f98e4b5d584cb8f58a0e | [
"MIT"
] | 1 | 2017-11-15T12:31:19.000Z | 2017-11-15T12:31:19.000Z | mix.exs | pivstone/docker_client | 44d3093f97eebf7d2eb9f98e4b5d584cb8f58a0e | [
"MIT"
] | null | null | null | mix.exs | pivstone/docker_client | 44d3093f97eebf7d2eb9f98e4b5d584cb8f58a0e | [
"MIT"
] | 2 | 2018-11-12T04:09:03.000Z | 2019-07-17T13:50:54.000Z | defmodule Docker.Mixfile do
use Mix.Project
def project do
[app: :docker_client,
version: "0.2.0",
elixir: "~> 1.3",
build_embedded: Mix.env == :prod,
start_permanent: Mix.env == :prod,
deps: deps(),
name: "docker_client",
test_coverage: [tool: ExCoveralls],
preferred_cli_env: ["coveralls": :test, "coveralls.detail": :test, "coveralls.post": :test, "coveralls.html": :test],
source_url: "https://github.com/pivstone/docker-us-connector",
docs: [main: "readme", # The main page in the docs
extras: ["README.md"]]
]
end
# Configuration for the OTP application
#
# Type "mix help compile.app" for more information
def application do
[applications: [:logger]]
end
# Dependencies can be Hex packages:
#
# {:mydep, "~> 0.3.0"}
#
# Or git/path repositories:
#
# {:mydep, git: "https://github.com/elixir-lang/mydep.git", tag: "0.1.0"}
#
# Type "mix help deps" for more examples and options
defp deps do
[
{:poison, "~> 2.2"},
{:ex_doc, "~> 0.14", only: :dev},
{:excoveralls, "~> 0.5", only: :test},
{:meck, "~> 0.8.4", only: :test}
]
end
def description do
"""
A Docker client via Unix socket .
"""
end
def package do
[# These are the default files included in the package
name: :docker_client,
files: ["lib", "mix.exs", "README*", "readme*", "LICENSE*", "license*"],
maintainers: ["[email protected]"],
licenses: ["MIT licenses"],
links: %{"GitHub" => "https://github.com/pivstone/docker_client",
"Docs" => "https://pivstone.github.io/docker_client"}]
end
end
| 27.393443 | 122 | 0.590066 |
9e79f19ebbc37854a64f7c5848b3b628af1db623 | 153 | exs | Elixir | eventLoggingService/test/event_logging_test.exs | andraspatka/jobportal-ms | 006c8ca212f88566113c4b5c00dfe1d4e421c034 | [
"MIT"
] | 1 | 2021-05-25T18:24:27.000Z | 2021-05-25T18:24:27.000Z | eventLoggingService/test/event_logging_test.exs | andraspatka/jobportal-ms | 006c8ca212f88566113c4b5c00dfe1d4e421c034 | ["MIT"] | 1 | 2021-05-23T09:50:10.000Z | 2021-05-23T09:50:10.000Z | eventLoggingService/test/event_logging_test.exs | andraspatka/jobportal-ms | 006c8ca212f88566113c4b5c00dfe1d4e421c034 | ["MIT"] | 1 | 2021-05-23T09:50:10.000Z | 2021-05-23T09:50:10.000Z |
defmodule EventLoggingTest do
use ExUnit.Case
doctest EventLogging
test "greets the world" do
assert EventLogging.hello() == :world
end
end
| 17 | 41 | 0.738562 |